applied-ai-018 committed on
Commit 76d22f7 · verified · 1 Parent(s): 9c39b2e

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. ckpts/universal/global_step120/zero/11.input_layernorm.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step120/zero/3.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
  3. ckpts/universal/global_step120/zero/8.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
  4. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/FlushDenormal.h +14 -0
  5. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/Utils.h +10 -0
  6. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional.h +4 -0
  7. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_base.h +329 -0
  8. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_bfloat16.h +549 -0
  9. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/intrinsics.h +43 -0
  10. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec.h +47 -0
  11. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/missing_vld1_neon.h +452 -0
  12. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/missing_vst1_neon.h +8 -0
  13. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256.h +307 -0
  14. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_bfloat16.h +1096 -0
  15. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_complex_double.h +431 -0
  16. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_complex_float.h +468 -0
  17. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_double.h +442 -0
  18. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_float.h +636 -0
  19. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_float_neon.h +892 -0
  20. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_int.h +1586 -0
  21. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_qint.h +1335 -0
  22. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_bfloat16_vsx.h +73 -0
  23. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_common_vsx.h +246 -0
  24. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_complex_double_vsx.h +560 -0
  25. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_complex_float_vsx.h +628 -0
  26. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_double_vsx.h +438 -0
  27. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_float_vsx.h +461 -0
  28. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_int16_vsx.h +368 -0
  29. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_int32_vsx.h +298 -0
  30. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_int64_vsx.h +251 -0
  31. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_qint32_vsx.h +245 -0
  32. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_qint8_vsx.h +447 -0
  33. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_quint8_vsx.h +466 -0
  34. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vsx_helpers.h +474 -0
  35. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/zarch/vec256_zarch.h +2818 -0
  36. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512.h +275 -0
  37. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_bfloat16.h +1644 -0
  38. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_complex_double.h +512 -0
  39. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_complex_float.h +1018 -0
  40. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_double.h +467 -0
  41. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_float.h +793 -0
  42. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_int.h +1459 -0
  43. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_qint.h +1346 -0
  44. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_base.h +1108 -0
  45. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_half.h +50 -0
  46. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_n.h +344 -0
  47. venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vml.h +171 -0
  48. venv/lib/python3.10/site-packages/torch/include/ATen/detail/IPUHooksInterface.h +35 -0
  49. venv/lib/python3.10/site-packages/torch/include/ATen/miopen/Descriptors.h +146 -0
  50. venv/lib/python3.10/site-packages/torch/include/ATen/miopen/Exceptions.h +41 -0
ckpts/universal/global_step120/zero/11.input_layernorm.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7f7c8c82a4f6fdfc374070234ba1c8595b06b592f4fb24a84f773165ce50b0e
+ size 9372
ckpts/universal/global_step120/zero/3.mlp.dense_h_to_4h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7cda050583b2e2d51bdefe0f868fa73586daf65e1d0c2e947952922431c6d3f5
+ size 33555612
ckpts/universal/global_step120/zero/8.mlp.dense_4h_to_h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1787bc9de363877a7d5d96aad28294cb4719be3cccf3252a41597eb4c05e4846
+ size 33555533
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/FlushDenormal.h ADDED
@@ -0,0 +1,14 @@
+ /// Flush-To-Zero and Denormals-Are-Zero mode
+ ///
+ /// Flush-To-Zero (FTZ) and Denormals-Are-Zero (DAZ) are modes that bypass
+ /// IEEE 754 methods of dealing with denormal floating-point numbers on x86-64
+ /// and some x86 CPUs. They result in reduced precision for values near zero,
+ /// but increased performance.
+ ///
+ /// See https://software.intel.com/en-us/articles/x87-and-sse-floating-point-assists-in-ia-32-flush-to-zero-ftz-and-denormals-are-zero-daz
+
+ namespace at::cpu {
+
+ bool set_flush_denormal(bool on);
+
+ } // namespace at::cpu
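
Not part of the commit itself, but as a minimal C++ sketch of how the declaration above is meant to be used (assuming a translation unit that links against libtorch):

#include <ATen/cpu/FlushDenormal.h>  // header added above
#include <iostream>

int main() {
  // Enable FTZ/DAZ for this process. Based on the declaration above, the
  // return value is expected to be true only when the CPU/build supports
  // flushing denormals (an assumption, not verified against this checkout).
  const bool enabled = at::cpu::set_flush_denormal(true);
  std::cout << "flush-denormal enabled: " << std::boolalpha << enabled << '\n';
  return 0;
}
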
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/Utils.h ADDED
@@ -0,0 +1,10 @@
+ #pragma once
+
+ #include <c10/macros/Export.h>
+
+ namespace at::cpu {
+
+ // Detect if CPU support Vector Neural Network Instruction.
+ TORCH_API bool is_cpu_support_vnni();
+
+ } // namespace at::cpu
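
A similarly minimal sketch (again assuming libtorch is linked) of calling the VNNI probe declared above:

#include <ATen/cpu/Utils.h>  // header added above
#include <iostream>

int main() {
  // Prints whether the CPU reports Vector Neural Network Instruction (VNNI) support.
  std::cout << "VNNI available: " << std::boolalpha
            << at::cpu::is_cpu_support_vnni() << '\n';
  return 0;
}
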
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional.h ADDED
@@ -0,0 +1,4 @@
+ #pragma once
+
+ #include <ATen/cpu/vec/functional_base.h>
+ #include <ATen/cpu/vec/functional_bfloat16.h>
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_base.h ADDED
@@ -0,0 +1,329 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/vec.h>
7
+ #include <c10/util/irange.h>
8
+
9
+ namespace at::vec {
10
+
11
+ // slow path
12
+ template <typename scalar_t, typename Op>
13
+ inline scalar_t vec_reduce_all(
14
+ const Op& vec_fun,
15
+ vec::Vectorized<scalar_t> acc_vec,
16
+ int64_t size) {
17
+ using Vec = vec::Vectorized<scalar_t>;
18
+ scalar_t acc_arr[Vec::size()];
19
+ acc_vec.store(acc_arr);
20
+ for (const auto i : c10::irange(1, size)) {
21
+ std::array<scalar_t, Vec::size()> acc_arr_next = {0};
22
+ acc_arr_next[0] = acc_arr[i];
23
+ Vec acc_vec_next = Vec::loadu(acc_arr_next.data());
24
+ acc_vec = vec_fun(acc_vec, acc_vec_next);
25
+ }
26
+ acc_vec.store(acc_arr);
27
+ return acc_arr[0];
28
+ }
29
+
30
+ template <typename scalar_t, typename Op>
31
+ struct VecReduceAllSIMD {
32
+ static inline scalar_t apply(const Op& vec_fun, const Vectorized<scalar_t>& acc_vec) {
33
+ return vec_reduce_all(vec_fun, acc_vec, Vectorized<scalar_t>::size());
34
+ }
35
+ };
36
+
37
+ #if defined(__GNUC__) && (__GNUC__ > 5) && !defined(_MSC_VER) && !defined(C10_MOBILE)
38
+ #if defined(CPU_CAPABILITY_AVX2)
39
+ template <typename Op>
40
+ struct VecReduceAllSIMD<float, Op> {
41
+ static inline float apply(const Op& vec_fun, const Vectorized<float>& acc_vec) {
42
+ using Vec = Vectorized<float>;
43
+ Vec v = acc_vec;
44
+ // 128-bit shuffle
45
+ Vec v1 = _mm256_permute2f128_ps(v, v, 0x1);
46
+ v = vec_fun(v, v1);
47
+ // 64-bit shuffle
48
+ v1 = _mm256_shuffle_ps(v, v, 0x4E);
49
+ v = vec_fun(v, v1);
50
+ // 32-bit shuffle
51
+ v1 = _mm256_shuffle_ps(v, v, 0xB1);
52
+ v = vec_fun(v, v1);
53
+ return _mm256_cvtss_f32(v);
54
+ }
55
+ };
56
+ #endif // defined(CPU_CAPABILITY_AVX2)
57
+ #if defined(CPU_CAPABILITY_AVX512)
58
+ template <typename Op>
59
+ struct VecReduceAllSIMD<float, Op> {
60
+ static inline float apply(const Op& vec_fun, const Vectorized<float>& acc_vec) {
61
+ using Vec = Vectorized<float>;
62
+ Vec v = acc_vec;
63
+ // 256-bit shuffle
64
+ Vec v1 = _mm512_shuffle_f32x4(v, v, 0x4E);
65
+ v = vec_fun(v, v1);
66
+ // 128-bit shuffle
67
+ v1 = _mm512_shuffle_f32x4(v, v, 0xB1);
68
+ v = vec_fun(v, v1);
69
+ // 64-bit shuffle
70
+ v1 = _mm512_shuffle_ps(v, v, 0x4E);
71
+ v = vec_fun(v, v1);
72
+ // 32-bit shuffle
73
+ v1 = _mm512_shuffle_ps(v, v, 0xB1);
74
+ v = vec_fun(v, v1);
75
+ return _mm512_cvtss_f32(v);
76
+ }
77
+ };
78
+ #endif // defined(CPU_CAPABILITY_AVX512)
79
+ #endif // defined(__GNUC__) && (__GNUC__ > 5) && !defined(_MSC_VER) && !defined(C10_MOBILE)
80
+
81
+ template <typename scalar_t, typename Op>
82
+ inline scalar_t vec_reduce_all(const Op& vec_fun, const Vectorized<scalar_t>& acc_vec) {
83
+ return VecReduceAllSIMD<scalar_t, Op>::apply(vec_fun, acc_vec);
84
+ }
85
+
86
+ template <typename scalar_t, typename Op,
87
+ typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
88
+ inline scalar_t reduce_all(const Op& vec_fun, const scalar_t* data, int64_t size) {
89
+ using Vec = vec::Vectorized<scalar_t>;
90
+ if (size < Vec::size())
91
+ return vec_reduce_all(vec_fun, Vec::loadu(data, size), size);
92
+ int64_t d = Vec::size();
93
+ Vec acc_vec = Vec::loadu(data);
94
+ for (; d < size - (size % Vec::size()); d += Vec::size()) {
95
+ Vec data_vec = Vec::loadu(data + d);
96
+ acc_vec = vec_fun(acc_vec, data_vec);
97
+ }
98
+ if (size - d > 0) {
99
+ Vec data_vec = Vec::loadu(data + d, size - d);
100
+ acc_vec = Vec::set(acc_vec, vec_fun(acc_vec, data_vec), size - d);
101
+ }
102
+ return vec_reduce_all(vec_fun, acc_vec);
103
+ }
104
+
105
+ // similar to reduce_all, but reduces into two outputs
106
+ template <typename scalar_t, typename Op1, typename Op2,
107
+ typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
108
+ inline std::pair<scalar_t, scalar_t> reduce2_all(const Op1& vec_fun1, const Op2& vec_fun2,
109
+ const scalar_t* data, int64_t size) {
110
+ using Vec = vec::Vectorized<scalar_t>;
111
+ if (size < Vec::size()) {
112
+ auto loaded_data = Vec::loadu(data, size);
113
+ return std::pair<scalar_t, scalar_t>(
114
+ vec_reduce_all(vec_fun1, loaded_data, size),
115
+ vec_reduce_all(vec_fun2, loaded_data, size));
116
+ }
117
+ int64_t d = Vec::size();
118
+ Vec acc_vec1 = Vec::loadu(data);
119
+ Vec acc_vec2 = Vec::loadu(data);
120
+ for (; d < size - (size % Vec::size()); d += Vec::size()) {
121
+ Vec data_vec = Vec::loadu(data + d);
122
+ acc_vec1 = vec_fun1(acc_vec1, data_vec);
123
+ acc_vec2 = vec_fun2(acc_vec2, data_vec);
124
+ }
125
+ if (size - d > 0) {
126
+ Vec data_vec = Vec::loadu(data + d, size - d);
127
+ acc_vec1 = Vec::set(acc_vec1, vec_fun1(acc_vec1, data_vec), size - d);
128
+ acc_vec2 = Vec::set(acc_vec2, vec_fun2(acc_vec2, data_vec), size - d);
129
+ }
130
+ return std::pair<scalar_t, scalar_t>(
131
+ vec_reduce_all(vec_fun1, acc_vec1),
132
+ vec_reduce_all(vec_fun2, acc_vec2));
133
+ }
134
+
135
+ template <typename scalar_t, typename MapOp, typename ReduceOp,
136
+ typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
137
+ inline scalar_t map_reduce_all(
138
+ const MapOp& map_fun,
139
+ const ReduceOp& red_fun,
140
+ const scalar_t* data,
141
+ int64_t size) {
142
+ using Vec = vec::Vectorized<scalar_t>;
143
+ if (size < Vec::size())
144
+ return vec_reduce_all(red_fun, map_fun(Vec::loadu(data, size)), size);
145
+ int64_t d = Vec::size();
146
+ Vec acc_vec = map_fun(Vec::loadu(data));
147
+ for (; d < size - (size % Vec::size()); d += Vec::size()) {
148
+ Vec data_vec = Vec::loadu(data + d);
149
+ data_vec = map_fun(data_vec);
150
+ acc_vec = red_fun(acc_vec, data_vec);
151
+ }
152
+ if (size - d > 0) {
153
+ Vec data_vec = Vec::loadu(data + d, size - d);
154
+ data_vec = map_fun(data_vec);
155
+ acc_vec = Vec::set(acc_vec, red_fun(acc_vec, data_vec), size - d);
156
+ }
157
+ return vec_reduce_all(red_fun, acc_vec);
158
+ }
159
+
160
+ template <typename scalar_t, typename MapOp, typename ReduceOp,
161
+ typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
162
+ inline scalar_t map2_reduce_all(
163
+ const MapOp& map_fun,
164
+ const ReduceOp& red_fun,
165
+ const scalar_t* data,
166
+ const scalar_t* data2,
167
+ int64_t size) {
168
+ using Vec = vec::Vectorized<scalar_t>;
169
+ if (size < Vec::size()) {
170
+ Vec data_vec = Vec::loadu(data, size);
171
+ Vec data2_vec = Vec::loadu(data2, size);
172
+ data_vec = map_fun(data_vec, data2_vec);
173
+ return vec_reduce_all(red_fun, data_vec, size);
174
+ }
175
+ int64_t d = Vec::size();
176
+ Vec acc_vec = map_fun(Vec::loadu(data), Vec::loadu(data2));
177
+ for (; d < size - (size % Vec::size()); d += Vec::size()) {
178
+ Vec data_vec = Vec::loadu(data + d);
179
+ Vec data2_vec = Vec::loadu(data2 + d);
180
+ data_vec = map_fun(data_vec, data2_vec);
181
+ acc_vec = red_fun(acc_vec, data_vec);
182
+ }
183
+ if (size - d > 0) {
184
+ Vec data_vec = Vec::loadu(data + d, size - d);
185
+ Vec data2_vec = Vec::loadu(data2 + d, size - d);
186
+ data_vec = map_fun(data_vec, data2_vec);
187
+ acc_vec = Vec::set(acc_vec, red_fun(acc_vec, data_vec), size - d);
188
+ }
189
+ return vec_reduce_all(red_fun, acc_vec);
190
+ }
191
+
192
+ template <typename scalar_t, typename MapOp, typename ReduceOp,
193
+ typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
194
+ inline scalar_t map3_reduce_all(
195
+ const MapOp& map_fun,
196
+ const ReduceOp& red_fun,
197
+ const scalar_t* data,
198
+ const scalar_t* data2,
199
+ const scalar_t* data3,
200
+ int64_t size) {
201
+ using Vec = vec::Vectorized<scalar_t>;
202
+ if (size < Vec::size()) {
203
+ Vec data_vec = Vec::loadu(data, size);
204
+ Vec data2_vec = Vec::loadu(data2, size);
205
+ Vec data3_vec = Vec::loadu(data3, size);
206
+ data_vec = map_fun(data_vec, data2_vec, data3_vec);
207
+ return vec_reduce_all(red_fun, data_vec, size);
208
+ }
209
+
210
+ int64_t d = Vec::size();
211
+ Vec acc_vec = map_fun(Vec::loadu(data), Vec::loadu(data2), Vec::loadu(data3));
212
+ for (; d < size - (size % Vec::size()); d += Vec::size()) {
213
+ Vec data_vec = Vec::loadu(data + d);
214
+ Vec data2_vec = Vec::loadu(data2 + d);
215
+ Vec data3_vec = Vec::loadu(data3 + d);
216
+ data_vec = map_fun(data_vec, data2_vec, data3_vec);
217
+ acc_vec = red_fun(acc_vec, data_vec);
218
+ }
219
+ if (size - d > 0) {
220
+ Vec data_vec = Vec::loadu(data + d, size - d);
221
+ Vec data2_vec = Vec::loadu(data2 + d, size - d);
222
+ Vec data3_vec = Vec::loadu(data3 + d, size - d);
223
+ data_vec = map_fun(data_vec, data2_vec, data3_vec);
224
+ acc_vec = Vec::set(acc_vec, red_fun(acc_vec, data_vec), size - d);
225
+ }
226
+ return vec_reduce_all(red_fun, acc_vec);
227
+ }
228
+
229
+ template <typename scalar_t, typename Op,
230
+ typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
231
+ inline void map(
232
+ const Op& vec_fun,
233
+ scalar_t* output_data,
234
+ const scalar_t* input_data,
235
+ int64_t size) {
236
+ using Vec = vec::Vectorized<scalar_t>;
237
+ int64_t d = 0;
238
+ for (; d < size - (size % Vec::size()); d += Vec::size()) {
239
+ Vec output_vec = vec_fun(Vec::loadu(input_data + d));
240
+ output_vec.store(output_data + d);
241
+ }
242
+ if (size - d > 0) {
243
+ Vec output_vec = vec_fun(Vec::loadu(input_data + d, size - d));
244
+ output_vec.store(output_data + d, size - d);
245
+ }
246
+ }
247
+
248
+ template <typename scalar_t, typename Op,
249
+ typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
250
+ inline void map2(
251
+ const Op& vec_fun,
252
+ scalar_t* output_data,
253
+ const scalar_t* input_data,
254
+ const scalar_t* input_data2,
255
+ int64_t size) {
256
+ using Vec = vec::Vectorized<scalar_t>;
257
+ int64_t d = 0;
258
+ for (; d < size - (size % Vec::size()); d += Vec::size()) {
259
+ Vec data_vec = Vec::loadu(input_data + d);
260
+ Vec data_vec2 = Vec::loadu(input_data2 + d);
261
+ Vec output_vec = vec_fun(data_vec, data_vec2);
262
+ output_vec.store(output_data + d);
263
+ }
264
+ if (size - d > 0) {
265
+ Vec data_vec = Vec::loadu(input_data + d, size - d);
266
+ Vec data_vec2 = Vec::loadu(input_data2 + d, size - d);
267
+ Vec output_vec = vec_fun(data_vec, data_vec2);
268
+ output_vec.store(output_data + d, size - d);
269
+ }
270
+ }
271
+
272
+ template <typename scalar_t, typename Op,
273
+ typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
274
+ inline void map3(
275
+ const Op& vec_fun,
276
+ scalar_t* output_data,
277
+ const scalar_t* input_data1,
278
+ const scalar_t* input_data2,
279
+ const scalar_t* input_data3,
280
+ int64_t size) {
281
+ using Vec = vec::Vectorized<scalar_t>;
282
+ int64_t d = 0;
283
+ for (; d < size - (size % Vec::size()); d += Vec::size()) {
284
+ Vec data_vec1 = Vec::loadu(input_data1 + d);
285
+ Vec data_vec2 = Vec::loadu(input_data2 + d);
286
+ Vec data_vec3 = Vec::loadu(input_data3 + d);
287
+ Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3);
288
+ output_vec.store(output_data + d);
289
+ }
290
+ if (size - d > 0) {
291
+ Vec data_vec1 = Vec::loadu(input_data1 + d, size - d);
292
+ Vec data_vec2 = Vec::loadu(input_data2 + d, size - d);
293
+ Vec data_vec3 = Vec::loadu(input_data3 + d, size - d);
294
+ Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3);
295
+ output_vec.store(output_data + d, size - d);
296
+ }
297
+ }
298
+
299
+ template <typename scalar_t, typename Op,
300
+ typename std::enable_if_t<!is_reduced_floating_point_v<scalar_t>, int> = 0>
301
+ inline void map4(
302
+ const Op& vec_fun,
303
+ scalar_t* output_data,
304
+ const scalar_t* input_data1,
305
+ const scalar_t* input_data2,
306
+ const scalar_t* input_data3,
307
+ const scalar_t* input_data4,
308
+ int64_t size) {
309
+ using Vec = vec::Vectorized<scalar_t>;
310
+ int64_t d = 0;
311
+ for (; d < size - (size % Vec::size()); d += Vec::size()) {
312
+ Vec data_vec1 = Vec::loadu(input_data1 + d);
313
+ Vec data_vec2 = Vec::loadu(input_data2 + d);
314
+ Vec data_vec3 = Vec::loadu(input_data3 + d);
315
+ Vec data_vec4 = Vec::loadu(input_data4 + d);
316
+ Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3, data_vec4);
317
+ output_vec.store(output_data + d);
318
+ }
319
+ if (size - d > 0) {
320
+ Vec data_vec1 = Vec::loadu(input_data1 + d, size - d);
321
+ Vec data_vec2 = Vec::loadu(input_data2 + d, size - d);
322
+ Vec data_vec3 = Vec::loadu(input_data3 + d, size - d);
323
+ Vec data_vec4 = Vec::loadu(input_data4 + d, size - d);
324
+ Vec output_vec = vec_fun(data_vec1, data_vec2, data_vec3, data_vec4);
325
+ output_vec.store(output_data + d, size - d);
326
+ }
327
+ }
328
+
329
+ } // namespace at::vec
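
The header above is the generic (non-reduced-precision) path. As an illustrative sketch of the API it declares (type and function names are taken from the header; the surrounding function is hypothetical):

#include <ATen/cpu/vec/functional.h>
#include <cstdint>

// Sums `in` and writes 2*in into `out`, using the map/reduce helpers above.
float sum_and_double(float* out, const float* in, int64_t n) {
  using Vec = at::vec::Vectorized<float>;
  // reduce_all: horizontal reduction with a vectorized binary op (here: +).
  const float total = at::vec::reduce_all<float>(
      [](Vec x, Vec y) { return x + y; }, in, n);
  // map: elementwise application of a vectorized unary op (here: multiply by 2).
  at::vec::map<float>(
      [](Vec x) { return x * Vec(2.0f); }, out, in, n);
  return total;
}
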
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/functional_bfloat16.h ADDED
@@ -0,0 +1,549 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/vec.h>
7
+
8
+ namespace at::vec {
9
+
10
+ // BFloat16 specification
11
+ template <typename scalar_t> struct VecScalarType { using type = scalar_t; };
12
+ template <> struct VecScalarType<BFloat16> { using type = float; };
13
+ template <> struct VecScalarType<Half> { using type = float; };
14
+
15
+ // This is different from at::acc_type since we only need to specialize BFloat16
16
+ template <typename scalar_t>
17
+ using vec_scalar_t = typename VecScalarType<scalar_t>::type;
18
+
19
+ // Vector conversion between float and bfloat16/half
20
+ template <typename scalar_t,
21
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
22
+ inline std::tuple<Vectorized<float>, Vectorized<float>> convert_to_float(const Vectorized<scalar_t>&);
23
+
24
+ template <>
25
+ inline std::tuple<Vectorized<float>, Vectorized<float>> convert_to_float<BFloat16> (const Vectorized<BFloat16>& a) {
26
+ return convert_bfloat16_float(a);
27
+ }
28
+
29
+ template <>
30
+ inline std::tuple<Vectorized<float>, Vectorized<float>> convert_to_float<Half> (const Vectorized<Half>& a) {
31
+ return convert_half_float(a);
32
+ }
33
+
34
+ template <typename scalar_t,
35
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
36
+ inline Vectorized<scalar_t> convert_from_float(const Vectorized<float>&, const Vectorized<float>&);
37
+
38
+ template <>
39
+ inline Vectorized<BFloat16> convert_from_float<BFloat16>(const Vectorized<float>& a, const Vectorized<float>& b) {
40
+ return convert_float_bfloat16(a, b);
41
+ }
42
+
43
+ template <>
44
+ inline Vectorized<Half> convert_from_float<Half>(const Vectorized<float>& a, const Vectorized<float>& b) {
45
+ return convert_float_half(a, b);
46
+ }
47
+
48
+ template <typename scalar_t,
49
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
50
+ inline void load_to_float(const scalar_t *data, Vectorized<float> &out1, Vectorized<float> &out2);
51
+
52
+ template <>
53
+ inline void load_to_float<BFloat16> (const BFloat16 *data, Vectorized<float> &out1, Vectorized<float> &out2) {
54
+ load_fp32_from_bf16(data, out1, out2);
55
+ }
56
+
57
+ template <>
58
+ inline void load_to_float<Half> (const Half *data, Vectorized<float> &out1, Vectorized<float> &out2) {
59
+ load_fp32_from_fp16(data, out1, out2);
60
+ }
61
+
62
+ template <typename scalar_t,
63
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
64
+ inline void load_to_float(const scalar_t *data, Vectorized<float> &out);
65
+
66
+ template <>
67
+ inline void load_to_float<BFloat16> (const BFloat16 *data, Vectorized<float> &out) {
68
+ load_fp32_from_bf16(data, out);
69
+ }
70
+
71
+ template <>
72
+ inline void load_to_float<Half> (const Half *data, Vectorized<float> &out) {
73
+ load_fp32_from_fp16(data, out);
74
+ }
75
+
76
+ // Note that we already have specialized member of Vectorized<scalar_t> for BFloat16
77
+ // so the following functions would run smoothly:
78
+ // using Vec = Vectorized<BFloat16>;
79
+ // Vec one = Vec(BFloat16(1));
80
+ // vec::map([](Vec x) { return one / (one + x.exp()); }, y_ptr, x_ptr, N);
81
+ //
82
+ // Then why we still need to specialize "functional"?
83
+ // If we do specialization at Vectorized<> level, the above example would need 3 pairs of
84
+ // conversion of bf16->fp32/fp32->bf16, each for ".exp()", "+" and "/".
85
+ // If we do specialization at vec::map<>() level, we have only 1 pair of conversion
86
+ // of bf16->fp32/fp32->bf16, for the input and output BFloat16 vector only.
87
+ //
88
+ // The following BFloat16 functionality will only do data type conversion for input
89
+ // and output vector (reduce functionality will only convert the final scalar back to bf16).
90
+ // Compared to Vectorized<> specialization,
91
+ // 1. better performance since we have less data type conversion;
92
+ // 2. less rounding error since immediate results are kept in fp32;
93
+ // 3. accumulation done on data type of fp32.
94
+ //
95
+ // If you plan to extend this file, please ensure adding unit tests at
96
+ // aten/src/ATen/test/vec_test_all_types.cpp
97
+ //
98
+ template <typename scalar_t, typename Op,
99
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
100
+ inline float reduce_all(const Op& vec_fun, const scalar_t* data, int64_t size) {
101
+ using bVec = vec::Vectorized<scalar_t>;
102
+ using fVec = vec::Vectorized<float>;
103
+ if (size < bVec::size()) {
104
+ bVec data_bvec = bVec::loadu(data, size);
105
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
106
+ if (size > fVec::size()) {
107
+ data_fvec0 = fVec::set(data_fvec0, vec_fun(data_fvec0, data_fvec1), size - fVec::size());
108
+ return vec_reduce_all<float>(vec_fun, data_fvec0, fVec::size());
109
+ } else {
110
+ return vec_reduce_all<float>(vec_fun, data_fvec0, size);
111
+ }
112
+ }
113
+ int64_t d = bVec::size();
114
+ bVec acc_bvec = bVec::loadu(data);
115
+ auto [acc_fvec0, acc_fvec1] = convert_to_float<scalar_t>(acc_bvec);
116
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
117
+ bVec data_bvec = bVec::loadu(data + d);
118
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
119
+ acc_fvec0 = vec_fun(acc_fvec0, data_fvec0);
120
+ acc_fvec1 = vec_fun(acc_fvec1, data_fvec1);
121
+ }
122
+ if (size - d > 0) {
123
+ bVec data_bvec = bVec::loadu(data + d, size - d);
124
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
125
+ if (size - d > fVec::size()) {
126
+ acc_fvec0 = vec_fun(acc_fvec0, data_fvec0);
127
+ acc_fvec1 = fVec::set(acc_fvec1, vec_fun(acc_fvec1, data_fvec1), size - d - fVec::size());
128
+ } else {
129
+ acc_fvec0 = fVec::set(acc_fvec0, vec_fun(acc_fvec0, data_fvec0), size - d);
130
+ }
131
+ }
132
+ acc_fvec0 = vec_fun(acc_fvec0, acc_fvec1);
133
+ return vec_reduce_all<float>(vec_fun, acc_fvec0);
134
+ }
135
+
136
+ template <typename scalar_t, typename Op1, typename Op2,
137
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
138
+ inline std::pair<float, float> reduce2_all(const Op1& vec_fun1, const Op2& vec_fun2,
139
+ const scalar_t* data, int64_t size) {
140
+ using bVec = vec::Vectorized<scalar_t>;
141
+ using fVec = vec::Vectorized<float>;
142
+ if (size < bVec::size()) {
143
+ bVec data_bvec = bVec::loadu(data, size);
144
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
145
+ if (size > fVec::size()) {
146
+ fVec acc1_fvec = fVec::set(data_fvec0, vec_fun1(data_fvec0, data_fvec1), size - fVec::size());
147
+ fVec acc2_fvec = fVec::set(data_fvec0, vec_fun2(data_fvec0, data_fvec1), size - fVec::size());
148
+ return std::pair<scalar_t, scalar_t>(
149
+ vec_reduce_all<float>(vec_fun1, acc1_fvec, fVec::size()),
150
+ vec_reduce_all<float>(vec_fun2, acc2_fvec, fVec::size()));
151
+ } else {
152
+ return std::pair<scalar_t, scalar_t>(
153
+ vec_reduce_all<float>(vec_fun1, data_fvec0, size),
154
+ vec_reduce_all<float>(vec_fun2, data_fvec0, size));
155
+ }
156
+ }
157
+ int64_t d = bVec::size();
158
+ bVec acc_bvec = bVec::loadu(data);
159
+ auto [acc1_fvec0, acc1_fvec1] = convert_to_float<scalar_t>(acc_bvec);
160
+ auto [acc2_fvec0, acc2_fvec1] = convert_to_float<scalar_t>(acc_bvec);
161
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
162
+ bVec data_bvec = bVec::loadu(data + d);
163
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
164
+ acc1_fvec0 = vec_fun1(acc1_fvec0, data_fvec0);
165
+ acc1_fvec1 = vec_fun1(acc1_fvec1, data_fvec1);
166
+ acc2_fvec0 = vec_fun2(acc2_fvec0, data_fvec0);
167
+ acc2_fvec1 = vec_fun2(acc2_fvec1, data_fvec1);
168
+ }
169
+ if (size - d > 0) {
170
+ bVec data_bvec = bVec::loadu(data + d, size - d);
171
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
172
+ if (size - d > fVec::size()) {
173
+ acc1_fvec0 = vec_fun1(acc1_fvec0, data_fvec0);
174
+ acc1_fvec1 = fVec::set(acc1_fvec1, vec_fun1(acc1_fvec1, data_fvec1), size - d - fVec::size());
175
+ acc2_fvec0 = vec_fun2(acc2_fvec0, data_fvec0);
176
+ acc2_fvec1 = fVec::set(acc2_fvec1, vec_fun2(acc2_fvec1, data_fvec1), size - d - fVec::size());
177
+ } else {
178
+ acc1_fvec0 = fVec::set(acc1_fvec0, vec_fun1(acc1_fvec0, data_fvec0), size - d);
179
+ acc2_fvec0 = fVec::set(acc2_fvec0, vec_fun2(acc2_fvec0, data_fvec0), size - d);
180
+ }
181
+ }
182
+ acc1_fvec0 = vec_fun1(acc1_fvec0, acc1_fvec1);
183
+ acc2_fvec0 = vec_fun2(acc2_fvec0, acc2_fvec1);
184
+ return std::pair<scalar_t, scalar_t>(
185
+ vec_reduce_all<float>(vec_fun1, acc1_fvec0),
186
+ vec_reduce_all<float>(vec_fun2, acc2_fvec0));
187
+ }
188
+
189
+ template <typename scalar_t, typename MapOp, typename ReduceOp,
190
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
191
+ inline float map_reduce_all(
192
+ const MapOp& map_fun,
193
+ const ReduceOp& red_fun,
194
+ const scalar_t* data,
195
+ int64_t size) {
196
+ using bVec = vec::Vectorized<scalar_t>;
197
+ using fVec = vec::Vectorized<float>;
198
+ if (size < bVec::size()) {
199
+ bVec data_bvec = bVec::loadu(data, size);
200
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
201
+ if (size > fVec::size()) {
202
+ data_fvec0 = map_fun(data_fvec0);
203
+ data_fvec1 = map_fun(data_fvec1);
204
+ data_fvec0 = fVec::set(data_fvec0, red_fun(data_fvec0, data_fvec1), size - fVec::size());
205
+ return vec_reduce_all<float>(red_fun, data_fvec0, fVec::size());
206
+ } else {
207
+ data_fvec0 = map_fun(data_fvec0);
208
+ return vec_reduce_all<float>(red_fun, data_fvec0, size);
209
+ }
210
+ }
211
+ int64_t d = bVec::size();
212
+ bVec acc_bvec = bVec::loadu(data);
213
+ auto [acc_fvec0, acc_fvec1] = convert_to_float<scalar_t>(acc_bvec);
214
+ acc_fvec0 = map_fun(acc_fvec0);
215
+ acc_fvec1 = map_fun(acc_fvec1);
216
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
217
+ bVec data_bvec = bVec::loadu(data + d);
218
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
219
+ data_fvec0 = map_fun(data_fvec0);
220
+ data_fvec1 = map_fun(data_fvec1);
221
+ acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
222
+ acc_fvec1 = red_fun(acc_fvec1, data_fvec1);
223
+ }
224
+ if (size - d > 0) {
225
+ bVec data_bvec = bVec::loadu(data + d, size - d);
226
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
227
+ if (size - d > fVec::size()) {
228
+ data_fvec0 = map_fun(data_fvec0);
229
+ data_fvec1 = map_fun(data_fvec1);
230
+ acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
231
+ acc_fvec1 = fVec::set(acc_fvec1, red_fun(acc_fvec1, data_fvec1), size - d - fVec::size());
232
+ } else {
233
+ data_fvec0 = map_fun(data_fvec0);
234
+ acc_fvec0 = fVec::set(acc_fvec0, red_fun(acc_fvec0, data_fvec0), size - d);
235
+ }
236
+ }
237
+ acc_fvec0 = red_fun(acc_fvec0, acc_fvec1);
238
+ return vec_reduce_all<float>(red_fun, acc_fvec0);
239
+ }
240
+
241
+ template <typename scalar_t, typename MapOp, typename ReduceOp,
242
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
243
+ inline float map2_reduce_all(
244
+ const MapOp& map_fun,
245
+ const ReduceOp& red_fun,
246
+ const scalar_t* data,
247
+ const scalar_t* data2,
248
+ int64_t size) {
249
+ using bVec = vec::Vectorized<scalar_t>;
250
+ using fVec = vec::Vectorized<float>;
251
+ if (size < bVec::size()) {
252
+ bVec data_bvec = bVec::loadu(data, size);
253
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
254
+ bVec data2_bvec = bVec::loadu(data2, size);
255
+ auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
256
+ if (size > fVec::size()) {
257
+ data_fvec0 = map_fun(data_fvec0, data2_fvec0);
258
+ data_fvec1 = map_fun(data_fvec1, data2_fvec1);
259
+ data_fvec0 = fVec::set(data_fvec0, red_fun(data_fvec0, data_fvec1), size - fVec::size());
260
+ return vec_reduce_all<float>(red_fun, data_fvec0, fVec::size());
261
+ } else {
262
+ data_fvec0 = map_fun(data_fvec0, data2_fvec0);
263
+ return vec_reduce_all<float>(red_fun, data_fvec0, size);
264
+ }
265
+ }
266
+ int64_t d = bVec::size();
267
+ bVec acc_bvec = bVec::loadu(data);
268
+ auto [acc_fvec0, acc_fvec1] = convert_to_float<scalar_t>(acc_bvec);
269
+ bVec acc2_bvec = bVec::loadu(data2);
270
+ auto [acc2_fvec0, acc2_fvec1] = convert_to_float<scalar_t>(acc2_bvec);
271
+ acc_fvec0 = map_fun(acc_fvec0, acc2_fvec0);
272
+ acc_fvec1 = map_fun(acc_fvec1, acc2_fvec1);
273
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
274
+ bVec data_bvec = bVec::loadu(data + d);
275
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
276
+ bVec data2_bvec = bVec::loadu(data2 + d);
277
+ auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
278
+ data_fvec0 = map_fun(data_fvec0, data2_fvec0);
279
+ data_fvec1 = map_fun(data_fvec1, data2_fvec1);
280
+ acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
281
+ acc_fvec1 = red_fun(acc_fvec1, data_fvec1);
282
+ }
283
+ if (size - d > 0) {
284
+ bVec data_bvec = bVec::loadu(data + d, size - d);
285
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
286
+ bVec data2_bvec = bVec::loadu(data2 + d, size - d);
287
+ auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
288
+ if (size - d > fVec::size()) {
289
+ data_fvec0 = map_fun(data_fvec0, data2_fvec0);
290
+ data_fvec1 = map_fun(data_fvec1, data2_fvec1);
291
+ acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
292
+ acc_fvec1 = fVec::set(acc_fvec1, red_fun(acc_fvec1, data_fvec1), size - d - fVec::size());
293
+ } else {
294
+ data_fvec0 = map_fun(data_fvec0, data2_fvec0);
295
+ acc_fvec0 = fVec::set(acc_fvec0, red_fun(acc_fvec0, data_fvec0), size - d);
296
+ }
297
+ }
298
+ acc_fvec0 = red_fun(acc_fvec0, acc_fvec1);
299
+ return vec_reduce_all<float>(red_fun, acc_fvec0);
300
+ }
301
+
302
+ template <typename scalar_t, typename MapOp, typename ReduceOp,
303
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
304
+ inline float map3_reduce_all(
305
+ const MapOp& map_fun,
306
+ const ReduceOp& red_fun,
307
+ const scalar_t* data,
308
+ const scalar_t* data2,
309
+ const scalar_t* data3,
310
+ int64_t size) {
311
+ using bVec = vec::Vectorized<scalar_t>;
312
+ using fVec = vec::Vectorized<float>;
313
+ if (size < bVec::size()) {
314
+ bVec data_bvec = bVec::loadu(data, size);
315
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
316
+ bVec data2_bvec = bVec::loadu(data2, size);
317
+ auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
318
+ bVec data3_bvec = bVec::loadu(data3, size);
319
+ auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
320
+ if (size > fVec::size()) {
321
+ data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0);
322
+ data_fvec1 = map_fun(data_fvec1, data2_fvec1, data3_fvec1);
323
+ data_fvec0 = fVec::set(data_fvec0, red_fun(data_fvec0, data_fvec1), size - fVec::size());
324
+ return vec_reduce_all<float>(red_fun, data_fvec0, fVec::size());
325
+ } else {
326
+ data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0);
327
+ return vec_reduce_all<float>(red_fun, data_fvec0, size);
328
+ }
329
+ }
330
+ int64_t d = bVec::size();
331
+ bVec acc_bvec = bVec::loadu(data);
332
+ auto [acc_fvec0, acc_fvec1] = convert_to_float<scalar_t>(acc_bvec);
333
+ bVec acc2_bvec = bVec::loadu(data2);
334
+ auto [acc2_fvec0, acc2_fvec1] = convert_to_float<scalar_t>(acc2_bvec);
335
+ bVec acc3_bvec = bVec::loadu(data3);
336
+ auto [acc3_fvec0, acc3_fvec1] = convert_to_float<scalar_t>(acc3_bvec);
337
+ acc_fvec0 = map_fun(acc_fvec0, acc2_fvec0, acc3_fvec0);
338
+ acc_fvec1 = map_fun(acc_fvec1, acc2_fvec1, acc3_fvec1);
339
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
340
+ bVec data_bvec = bVec::loadu(data + d);
341
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
342
+ bVec data2_bvec = bVec::loadu(data2 + d);
343
+ auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
344
+ bVec data3_bvec = bVec::loadu(data3 + d);
345
+ auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
346
+ data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0);
347
+ data_fvec1 = map_fun(data_fvec1, data2_fvec1, data3_fvec1);
348
+ acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
349
+ acc_fvec1 = red_fun(acc_fvec1, data_fvec1);
350
+ }
351
+ if (size - d > 0) {
352
+ bVec data_bvec = bVec::loadu(data + d, size - d);
353
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
354
+ bVec data2_bvec = bVec::loadu(data2 + d, size - d);
355
+ auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
356
+ bVec data3_bvec = bVec::loadu(data3 + d, size - d);
357
+ auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
358
+ if (size - d > fVec::size()) {
359
+ data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0);
360
+ data_fvec1 = map_fun(data_fvec1, data2_fvec1, data3_fvec1);
361
+ acc_fvec0 = red_fun(acc_fvec0, data_fvec0);
362
+ acc_fvec1 = fVec::set(acc_fvec1, red_fun(acc_fvec1, data_fvec1), size - d - fVec::size());
363
+ } else {
364
+ data_fvec0 = map_fun(data_fvec0, data2_fvec0, data3_fvec0);
365
+ acc_fvec0 = fVec::set(acc_fvec0, red_fun(acc_fvec0, data_fvec0), size - d);
366
+ }
367
+ }
368
+ acc_fvec0 = red_fun(acc_fvec0, acc_fvec1);
369
+ return vec_reduce_all<float>(red_fun, acc_fvec0);
370
+ }
371
+
372
+ template <typename scalar_t, typename Op,
373
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
374
+ inline void map(
375
+ const Op& vec_fun,
376
+ scalar_t* output_data,
377
+ const scalar_t* input_data,
378
+ int64_t size) {
379
+ using bVec = vec::Vectorized<scalar_t>;
380
+ using fVec = vec::Vectorized<float>;
381
+ int64_t d = 0;
382
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
383
+ bVec data_bvec = bVec::loadu(input_data + d);
384
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
385
+ fVec output_fvec0 = vec_fun(data_fvec0);
386
+ fVec output_fvec1 = vec_fun(data_fvec1);
387
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
388
+ output_bvec.store(output_data + d);
389
+ }
390
+ if (size - d > 0) {
391
+ bVec data_bvec = bVec::loadu(input_data + d, size - d);
392
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
393
+ fVec output_fvec0 = vec_fun(data_fvec0);
394
+ fVec output_fvec1 = vec_fun(data_fvec1);
395
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
396
+ output_bvec.store(output_data + d, size - d);
397
+ }
398
+ }
399
+
400
+ template <typename scalar_t, typename Op,
401
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
402
+ inline void map(
403
+ const Op& vec_fun,
404
+ scalar_t* output_data,
405
+ const float* input_data,
406
+ int64_t size) {
407
+ using bVec = vec::Vectorized<scalar_t>;
408
+ using fVec = vec::Vectorized<float>;
409
+ int64_t d = 0;
410
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
411
+ fVec data_fvec0 = fVec::loadu(input_data + d);
412
+ fVec data_fvec1 = fVec::loadu(input_data + d + fVec::size());
413
+ fVec output_fvec0 = vec_fun(data_fvec0);
414
+ fVec output_fvec1 = vec_fun(data_fvec1);
415
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
416
+ output_bvec.store(output_data + d);
417
+ }
418
+ if (size - d > 0) {
419
+ fVec data_fvec0, data_fvec1;
420
+ if (size - d > fVec::size()) {
421
+ data_fvec0 = fVec::loadu(input_data + d);
422
+ data_fvec1 = fVec::loadu(input_data + d + fVec::size(), size - d - fVec::size());
423
+ } else {
424
+ // choose to align with behaviour of bVec::loadu(ptr, size),
425
+ // which leaves data_fvec1 uninitialized
426
+ data_fvec0 = fVec::loadu(input_data + d, size - d);
427
+ }
428
+ fVec output_fvec0 = vec_fun(data_fvec0);
429
+ fVec output_fvec1 = vec_fun(data_fvec1);
430
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
431
+ output_bvec.store(output_data + d, size - d);
432
+ }
433
+ }
434
+
435
+ template <typename scalar_t, typename Op,
436
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
437
+ inline void map2(
438
+ const Op& vec_fun,
439
+ scalar_t* output_data,
440
+ const scalar_t* input_data,
441
+ const scalar_t* input_data2,
442
+ int64_t size) {
443
+ using bVec = vec::Vectorized<scalar_t>;
444
+ using fVec = vec::Vectorized<float>;
445
+ int64_t d = 0;
446
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
447
+ bVec data_bvec = bVec::loadu(input_data + d);
448
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
449
+ bVec data2_bvec = bVec::loadu(input_data2 + d);
450
+ auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
451
+ fVec output_fvec0 = vec_fun(data_fvec0, data2_fvec0);
452
+ fVec output_fvec1 = vec_fun(data_fvec1, data2_fvec1);
453
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
454
+ output_bvec.store(output_data + d);
455
+ }
456
+ if (size - d > 0) {
457
+ bVec data_bvec = bVec::loadu(input_data + d, size - d);
458
+ auto [data_fvec0, data_fvec1] = convert_to_float<scalar_t>(data_bvec);
459
+ bVec data2_bvec = bVec::loadu(input_data2 + d, size - d);
460
+ auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
461
+ fVec output_fvec0 = vec_fun(data_fvec0, data2_fvec0);
462
+ fVec output_fvec1 = vec_fun(data_fvec1, data2_fvec1);
463
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
464
+ output_bvec.store(output_data + d, size - d);
465
+ }
466
+ }
467
+
468
+ template <typename scalar_t, typename Op,
469
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
470
+ inline void map3(
471
+ const Op& vec_fun,
472
+ scalar_t* output_data,
473
+ const scalar_t* input_data1,
474
+ const scalar_t* input_data2,
475
+ const scalar_t* input_data3,
476
+ int64_t size) {
477
+ using bVec = vec::Vectorized<scalar_t>;
478
+ using fVec = vec::Vectorized<float>;
479
+ int64_t d = 0;
480
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
481
+ bVec data1_bvec = bVec::loadu(input_data1 + d);
482
+ auto [data1_fvec0, data1_fvec1] = convert_to_float<scalar_t>(data1_bvec);
483
+ bVec data2_bvec = bVec::loadu(input_data2 + d);
484
+ auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
485
+ bVec data3_bvec = bVec::loadu(input_data3 + d);
486
+ auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
487
+ fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0);
488
+ fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1);
489
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
490
+ output_bvec.store(output_data + d);
491
+ }
492
+ if (size - d > 0) {
493
+ bVec data1_bvec = bVec::loadu(input_data1 + d, size - d);
494
+ auto [data1_fvec0, data1_fvec1] = convert_to_float<scalar_t>(data1_bvec);
495
+ bVec data2_bvec = bVec::loadu(input_data2 + d, size - d);
496
+ auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
497
+ bVec data3_bvec = bVec::loadu(input_data3 + d, size - d);
498
+ auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
499
+ fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0);
500
+ fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1);
501
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
502
+ output_bvec.store(output_data + d, size - d);
503
+ }
504
+ }
505
+
506
+ template <typename scalar_t, typename Op,
507
+ typename std::enable_if_t<is_reduced_floating_point_v<scalar_t>, int> = 0>
508
+ inline void map4(
509
+ const Op& vec_fun,
510
+ scalar_t* output_data,
511
+ const scalar_t* input_data1,
512
+ const scalar_t* input_data2,
513
+ const scalar_t* input_data3,
514
+ const scalar_t* input_data4,
515
+ int64_t size) {
516
+ using bVec = vec::Vectorized<scalar_t>;
517
+ using fVec = vec::Vectorized<float>;
518
+ int64_t d = 0;
519
+ for (; d < size - (size % bVec::size()); d += bVec::size()) {
520
+ bVec data1_bvec = bVec::loadu(input_data1 + d);
521
+ auto [data1_fvec0, data1_fvec1] = convert_to_float<scalar_t>(data1_bvec);
522
+ bVec data2_bvec = bVec::loadu(input_data2 + d);
523
+ auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
524
+ bVec data3_bvec = bVec::loadu(input_data3 + d);
525
+ auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
526
+ bVec data4_bvec = bVec::loadu(input_data4 + d);
527
+ auto [data4_fvec0, data4_fvec1] = convert_to_float<scalar_t>(data4_bvec);
528
+ fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0, data4_fvec0);
529
+ fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1, data4_fvec1);
530
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
531
+ output_bvec.store(output_data + d);
532
+ }
533
+ if (size - d > 0) {
534
+ bVec data1_bvec = bVec::loadu(input_data1 + d, size - d);
535
+ auto [data1_fvec0, data1_fvec1] = convert_to_float<scalar_t>(data1_bvec);
536
+ bVec data2_bvec = bVec::loadu(input_data2 + d, size - d);
537
+ auto [data2_fvec0, data2_fvec1] = convert_to_float<scalar_t>(data2_bvec);
538
+ bVec data3_bvec = bVec::loadu(input_data3 + d, size - d);
539
+ auto [data3_fvec0, data3_fvec1] = convert_to_float<scalar_t>(data3_bvec);
540
+ bVec data4_bvec = bVec::loadu(input_data4 + d, size - d);
541
+ auto [data4_fvec0, data4_fvec1] = convert_to_float<scalar_t>(data4_bvec);
542
+ fVec output_fvec0 = vec_fun(data1_fvec0, data2_fvec0, data3_fvec0, data4_fvec0);
543
+ fVec output_fvec1 = vec_fun(data1_fvec1, data2_fvec1, data3_fvec1, data4_fvec1);
544
+ bVec output_bvec = convert_from_float<scalar_t>(output_fvec0, output_fvec1);
545
+ output_bvec.store(output_data + d, size - d);
546
+ }
547
+ }
548
+
549
+ } // namespace at::vec
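
Following the note embedded in the header (specialize at the vec::map level so that bf16<->fp32 conversion happens only at the load/store boundaries), here is a hedged sketch of a sigmoid over a BFloat16 buffer; `sigmoid_bf16` is an illustrative name, not part of the commit:

#include <ATen/cpu/vec/functional.h>
#include <c10/util/BFloat16.h>
#include <cstdint>

void sigmoid_bf16(c10::BFloat16* out, const c10::BFloat16* in, int64_t n) {
  using fVec = at::vec::Vectorized<float>;
  // The lambda operates on float vectors; the reduced-precision overload of
  // map converts bf16->fp32 once on load and fp32->bf16 once on store.
  at::vec::map<c10::BFloat16>(
      [](fVec x) { return fVec(1.0f) / (fVec(1.0f) + x.neg().exp()); },
      out, in, n);
}
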
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/intrinsics.h ADDED
@@ -0,0 +1,43 @@
+ #pragma once
+ #if defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
+ /* GCC or clang-compatible compiler, targeting x86/x86-64 */
+ #include <x86intrin.h>
+ #elif defined(__clang__) && (defined(__ARM_NEON__) || defined(__aarch64__))
+ /* Clang-compatible compiler, targeting arm neon */
+ #include <arm_neon.h>
+ #elif defined(_MSC_VER)
+ /* Microsoft C/C++-compatible compiler */
+ #include <intrin.h>
+ #if _MSC_VER <= 1900
+ #define _mm256_extract_epi64(X, Y) (_mm_extract_epi64(_mm256_extractf128_si256(X, Y >> 1), Y % 2))
+ #define _mm256_extract_epi32(X, Y) (_mm_extract_epi32(_mm256_extractf128_si256(X, Y >> 2), Y % 4))
+ #define _mm256_extract_epi16(X, Y) (_mm_extract_epi16(_mm256_extractf128_si256(X, Y >> 3), Y % 8))
+ #define _mm256_extract_epi8(X, Y) (_mm_extract_epi8(_mm256_extractf128_si256(X, Y >> 4), Y % 16))
+ #endif
+ #elif defined(__GNUC__) && (defined(__ARM_NEON__) || defined(__aarch64__))
+ /* GCC-compatible compiler, targeting ARM with NEON */
+ #include <arm_neon.h>
+ #if defined (MISSING_ARM_VLD1)
+ #include <ATen/cpu/vec/vec256/missing_vld1_neon.h>
+ #elif defined (MISSING_ARM_VST1)
+ #include <ATen/cpu/vec/vec256/missing_vst1_neon.h>
+ #endif
+ #elif defined(__GNUC__) && defined(__IWMMXT__)
+ /* GCC-compatible compiler, targeting ARM with WMMX */
+ #include <mmintrin.h>
+ #elif defined(__s390x__)
+ // targets Z/architecture
+ // we will include vecintrin later
+ #elif (defined(__GNUC__) || defined(__xlC__)) && \
+ (defined(__VEC__) || defined(__ALTIVEC__))
+ /* XLC or GCC-compatible compiler, targeting PowerPC with VMX/VSX */
+ #include <altivec.h>
+ /* We need to undef those tokens defined by <altivec.h> to avoid conflicts
+ with the C++ types. => Can still use __bool/__vector */
+ #undef bool
+ #undef vector
+ #undef pixel
+ #elif defined(__GNUC__) && defined(__SPE__)
+ /* GCC-compatible compiler, targeting PowerPC with SPE */
+ #include <spe.h>
+ #endif
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec.h ADDED
@@ -0,0 +1,47 @@
+ #pragma once
+
+ #if defined(CPU_CAPABILITY_AVX512)
+ #include <ATen/cpu/vec/vec512/vec512.h>
+ #else
+ #include <ATen/cpu/vec/vec256/vec256.h>
+ #endif
+
+ namespace at::vec {
+ // See Note [CPU_CAPABILITY namespace]
+ inline namespace CPU_CAPABILITY {
+
+ inline Vectorized<bool> convert_to_bool(Vectorized<int8_t> x) {
+ __at_align__ bool buffer[x.size()];
+ x.ne(Vectorized<int8_t>(0)).store(buffer);
+
+ Vectorized<bool> ret;
+ static_assert(x.size() == ret.size(), "");
+ std::memcpy(ret, buffer, ret.size() * sizeof(bool));
+ return ret;
+ }
+
+ template <>
+ inline Vectorized<bool> Vectorized<bool>::loadu(const void* ptr) {
+ // See NOTE [Loading boolean values]
+ return convert_to_bool(Vectorized<int8_t>::loadu(ptr));
+ }
+
+ template <>
+ inline Vectorized<bool> Vectorized<bool>::loadu(const void* ptr, int64_t count) {
+ // See NOTE [Loading boolean values]
+ return convert_to_bool(Vectorized<int8_t>::loadu(ptr, count));
+ }
+
+ template <typename VT>
+ struct VecHoldType { using hold_type = typename VT::value_type; };
+
+ template <>
+ struct VecHoldType<Vectorized<BFloat16>> { using hold_type = BFloat16; };
+
+ template <>
+ struct VecHoldType<Vectorized<Half>> {using hold_type = Half; };
+
+ template <typename VT>
+ using vechold_type = typename VecHoldType<VT>::hold_type;
+
+ }} // namespace at::vec::CPU_CAPABILITY
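
For the boolean specializations above, a small illustrative sketch (`any_true` is a hypothetical helper, and the buffer is assumed to hold at least Vectorized<bool>::size() elements):

#include <ATen/cpu/vec/vec.h>

bool any_true(const bool* flags) {
  using BVec = at::vec::Vectorized<bool>;
  // loadu routes through convert_to_bool(Vectorized<int8_t>::loadu(...)) as defined above.
  const BVec v = BVec::loadu(flags);
  bool tmp[BVec::size()];
  v.store(tmp);
  for (bool b : tmp) {
    if (b) return true;
  }
  return false;
}
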
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/missing_vld1_neon.h ADDED
@@ -0,0 +1,452 @@
1
+ /* Workaround for missing vld1_*_x2 and vst1_*_x2 intrinsics in gcc-7. */
2
+
3
+ __extension__ extern __inline uint8x8x2_t
4
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5
+ vld1_u8_x2 (const uint8_t *__a)
6
+ {
7
+ uint8x8x2_t ret;
8
+ asm volatile("ld1 {%S0.8b - %T0.8b}, %1" : "=w" (ret) : "Q"(*__a));
9
+ return ret;
10
+ }
11
+
12
+ __extension__ extern __inline int8x8x2_t
13
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
14
+ vld1_s8_x2 (const int8_t *__a)
15
+ {
16
+ int8x8x2_t ret;
17
+ asm volatile("ld1 {%S0.8b - %T0.8b}, %1" : "=w" (ret) : "Q"(*__a));
18
+ return ret;
19
+ }
20
+
21
+ __extension__ extern __inline uint16x4x2_t
22
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
23
+ vld1_u16_x2 (const uint16_t *__a)
24
+ {
25
+ uint16x4x2_t ret;
26
+ asm volatile("ld1 {%S0.4h - %T0.4h}, %1" : "=w" (ret) : "Q"(*__a));
27
+ return ret;
28
+ }
29
+
30
+ __extension__ extern __inline int16x4x2_t
31
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
32
+ vld1_s16_x2 (const int16_t *__a)
33
+ {
34
+ int16x4x2_t ret;
35
+ asm volatile("ld1 {%S0.4h - %T0.4h}, %1" : "=w" (ret) : "Q"(*__a));
36
+ return ret;
37
+ }
38
+
39
+ __extension__ extern __inline uint32x2x2_t
40
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
41
+ vld1_u32_x2 (const uint32_t *__a)
42
+ {
43
+ uint32x2x2_t ret;
44
+ asm volatile("ld1 {%S0.2s - %T0.2s}, %1" : "=w" (ret) : "Q"(*__a));
45
+ return ret;
46
+ }
47
+
48
+ __extension__ extern __inline int32x2x2_t
49
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
50
+ vld1_s32_x2 (const int32_t *__a)
51
+ {
52
+ int32x2x2_t ret;
53
+ asm volatile("ld1 {%S0.2s - %T0.2s}, %1" : "=w" (ret) : "Q"(*__a));
54
+ return ret;
55
+ }
56
+
57
+ __extension__ extern __inline uint64x1x2_t
58
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
59
+ vld1_u64_x2 (const uint64_t *__a)
60
+ {
61
+ uint64x1x2_t ret;
62
+ asm volatile("ld1 {%S0.1d - %T0.1d}, %1" : "=w" (ret) : "Q"(*__a));
63
+ return ret;
64
+ }
65
+
66
+ __extension__ extern __inline int64x1x2_t
67
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
68
+ vld1_s64_x2 (const int64_t *__a)
69
+ {
70
+ int64x1x2_t ret;
71
+ __builtin_aarch64_simd_oi __o;
72
+ asm volatile("ld1 {%S0.1d - %T0.1d}, %1" : "=w" (ret) : "Q"(*__a));
73
+ return ret;
74
+ }
75
+
76
+ __extension__ extern __inline float16x4x2_t
77
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
78
+ vld1_f16_x2 (const float16_t *__a)
79
+ {
80
+ float16x4x2_t ret;
81
+ asm volatile("ld1 {%S0.4h - %T0.4h}, %1" : "=w" (ret) : "Q"(*__a));
82
+ return ret;
83
+ }
84
+
85
+ __extension__ extern __inline float32x2x2_t
86
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
87
+ vld1_f32_x2 (const float32_t *__a)
88
+ {
89
+ float32x2x2_t ret;
90
+ asm volatile("ld1 {%S0.2s - %T0.2s}, %1" : "=w" (ret) : "Q"(*__a));
91
+ return ret;
92
+ }
93
+
94
+ __extension__ extern __inline float64x1x2_t
95
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
96
+ vld1_f64_x2 (const float64_t *__a)
97
+ {
98
+ float64x1x2_t ret;
99
+ asm volatile("ld1 {%S0.1d - %T0.1d}, %1" : "=w" (ret) : "Q"(*__a));
100
+ return ret;
101
+ }
102
+
103
+ __extension__ extern __inline poly8x8x2_t
104
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
105
+ vld1_p8_x2 (const poly8_t *__a)
106
+ {
107
+ poly8x8x2_t ret;
108
+ asm volatile("ld1 {%S0.8b - %T0.8b}, %1" : "=w" (ret) : "Q"(*__a));
109
+ return ret;
110
+ }
111
+
112
+ __extension__ extern __inline poly16x4x2_t
113
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
114
+ vld1_p16_x2 (const poly16_t *__a)
115
+ {
116
+ poly16x4x2_t ret;
117
+ asm volatile("ld1 {%S0.4h - %T0.4h}, %1" : "=w" (ret) : "Q"(*__a));
118
+ return ret;
119
+ }
120
+
121
+ __extension__ extern __inline poly64x1x2_t
122
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
123
+ vld1_p64_x2 (const poly64_t *__a)
124
+ {
125
+ poly64x1x2_t ret;
126
+ asm volatile("ld1 {%S0.1d - %T0.1d}, %1" : "=w" (ret) : "Q"(*__a));
127
+ return ret;
128
+ }
129
+
130
+ __extension__ extern __inline uint8x16x2_t
131
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
132
+ vld1q_u8_x2 (const uint8_t *__a)
133
+ {
134
+ uint8x16x2_t ret;
135
+ asm volatile("ld1 {%S0.16b - %T0.16b}, %1" : "=w" (ret) : "Q"(*__a));
136
+ return ret;
137
+ }
138
+
139
+ __extension__ extern __inline int8x16x2_t
140
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
141
+ vld1q_s8_x2 (const int8_t *__a)
142
+ {
143
+ int8x16x2_t ret;
144
+ asm volatile("ld1 {%S0.16b - %T0.16b}, %1" : "=w" (ret) : "Q"(*__a));
145
+ return ret;
146
+ }
147
+
148
+ __extension__ extern __inline uint16x8x2_t
149
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
150
+ vld1q_u16_x2 (const uint16_t *__a)
151
+ {
152
+ uint16x8x2_t ret;
153
+ asm volatile("ld1 {%S0.8h - %T0.8h}, %1" : "=w" (ret) : "Q"(*__a));
154
+ return ret;
155
+ }
156
+
157
+ __extension__ extern __inline int16x8x2_t
158
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
159
+ vld1q_s16_x2 (const int16_t *__a)
160
+ {
161
+ int16x8x2_t ret;
162
+ asm volatile("ld1 {%S0.8h - %T0.8h}, %1" : "=w" (ret) : "Q"(*__a));
163
+ return ret;
164
+ }
165
+
166
+ __extension__ extern __inline uint32x4x2_t
167
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
168
+ vld1q_u32_x2 (const uint32_t *__a)
169
+ {
170
+ uint32x4x2_t ret;
171
+ asm volatile("ld1 {%S0.4s - %T0.4s}, %1" : "=w" (ret) : "Q"(*__a));
172
+ return ret;
173
+ }
174
+
175
+ __extension__ extern __inline int32x4x2_t
176
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
177
+ vld1q_s32_x2 (const int32_t *__a)
178
+ {
179
+ int32x4x2_t ret;
180
+ asm volatile("ld1 {%S0.4s - %T0.4s}, %1" : "=w" (ret) : "Q"(*__a));
181
+ return ret;
182
+ }
183
+
184
+ __extension__ extern __inline uint64x2x2_t
185
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
186
+ vld1q_u64_x2 (const uint64_t *__a)
187
+ {
188
+ uint64x2x2_t ret;
189
+ asm volatile("ld1 {%S0.2d - %T0.2d}, %1" : "=w" (ret) : "Q"(*__a));
190
+ return ret;
191
+ }
192
+
193
+ __extension__ extern __inline int64x2x2_t
194
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
195
+ vld1q_s64_x2 (const int64_t *__a)
196
+ {
197
+ int64x2x2_t ret;
198
+ asm volatile("ld1 {%S0.2d - %T0.2d}, %1" : "=w" (ret) : "Q"(*__a));
199
+ return ret;
200
+ }
201
+
202
+ __extension__ extern __inline float16x8x2_t
203
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
204
+ vld1q_f16_x2 (const float16_t *__a)
205
+ {
206
+ float16x8x2_t ret;
207
+ asm volatile("ld1 {%S0.8h - %T0.8h}, %1" : "=w" (ret) : "Q"(*__a));
208
+ return ret;
209
+ }
210
+
211
+ __extension__ extern __inline float32x4x2_t
212
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
213
+ vld1q_f32_x2 (const float32_t *__a)
214
+ {
215
+ float32x4x2_t ret;
216
+ asm volatile("ld1 {%S0.4s - %T0.4s}, %1" : "=w" (ret) : "Q"(*__a));
217
+ return ret;
218
+ }
219
+
220
+ __extension__ extern __inline float64x2x2_t
221
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
222
+ vld1q_f64_x2 (const float64_t *__a)
223
+ {
224
+ float64x2x2_t ret;
225
+ asm volatile("ld1 {%S0.2d - %T0.2d}, %1" : "=w" (ret) : "Q"(*__a));
226
+ return ret;
227
+ }
228
+
229
+ __extension__ extern __inline poly8x16x2_t
230
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
231
+ vld1q_p8_x2 (const poly8_t *__a)
232
+ {
233
+ poly8x16x2_t ret;
234
+ asm volatile("ld1 {%S0.16b - %T0.16b}, %1" : "=w" (ret) : "Q"(*__a));
235
+ return ret;
236
+ }
237
+
238
+ __extension__ extern __inline poly16x8x2_t
239
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
240
+ vld1q_p16_x2 (const poly16_t *__a)
241
+ {
242
+ poly16x8x2_t ret;
243
+ asm volatile("ld1 {%S0.8h - %T0.8h}, %1" : "=w" (ret) : "Q"(*__a));
244
+ return ret;
245
+ }
246
+
247
+ __extension__ extern __inline poly64x2x2_t
248
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
249
+ vld1q_p64_x2 (const poly64_t *__a)
250
+ {
251
+ poly64x2x2_t ret;
252
+ asm volatile("ld1 {%S0.2d - %T0.2d}, %1" : "=w" (ret) : "Q"(*__a));
253
+ return ret;
254
+ }
255
+
256
+ /* vst1x2 */
257
+
258
+ __extension__ extern __inline void
259
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
260
+ vst1_s64_x2 (int64_t * __a, int64x1x2_t val)
261
+ {
262
+ asm volatile("st1 {%S1.1d - %T1.1d}, %0" : "=Q" (*__a) : "w" (val));
263
+ }
264
+
265
+ __extension__ extern __inline void
266
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
267
+ vst1_u64_x2 (uint64_t * __a, uint64x1x2_t val)
268
+ {
269
+ asm volatile("st1 {%S1.1d - %T1.1d}, %0" : "=Q" (*__a) : "w" (val));
270
+ }
271
+
272
+ __extension__ extern __inline void
273
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
274
+ vst1_f64_x2 (float64_t * __a, float64x1x2_t val)
275
+ {
276
+ asm volatile("st1 {%S1.1d - %T1.1d}, %0" : "=Q" (*__a) : "w" (val));
277
+ }
278
+
279
+ __extension__ extern __inline void
280
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
281
+ vst1_s8_x2 (int8_t * __a, int8x8x2_t val)
282
+ {
283
+ asm volatile("st1 {%S1.8b - %T1.8b}, %0" : "=Q" (*__a) : "w" (val));
284
+ }
285
+
286
+ __extension__ extern __inline void
287
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
288
+ vst1_p8_x2 (poly8_t * __a, poly8x8x2_t val)
289
+ {
290
+ asm volatile("st1 {%S1.8b - %T1.8b}, %0" : "=Q" (*__a) : "w" (val));
291
+ }
292
+
293
+ __extension__ extern __inline void
294
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
295
+ vst1_s16_x2 (int16_t * __a, int16x4x2_t val)
296
+ {
297
+ asm volatile("st1 {%S1.4h - %T1.4h}, %0" : "=Q" (*__a) : "w" (val));
298
+ }
299
+
300
+ __extension__ extern __inline void
301
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
302
+ vst1_p16_x2 (poly16_t * __a, poly16x4x2_t val)
303
+ {
304
+ asm volatile("st1 {%S1.4h - %T1.4h}, %0" : "=Q" (*__a) : "w" (val));
305
+ }
306
+
307
+ __extension__ extern __inline void
308
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
309
+ vst1_s32_x2 (int32_t * __a, int32x2x2_t val)
310
+ {
311
+ asm volatile("st1 {%S1.2s - %T1.2s}, %0" : "=Q" (*__a) : "w" (val));
312
+ }
313
+
314
+ __extension__ extern __inline void
315
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
316
+ vst1_u8_x2 (uint8_t * __a, uint8x8x2_t val)
317
+ {
318
+ asm volatile("st1 {%S1.8b - %T1.8b}, %0" : "=Q" (*__a) : "w" (val));
319
+ }
320
+
321
+ __extension__ extern __inline void
322
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
323
+ vst1_u16_x2 (uint16_t * __a, uint16x4x2_t val)
324
+ {
325
+ asm volatile("st1 {%S1.4h - %T1.4h}, %0" : "=Q" (*__a) : "w" (val));
326
+ }
327
+
328
+ __extension__ extern __inline void
329
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
330
+ vst1_u32_x2 (uint32_t * __a, uint32x2x2_t val)
331
+ {
332
+ asm volatile("st1 {%S1.2s - %T1.2s}, %0" : "=Q" (*__a) : "w" (val));
333
+ }
334
+
335
+ __extension__ extern __inline void
336
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
337
+ vst1_f16_x2 (float16_t * __a, float16x4x2_t val)
338
+ {
339
+ asm volatile("st1 {%S1.4h - %T1.4h}, %0" : "=Q" (*__a) : "w" (val));
340
+ }
341
+
342
+ __extension__ extern __inline void
343
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
344
+ vst1_f32_x2 (float32_t * __a, float32x2x2_t val)
345
+ {
346
+ asm volatile("st1 {%S1.2s - %T1.2s}, %0" : "=Q" (*__a) : "w" (val));
347
+ }
348
+
349
+ __extension__ extern __inline void
350
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
351
+ vst1_p64_x2 (poly64_t * __a, poly64x1x2_t val)
352
+ {
353
+ asm volatile("st1 {%S1.1d - %T1.1d}, %0" : "=Q" (*__a) : "w" (val));
354
+ }
355
+
356
+ __extension__ extern __inline void
357
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
358
+ vst1q_s8_x2 (int8_t * __a, int8x16x2_t val)
359
+ {
360
+ asm volatile("st1 {%S1.16b - %T1.16b}, %0" : "=Q" (*__a) : "w" (val));
361
+ }
362
+
363
+ __extension__ extern __inline void
364
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
365
+ vst1q_p8_x2 (poly8_t * __a, poly8x16x2_t val)
366
+ {
367
+ asm volatile("st1 {%S1.16b - %T1.16b}, %0" : "=Q" (*__a) : "w" (val));
368
+ }
369
+
370
+ __extension__ extern __inline void
371
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
372
+ vst1q_s16_x2 (int16_t * __a, int16x8x2_t val)
373
+ {
374
+ asm volatile("st1 {%S1.8h - %T1.8h}, %0" : "=Q" (*__a) : "w" (val));
375
+ }
376
+
377
+ __extension__ extern __inline void
378
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
379
+ vst1q_p16_x2 (poly16_t * __a, poly16x8x2_t val)
380
+ {
381
+ asm volatile("st1 {%S1.8h - %T1.8h}, %0" : "=Q" (*__a) : "w" (val));
382
+ }
383
+
384
+ __extension__ extern __inline void
385
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
386
+ vst1q_s32_x2 (int32_t * __a, int32x4x2_t val)
387
+ {
388
+ asm volatile("st1 {%S1.4s - %T1.4s}, %0" : "=Q" (*__a) : "w" (val));
389
+ }
390
+
391
+ __extension__ extern __inline void
392
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
393
+ vst1q_s64_x2 (int64_t * __a, int64x2x2_t val)
394
+ {
395
+ asm volatile("st1 {%S1.2d - %T1.2d}, %0" : "=Q" (*__a) : "w" (val));
396
+ }
397
+
398
+ __extension__ extern __inline void
399
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
400
+ vst1q_u8_x2 (uint8_t * __a, uint8x16x2_t val)
401
+ {
402
+ asm volatile("st1 {%S1.16b - %T1.16b}, %0" : "=Q" (*__a) : "w" (val));
403
+ }
404
+
405
+ __extension__ extern __inline void
406
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
407
+ vst1q_u16_x2 (uint16_t * __a, uint16x8x2_t val)
408
+ {
409
+ asm volatile("st1 {%S1.8h - %T1.8h}, %0" : "=Q" (*__a) : "w" (val));
410
+ }
411
+
412
+ __extension__ extern __inline void
413
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
414
+ vst1q_u32_x2 (uint32_t * __a, uint32x4x2_t val)
415
+ {
416
+ asm volatile("st1 {%S1.4s - %T1.4s}, %0" : "=Q" (*__a) : "w" (val));
417
+ }
418
+
419
+ __extension__ extern __inline void
420
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
421
+ vst1q_u64_x2 (uint64_t * __a, uint64x2x2_t val)
422
+ {
423
+ asm volatile("st1 {%S1.2d - %T1.2d}, %0" : "=Q" (*__a) : "w" (val));
424
+ }
425
+
426
+ __extension__ extern __inline void
427
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
428
+ vst1q_f16_x2 (float16_t * __a, float16x8x2_t val)
429
+ {
430
+ asm volatile("st1 {%S1.8h - %T1.8h}, %0" : "=Q" (*__a) : "w" (val));
431
+ }
432
+
433
+ __extension__ extern __inline void
434
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
435
+ vst1q_f32_x2 (float32_t * __a, float32x4x2_t val)
436
+ {
437
+ asm volatile("st1 {%S1.4s - %T1.4s}, %0" : "=Q" (*__a) : "w" (val));
438
+ }
439
+
440
+ __extension__ extern __inline void
441
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
442
+ vst1q_f64_x2 (float64_t * __a, float64x2x2_t val)
443
+ {
444
+ asm volatile("st1 {%S1.2d - %T1.2d}, %0" : "=Q" (*__a) : "w" (val));
445
+ }
446
+
447
+ __extension__ extern __inline void
448
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
449
+ vst1q_p64_x2 (poly64_t * __a, poly64x2x2_t val)
450
+ {
451
+ asm volatile("st1 {%S1.2d - %T1.2d}, %0" : "=Q" (*__a) : "w" (val));
452
+ }
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/missing_vst1_neon.h ADDED
@@ -0,0 +1,8 @@
1
+ /* Workaround for missing vst1q_f32_x2 in gcc-8. */
2
+
3
+ __extension__ extern __inline void
4
+ __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
5
+ vst1q_f32_x2 (float32_t * __a, float32x4x2_t val)
6
+ {
7
+ asm volatile("st1 {%S1.4s - %T1.4s}, %0" : "=Q" (*__a) : "w" (val));
8
+ }
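Taken together with the vld1_x2 loads above, these workarounds are drop-in stand-ins for the intrinsics that older GCC releases lack. A minimal usage sketch (illustrative only, assuming an AArch64 toolchain with <arm_neon.h>; the helper name below is not part of these headers):

#include <arm_neon.h>

// Copies 8 floats with a single two-register load and store, using the
// vld1q_f32_x2 / vst1q_f32_x2 pair defined (or worked around) above.
static inline void copy8_f32(const float32_t* src, float32_t* dst) {
  float32x4x2_t v = vld1q_f32_x2(src);
  vst1q_f32_x2(dst, v);
}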
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256.h ADDED
@@ -0,0 +1,307 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+
8
+ #include <ATen/cpu/vec/vec_base.h>
9
+ #if !(defined(__VSX__) || defined(CPU_CAPABILITY_VSX) || defined(CPU_CAPABILITY_ZVECTOR))
10
+ #include <ATen/cpu/vec/vec256/vec256_float.h>
11
+ #include <ATen/cpu/vec/vec256/vec256_float_neon.h>
12
+ #include <ATen/cpu/vec/vec256/vec256_bfloat16.h>
13
+ #include <ATen/cpu/vec/vec256/vec256_double.h>
14
+ #include <ATen/cpu/vec/vec256/vec256_int.h>
15
+ #include <ATen/cpu/vec/vec256/vec256_qint.h>
16
+ #include <ATen/cpu/vec/vec256/vec256_complex_float.h>
17
+ #include <ATen/cpu/vec/vec256/vec256_complex_double.h>
18
+ #elif defined(__VSX__) || defined(CPU_CAPABILITY_VSX)
19
+ #include <ATen/cpu/vec/vec256/vsx/vec256_common_vsx.h>
20
+ #else
21
+ #include <ATen/cpu/vec/vec256/zarch/vec256_zarch.h>
22
+ #include <ATen/cpu/vec/vec256/vec256_bfloat16.h>
23
+ #endif
24
+
25
+ #include <algorithm>
26
+ #include <cstddef>
27
+ #include <cstdint>
28
+ #include <cstring>
29
+ #include <ostream>
30
+
31
+ namespace at::vec {
32
+
33
+ // Note [CPU_CAPABILITY namespace]
34
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
35
+ // This header, and all of its subheaders, will be compiled with
36
+ // different architecture flags for each supported set of vector
37
+ // intrinsics. So we need to make sure they aren't inadvertently
38
+ // linked together. We do this by declaring objects in an `inline
39
+ // namespace` which changes the name mangling, but can still be
40
+ // accessed as `at::vec`.
41
+ inline namespace CPU_CAPABILITY {
42
+
43
+ inline std::ostream& operator<<(std::ostream& stream, const c10::qint32& val) {
44
+ stream << val.val_;
45
+ return stream;
46
+ }
47
+ inline std::ostream& operator<<(std::ostream& stream, const c10::qint8& val) {
48
+ stream << static_cast<int>(val.val_);
49
+ return stream;
50
+ }
51
+ inline std::ostream& operator<<(std::ostream& stream, const c10::quint8& val) {
52
+ stream << static_cast<unsigned int>(val.val_);
53
+ return stream;
54
+ }
55
+
56
+ template <typename T>
57
+ std::ostream& operator<<(std::ostream& stream, const Vectorized<T>& vec) {
58
+ T buf[Vectorized<T>::size()];
59
+ vec.store(buf);
60
+ stream << "vec[";
61
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
62
+ if (i != 0) {
63
+ stream << ", ";
64
+ }
65
+ stream << buf[i];
66
+ }
67
+ stream << "]";
68
+ return stream;
69
+ }
70
+
71
+
72
+ #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
73
+
74
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CAST (AVX2) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
75
+
76
+ template<>
77
+ inline Vectorized<float> cast<float, double>(const Vectorized<double>& src) {
78
+ return _mm256_castpd_ps(src);
79
+ }
80
+
81
+ template<>
82
+ inline Vectorized<double> cast<double, float>(const Vectorized<float>& src) {
83
+ return _mm256_castps_pd(src);
84
+ }
85
+
86
+ template<>
87
+ inline Vectorized<float> cast<float, int32_t>(const Vectorized<int32_t>& src) {
88
+ return _mm256_castsi256_ps(src);
89
+ }
90
+
91
+ template<>
92
+ inline Vectorized<double> cast<double, int64_t>(const Vectorized<int64_t>& src) {
93
+ return _mm256_castsi256_pd(src);
94
+ }
95
+
96
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GATHER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
97
+
98
+ template<int64_t scale = 1>
99
+ std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<double>>
100
+ inline gather(const double* base_addr, const Vectorized<int64_t>& vindex) {
101
+ return _mm256_i64gather_pd(base_addr, vindex, scale);
102
+ }
103
+
104
+ template<int64_t scale = 1>
105
+ std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<float>>
106
+ inline gather(const float* base_addr, const Vectorized<int32_t>& vindex) {
107
+ return _mm256_i32gather_ps(base_addr, vindex, scale);
108
+ }
109
+
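As a point of reference, a small sketch of how the gather wrapper above is typically called (illustrative; the helper name is an assumption, not part of this header). Passing sizeof(float) as the scale makes each lane of vindex an element index into base_addr:

// Gathers base[idx[0]], ..., base[idx[7]] into one Vectorized<float>.
// Assumes AVX2 and the declarations above.
inline at::vec::Vectorized<float> gather_by_index(
    const float* base, const at::vec::Vectorized<int32_t>& idx) {
  return at::vec::gather<sizeof(float)>(base, idx);  // scale = 4 bytes per element
}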
110
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MASK GATHER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
111
+
112
+ template<int64_t scale = 1>
113
+ std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<double>>
114
+ inline mask_gather(const Vectorized<double>& src, const double* base_addr,
115
+ const Vectorized<int64_t>& vindex, Vectorized<double>& mask) {
116
+ return _mm256_mask_i64gather_pd(src, base_addr, vindex, mask, scale);
117
+ }
118
+
119
+ template<int64_t scale = 1>
120
+ std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<float>>
121
+ inline mask_gather(const Vectorized<float>& src, const float* base_addr,
122
+ const Vectorized<int32_t>& vindex, Vectorized<float>& mask) {
123
+ return _mm256_mask_i32gather_ps(src, base_addr, vindex, mask, scale);
124
+ }
125
+
126
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CONVERT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
127
+
128
+ // Only works for inputs in the range: [-2^51, 2^51]
129
+ // From: https://stackoverflow.com/a/41148578
130
+ template<>
131
+ Vectorized<int64_t>
132
+ inline convert_to_int_of_same_size<double>(const Vectorized<double> &src) {
133
+ auto x = _mm256_add_pd(src, _mm256_set1_pd(0x0018000000000000));
134
+ return _mm256_sub_epi64(
135
+ _mm256_castpd_si256(x),
136
+ _mm256_castpd_si256(_mm256_set1_pd(0x0018000000000000))
137
+ );
138
+ }
139
+
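The magic constant is easier to see in scalar form. As an illustrative aside (the helper below is not part of the header): 0x0018000000000000 equals 2^52 + 2^51 as an integer, so _mm256_set1_pd produces the double 6755399441055744.0; adding it pins the exponent so that the rounded integer value of the input lands in the low mantissa bits, and subtracting the constant's bit pattern as an int64 removes the bias again, which is what the intrinsics above do lane by lane.

#include <cstdint>
#include <cstring>

// Scalar equivalent of the double -> int64 conversion trick (|x| <= 2^51).
inline int64_t double_to_int64_magic(double x) {
  const double magic = 6755399441055744.0;   // 2^52 + 2^51
  double shifted = x + magic;                // exponent now fixed; x sits in the mantissa
  int64_t bits, magic_bits;
  std::memcpy(&bits, &shifted, sizeof(bits));
  std::memcpy(&magic_bits, &magic, sizeof(magic_bits));
  return bits - magic_bits;                  // same subtraction as _mm256_sub_epi64 above
}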
140
+ template<>
141
+ Vectorized<int32_t>
142
+ inline convert_to_int_of_same_size<float>(const Vectorized<float> &src) {
143
+ return _mm256_cvttps_epi32(src);
144
+ }
145
+
146
+ // Only works for inputs in the range: [-2^51, 2^51]
147
+ // From: https://stackoverflow.com/a/41148578
148
+ template<>
149
+ Vectorized<double>
150
+ inline convert_to_fp_of_same_size<double>(const Vectorized<int64_t> &src) {
151
+ auto x = _mm256_add_epi64(src, _mm256_castpd_si256(_mm256_set1_pd(0x0018000000000000)));
152
+ return _mm256_sub_pd(
153
+ _mm256_castsi256_pd(x),
154
+ _mm256_set1_pd(0x0018000000000000)
155
+ );
156
+ }
157
+
158
+ template<>
159
+ Vectorized<float>
160
+ inline convert_to_fp_of_same_size<float>(const Vectorized<int32_t> &src) {
161
+ return _mm256_cvtepi32_ps(src);
162
+ }
163
+
164
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ INTERLEAVE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
165
+
166
+ template <>
167
+ std::pair<Vectorized<double>, Vectorized<double>>
168
+ inline interleave2<double>(const Vectorized<double>& a, const Vectorized<double>& b) {
169
+ // inputs:
170
+ // a = {a0, a1, a2, a3}
171
+ // b = {b0, b1, b2, b3}
172
+
173
+ // swap lanes:
174
+ // a_swapped = {a0, a1, b0, b1}
175
+ // b_swapped = {a2, a3, b2, b3}
176
+ auto a_swapped = _mm256_permute2f128_pd(a, b, 0b0100000); // 0, 2. 4 bits apart
177
+ auto b_swapped = _mm256_permute2f128_pd(a, b, 0b0110001); // 1, 3. 4 bits apart
178
+
179
+ // group cols crossing lanes:
180
+ // return {a0, b0, a1, b1}
181
+ // {a2, b2, a3, b3}
182
+ return std::make_pair(_mm256_permute4x64_pd(a_swapped, 0b11011000), // 0, 2, 1, 3
183
+ _mm256_permute4x64_pd(b_swapped, 0b11011000)); // 0, 2, 1, 3
184
+ }
185
+
186
+ template <>
187
+ std::pair<Vectorized<float>, Vectorized<float>>
188
+ inline interleave2<float>(const Vectorized<float>& a, const Vectorized<float>& b) {
189
+ // inputs:
190
+ // a = {a0, a1, a2, a3, a4, a5, a6, a7}
191
+ // b = {b0, b1, b2, b3, b4, b5, b6, b7}
192
+
193
+ // swap lanes:
194
+ // a_swapped = {a0, a1, a2, a3, b0, b1, b2, b3}
195
+ // b_swapped = {a4, a5, a6, a7, b4, b5, b6, b7}
196
+ // TODO: can we support caching this?
197
+ auto a_swapped = _mm256_permute2f128_ps(a, b, 0b0100000); // 0, 2. 4 bits apart
198
+ auto b_swapped = _mm256_permute2f128_ps(a, b, 0b0110001); // 1, 3. 4 bits apart
199
+
200
+ // group cols crossing lanes:
201
+ // return {a0, b0, a1, b1, a2, b2, a3, b3}
202
+ // {a4, b4, a5, b5, a6, b6, a7, b7}
203
+ const __m256i group_ctrl = _mm256_setr_epi32(0, 4, 1, 5, 2, 6, 3, 7);
204
+ return std::make_pair(_mm256_permutevar8x32_ps(a_swapped, group_ctrl),
205
+ _mm256_permutevar8x32_ps(b_swapped, group_ctrl));
206
+ }
207
+
208
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DEINTERLEAVE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
209
+
210
+ template <>
211
+ std::pair<Vectorized<double>, Vectorized<double>>
212
+ inline deinterleave2<double>(const Vectorized<double>& a, const Vectorized<double>& b) {
213
+ // inputs:
214
+ // a = {a0, b0, a1, b1}
215
+ // b = {a2, b2, a3, b3}
216
+
217
+ // group cols crossing lanes:
218
+ // a_grouped = {a0, a1, b0, b1}
219
+ // b_grouped = {a2, a3, b2, b3}
220
+ auto a_grouped = _mm256_permute4x64_pd(a, 0b11011000); // 0, 2, 1, 3
221
+ auto b_grouped = _mm256_permute4x64_pd(b, 0b11011000); // 0, 2, 1, 3
222
+
223
+ // swap lanes:
224
+ // return {a0, a1, a2, a3}
225
+ // {b0, b1, b2, b3}
226
+ return std::make_pair(_mm256_permute2f128_pd(a_grouped, b_grouped, 0b0100000), // 0, 2. 4 bits apart
227
+ _mm256_permute2f128_pd(a_grouped, b_grouped, 0b0110001)); // 1, 3. 4 bits apart
228
+ }
229
+
230
+ template <>
231
+ std::pair<Vectorized<float>, Vectorized<float>>
232
+ inline deinterleave2<float>(const Vectorized<float>& a, const Vectorized<float>& b) {
233
+ // inputs:
234
+ // a = {a0, b0, a1, b1, a2, b2, a3, b3}
235
+ // b = {a4, b4, a5, b5, a6, b6, a7, b7}
236
+
237
+ // group cols crossing lanes:
238
+ // a_grouped = {a0, a1, a2, a3, b0, b1, b2, b3}
239
+ // b_grouped = {a4, a5, a6, a7, b4, b5, b6, b7}
240
+ // TODO: can we support caching this?
241
+ const __m256i group_ctrl = _mm256_setr_epi32(0, 2, 4, 6, 1, 3, 5, 7);
242
+ auto a_grouped = _mm256_permutevar8x32_ps(a, group_ctrl);
243
+ auto b_grouped = _mm256_permutevar8x32_ps(b, group_ctrl);
244
+
245
+ // swap lanes:
246
+ // return {a0, a1, a2, a3, a4, a5, a6, a7}
247
+ // {b0, b1, b2, b3, b4, b5, b6, b7}
248
+ return std::make_pair(_mm256_permute2f128_ps(a_grouped, b_grouped, 0b0100000), // 0, 2. 4 bits apart
249
+ _mm256_permute2f128_ps(a_grouped, b_grouped, 0b0110001)); // 1, 3. 4 bits apart
250
+ }
251
+
252
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FLIP ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
253
+
254
+ template<>
255
+ inline Vectorized<float> flip(const Vectorized<float> & v) {
256
+ const __m256i mask_float = _mm256_set_epi32(0, 1, 2, 3, 4, 5, 6, 7);
257
+ return _mm256_permutevar8x32_ps(v, mask_float);
258
+ }
259
+
260
+ template<>
261
+ inline Vectorized<double> flip(const Vectorized<double> & v) {
262
+ return _mm256_permute4x64_pd(v, 27); // 27 == _MM_SHUFFLE(0, 1, 2, 3)
263
+ }
264
+
265
+ template<>
266
+ inline Vectorized<int64_t> flip(const Vectorized<int64_t> & v) {
267
+ return _mm256_permute4x64_epi64(v, 27); // 27 == _MM_SHUFFLE(0, 1, 2, 3)
268
+ }
269
+
270
+ template<>
271
+ inline Vectorized<int32_t> flip(const Vectorized<int32_t> & v) {
272
+ const __m256i mask_int32 = _mm256_set_epi32(0, 1, 2, 3, 4, 5, 6, 7);
273
+ return _mm256_permutevar8x32_epi32(v, mask_int32);
274
+ }
275
+
276
+ template<>
277
+ inline Vectorized<int16_t> flip(const Vectorized<int16_t> & v) {
278
+ const __m256i mask = _mm256_set_epi8(
279
+ 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14,
280
+ 1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14
281
+ );
282
+ auto reversed = _mm256_shuffle_epi8(v, mask);
283
+ return _mm256_permute2x128_si256(reversed, reversed, 1);
284
+ }
285
+
286
+ inline __m256i flip8(const __m256i & v) {
287
+ const __m256i mask_int8 = _mm256_set_epi8(
288
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
289
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
290
+ );
291
+ auto reversed = _mm256_shuffle_epi8(v, mask_int8);
292
+ return _mm256_permute2x128_si256(reversed, reversed, 1);
293
+ }
294
+
295
+ template<>
296
+ inline Vectorized<int8_t> flip(const Vectorized<int8_t> & v) {
297
+ return flip8(v);
298
+ }
299
+
300
+ template<>
301
+ inline Vectorized<uint8_t> flip(const Vectorized<uint8_t> & v) {
302
+ return flip8(v);
303
+ }
304
+
305
+ #endif // defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
306
+
307
+ }} // namespace at::vec::CPU_CAPABILITY
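A short usage sketch of what this header exposes (illustrative only; the function below is an assumption, not part of ATen): code written against Vectorized<T> loads with loadu, combines vectors with ordinary operators, stores with store, and handles the tail scalar-wise, while the inline CPU_CAPABILITY namespace keeps the differently compiled copies from colliding at link time.

#include <ATen/cpu/vec/vec256/vec256.h>
#include <cstdint>

// out[i] = a[i] * b[i] + c[i], vectorized by whatever width the active
// CPU_CAPABILITY provides (8 floats for the AVX2 build), scalar tail at the end.
void mul_add(const float* a, const float* b, const float* c, float* out, int64_t n) {
  using Vec = at::vec::Vectorized<float>;
  int64_t i = 0;
  for (; i + Vec::size() <= n; i += Vec::size()) {
    Vec va = Vec::loadu(a + i);
    Vec vb = Vec::loadu(b + i);
    Vec vc = Vec::loadu(c + i);
    (va * vb + vc).store(out + i);
  }
  for (; i < n; ++i) {
    out[i] = a[i] * b[i] + c[i];
  }
}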
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_bfloat16.h ADDED
@@ -0,0 +1,1096 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+ #include <ATen/cpu/vec/vec_base.h>
8
+ #include <c10/util/irange.h>
9
+
10
+ #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
11
+ #include <sleef.h>
12
+ #endif
13
+
14
+ #pragma GCC diagnostic push
15
+ #pragma GCC diagnostic ignored "-Wignored-qualifiers"
16
+
17
+ namespace at::vec {
18
+ // See Note [CPU_CAPABILITY namespace]
19
+ inline namespace CPU_CAPABILITY {
20
+
21
+ #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
22
+
23
+ // bfloat16 conversion
24
+ static inline void cvtbf16_fp32(const __m128i& a, __m256& o) {
25
+ o = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_cvtepu16_epi32(a), 16));
26
+ }
27
+
28
+ static inline void cvtbf16_fp32(const __m256i& a, __m256& o1, __m256& o2) {
29
+ __m128i lo = _mm256_extractf128_si256(a, 0);
30
+ __m128i hi = _mm256_extractf128_si256(a, 1);
31
+ cvtbf16_fp32(lo, o1);
32
+ cvtbf16_fp32(hi, o2);
33
+ }
34
+ static inline __m256i cvtfp32_bf16(const __m256& a, const __m256& b) {
35
+ __m256i lo = _mm256_castps_si256(a);
36
+ __m256i hi = _mm256_castps_si256(b);
37
+ __m256i nan = _mm256_set1_epi32(0xffff);
38
+ __m256i mask_lo = _mm256_castps_si256(_mm256_cmp_ps(a, a, _CMP_ORD_Q));
39
+ __m256i mask_hi = _mm256_castps_si256(_mm256_cmp_ps(b, b, _CMP_ORD_Q));
40
+ __m256i ones = _mm256_set1_epi32(0x1);
41
+ __m256i vec_bias = _mm256_set1_epi32(0x7fff);
42
+ // uint32_t lsb = (input >> 16) & 1;
43
+ auto t_lo = _mm256_and_si256(_mm256_srli_epi32(lo, 16), ones);
44
+ auto t_hi = _mm256_and_si256(_mm256_srli_epi32(hi, 16), ones);
45
+ // uint32_t rounding_bias = 0x7fff + lsb;
46
+ t_lo = _mm256_add_epi32(t_lo, vec_bias);
47
+ t_hi = _mm256_add_epi32(t_hi, vec_bias);
48
+ // input += rounding_bias;
49
+ t_lo = _mm256_add_epi32(t_lo, lo);
50
+ t_hi = _mm256_add_epi32(t_hi, hi);
51
+ // input = input >> 16;
52
+ t_lo = _mm256_srli_epi32(t_lo, 16);
53
+ t_hi = _mm256_srli_epi32(t_hi, 16);
54
+ // Check NaN before converting back to bf16
55
+ t_lo = _mm256_blendv_epi8(nan, t_lo, mask_lo);
56
+ t_hi = _mm256_blendv_epi8(nan, t_hi, mask_hi);
57
+
58
+ t_lo = _mm256_packus_epi32(t_lo, t_hi); // t_hi[4-7] t_lo[4-7] t_hi[0-4] t_lo[0-4]
59
+ return _mm256_permute4x64_epi64(t_lo, 0xd8); // 11 01 10 00
60
+ }
61
+
62
+ static inline __m256i merge_compare_result(const __m256& a, const __m256& b) {
63
+ __m256i lo = _mm256_castps_si256(a);
64
+ __m256i hi = _mm256_castps_si256(b);
65
+ lo = _mm256_srli_epi32(lo, 16);
66
+ hi = _mm256_srli_epi32(hi, 16);
67
+ auto out = _mm256_packus_epi32(lo, hi);
68
+ return _mm256_permute4x64_epi64(out, 0xd8);
69
+ }
70
+
71
+ // float16 conversion
72
+ static inline void cvtfp16_fp32(const __m128i& a, __m256& o) {
73
+ o = _mm256_cvtph_ps(a);
74
+ }
75
+
76
+ static inline void cvtfp16_fp32(const __m256i& a, __m256& o1, __m256& o2) {
77
+ __m128i lo = _mm256_extractf128_si256(a, 0);
78
+ __m128i hi = _mm256_extractf128_si256(a, 1);
79
+ cvtfp16_fp32(lo, o1);
80
+ cvtfp16_fp32(hi, o2);
81
+ }
82
+
83
+ static inline __m256i cvtfp32_fp16(const __m256& a, const __m256& b) {
84
+ __m128i lo = _mm256_cvtps_ph(
85
+ a, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
86
+ __m128i hi = _mm256_cvtps_ph(
87
+ b, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
88
+ return _mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1);
89
+ }
90
+
91
+ // dtype conversion between float16/bfloat16 and float32
92
+ template <typename T, typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
93
+ inline void cvt_to_fp32(const __m128i& a, __m256& o);
94
+ template <> inline void cvt_to_fp32<BFloat16>(const __m128i& a, __m256& o) {
95
+ cvtbf16_fp32(a, o);
96
+ };
97
+ template <> inline void cvt_to_fp32<Half>(const __m128i& a, __m256& o) {
98
+ cvtfp16_fp32(a, o);
99
+ }
100
+
101
+ template <typename T, typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
102
+ inline void cvt_to_fp32(const __m256i& a, __m256& o1, __m256& o2);
103
+ template <> inline void cvt_to_fp32<BFloat16>(const __m256i& a, __m256& o1, __m256& o2) {
104
+ cvtbf16_fp32(a, o1, o2);
105
+ }
106
+ template <> inline void cvt_to_fp32<Half>(const __m256i& a, __m256& o1, __m256& o2) {
107
+ cvtfp16_fp32(a, o1, o2);
108
+ }
109
+
110
+ template <typename T, bool is_compare_op = false,
111
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
112
+ inline __m256i cvt_from_fp32(const __m256& a, const __m256& b);
113
+ template <> inline __m256i cvt_from_fp32<BFloat16, false>(const __m256& a, const __m256& b) {
114
+ return cvtfp32_bf16(a, b);
115
+ }
116
+ template <> inline __m256i cvt_from_fp32<BFloat16, true>(const __m256& a, const __m256& b) {
117
+ return merge_compare_result(a, b);
118
+ }
119
+ template <> inline __m256i cvt_from_fp32<Half, false>(const __m256& a, const __m256& b) {
120
+ return cvtfp32_fp16(a, b);
121
+ }
122
+ template <> inline __m256i cvt_from_fp32<Half, true>(const __m256& a, const __m256& b) {
123
+ return cvtfp32_fp16(a, b);
124
+ }
125
+
126
+ template <typename T>
127
+ class Vectorized16 {
128
+ static_assert(
129
+ is_reduced_floating_point_v<T>,
130
+ "Support only float16 and bfloat16.");
131
+ protected:
132
+ __m256i values;
133
+ public:
134
+ using value_type = uint16_t;
135
+ using size_type = int;
136
+ static constexpr size_type size() {
137
+ return 16;
138
+ }
139
+ Vectorized16() {}
140
+ Vectorized16(__m256i v) : values(v) {}
141
+ Vectorized16(T val) {
142
+ value_type uw = val.x;
143
+ values = _mm256_set1_epi16(uw);
144
+ }
145
+ Vectorized16(T val1, T val2, T val3, T val4,
146
+ T val5, T val6, T val7, T val8,
147
+ T val9, T val10, T val11, T val12,
148
+ T val13, T val14, T val15, T val16) {
149
+ values = _mm256_setr_epi16(
150
+ val1.x, val2.x, val3.x, val4.x, val5.x, val6.x, val7.x, val8.x,
151
+ val9.x, val10.x, val11.x, val12.x, val13.x, val14.x, val15.x, val16.x);
152
+ }
153
+ operator __m256i() const {
154
+ return values;
155
+ }
156
+ T& operator[](int idx) = delete;
157
+ const T& operator[](int idx) const = delete;
158
+ int zero_mask() const {
159
+ // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit
160
+ __m256i cmp = _mm256_cmpeq_epi16(values, _mm256_set1_epi16(0));
161
+ return _mm256_movemask_epi8(cmp);
162
+ }
163
+ static Vectorized<T> loadu(const void* ptr, int16_t count = size()) {
164
+ if (count == size())
165
+ return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(ptr));
166
+
167
+ __at_align__ int16_t tmp_values[size()];
168
+ std::memcpy(tmp_values, ptr, count * sizeof(int16_t));
169
+ return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(tmp_values));
170
+ }
171
+ void store(void* ptr, int count = size()) const {
172
+ if (count == size()) {
173
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values);
174
+ } else if (count > 0) {
175
+ __at_align__ int16_t tmp_values[size()];
176
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values);
177
+ std::memcpy(ptr, tmp_values, count * sizeof(int16_t));
178
+ }
179
+ }
180
+ template <int64_t mask>
181
+ static Vectorized<T> blend(const Vectorized<T>& a, const Vectorized<T>& b) {
182
+ __at_align__ int16_t tmp_values[size()];
183
+ a.store(tmp_values);
184
+ if (mask & 0x01)
185
+ tmp_values[0] = _mm256_extract_epi16(b.values, 0);
186
+ if (mask & 0x02)
187
+ tmp_values[1] = _mm256_extract_epi16(b.values, 1);
188
+ if (mask & 0x04)
189
+ tmp_values[2] = _mm256_extract_epi16(b.values, 2);
190
+ if (mask & 0x08)
191
+ tmp_values[3] = _mm256_extract_epi16(b.values, 3);
192
+ if (mask & 0x10)
193
+ tmp_values[4] = _mm256_extract_epi16(b.values, 4);
194
+ if (mask & 0x20)
195
+ tmp_values[5] = _mm256_extract_epi16(b.values, 5);
196
+ if (mask & 0x40)
197
+ tmp_values[6] = _mm256_extract_epi16(b.values, 6);
198
+ if (mask & 0x80)
199
+ tmp_values[7] = _mm256_extract_epi16(b.values, 7);
200
+ if (mask & 0x100)
201
+ tmp_values[8] = _mm256_extract_epi16(b.values, 8);
202
+ if (mask & 0x200)
203
+ tmp_values[9] = _mm256_extract_epi16(b.values, 9);
204
+ if (mask & 0x400)
205
+ tmp_values[10] = _mm256_extract_epi16(b.values, 10);
206
+ if (mask & 0x800)
207
+ tmp_values[11] = _mm256_extract_epi16(b.values, 11);
208
+ if (mask & 0x1000)
209
+ tmp_values[12] = _mm256_extract_epi16(b.values, 12);
210
+ if (mask & 0x2000)
211
+ tmp_values[13] = _mm256_extract_epi16(b.values, 13);
212
+ if (mask & 0x4000)
213
+ tmp_values[14] = _mm256_extract_epi16(b.values, 14);
214
+ if (mask & 0x8000)
215
+ tmp_values[15] = _mm256_extract_epi16(b.values, 15);
216
+ return loadu(tmp_values);
217
+ }
218
+ static Vectorized<T> blendv(const Vectorized<T>& a,
219
+ const Vectorized<T>& b, const Vectorized<T>& mask) {
220
+ return _mm256_blendv_epi8(a.values, b.values, mask.values);
221
+ }
222
+ template<typename step_t>
223
+ static Vectorized<T> arange(T base = 0.f, step_t step = static_cast<step_t>(1)) {
224
+ return Vectorized<T>(
225
+ base, base + step, base + 2 * step, base + 3 * step,
226
+ base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step,
227
+ base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step,
228
+ base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step);
229
+ }
230
+ static Vectorized<T> set(const Vectorized<T>& a,
231
+ const Vectorized<T>& b, int64_t count = size()) {
232
+ switch (count) {
233
+ case 0:
234
+ return a;
235
+ case 1:
236
+ return blend<1>(a, b);
237
+ case 2:
238
+ return blend<3>(a, b);
239
+ case 3:
240
+ return blend<7>(a, b);
241
+ case 4:
242
+ return blend<15>(a, b);
243
+ case 5:
244
+ return blend<31>(a, b);
245
+ case 6:
246
+ return blend<63>(a, b);
247
+ case 7:
248
+ return blend<127>(a, b);
249
+ case 8:
250
+ return blend<255>(a, b);
251
+ case 9:
252
+ return blend<511>(a, b);
253
+ case 10:
254
+ return blend<1023>(a, b);
255
+ case 11:
256
+ return blend<2047>(a, b);
257
+ case 12:
258
+ return blend<4095>(a, b);
259
+ case 13:
260
+ return blend<8191>(a, b);
261
+ case 14:
262
+ return blend<16383>(a, b);
263
+ case 15:
264
+ return blend<32767>(a, b);
265
+ }
266
+ return b;
267
+ }
268
+ Vectorized<T> map(const __m256 (*const vop)(__m256)) const {
269
+ __m256 lo, hi;
270
+ cvt_to_fp32<T>(values, lo, hi);
271
+ const auto o1 = vop(lo);
272
+ const auto o2 = vop(hi);
273
+ return cvt_from_fp32<T>(o1, o2);
274
+ }
275
+ Vectorized<T> isnan() const {
276
+ __m256 lo, hi;
277
+ cvt_to_fp32<T>(values, lo, hi);
278
+ lo = _mm256_cmp_ps(lo, _mm256_set1_ps(0.0f), _CMP_UNORD_Q);
279
+ hi = _mm256_cmp_ps(hi, _mm256_set1_ps(0.0f), _CMP_UNORD_Q);
280
+ return merge_compare_result(lo, hi);
281
+ }
282
+ Vectorized<T> abs() const {
283
+ return _mm256_andnot_si256(_mm256_set1_epi16(0x8000), values);
284
+ }
285
+ Vectorized<T> angle() const {
286
+ __m256 lo, hi;
287
+ cvt_to_fp32<T>(values, lo, hi);
288
+ auto angle_lambda = [](__m256 values_2) {
289
+ const auto zero_vec = _mm256_set1_ps(0.f);
290
+ const auto nan_vec = _mm256_set1_ps(NAN);
291
+ const auto not_nan_mask = _mm256_cmp_ps(values_2, values_2, _CMP_EQ_OQ);
292
+ const auto nan_mask = _mm256_cmp_ps(not_nan_mask, zero_vec, _CMP_EQ_OQ);
293
+ const auto pi = _mm256_set1_ps(c10::pi<float>);
294
+
295
+ const auto neg_mask = _mm256_cmp_ps(values_2, zero_vec, _CMP_LT_OQ);
296
+ auto angle = _mm256_blendv_ps(zero_vec, pi, neg_mask);
297
+ angle = _mm256_blendv_ps(angle, nan_vec, nan_mask);
298
+ return angle;
299
+ };
300
+ auto o1 = angle_lambda(lo);
301
+ auto o2 = angle_lambda(hi);
302
+ return cvt_from_fp32<T>(o1, o2);
303
+ }
304
+ Vectorized<T> real() const {
305
+ return *this;
306
+ }
307
+ Vectorized<T> imag() const {
308
+ return _mm256_set1_epi16(0);
309
+ }
310
+ Vectorized<T> conj() const {
311
+ return *this;
312
+ }
313
+ Vectorized<T> acos() const {
314
+ return map(Sleef_acosf8_u10);
315
+ }
316
+ Vectorized<T> acosh() const {
317
+ return map(Sleef_acoshf8_u10);
318
+ }
319
+ Vectorized<T> asin() const {
320
+ return map(Sleef_asinf8_u10);
321
+ }
322
+ Vectorized<T> atan() const {
323
+ return map(Sleef_atanf8_u10);
324
+ }
325
+ Vectorized<T> atanh() const {
326
+ return map(Sleef_atanhf8_u10);
327
+ }
328
+ Vectorized<T> atan2(const Vectorized<T> &b) const {
329
+ __m256 lo, hi;
330
+ __m256 b1, b2;
331
+ cvt_to_fp32<T>(values, lo, hi);
332
+ cvt_to_fp32<T>(b.values, b1, b2);
333
+ auto o1 = Sleef_atan2f8_u10(lo, b1);
334
+ auto o2 = Sleef_atan2f8_u10(hi, b2);
335
+ return cvt_from_fp32<T>(o1, o2);
336
+ }
337
+ Vectorized<T> copysign(const Vectorized<T> &sign) const {
338
+ // copy sign bit (0x8000) from sign and remaining bits from values
339
+ __m256i mask_value = _mm256_set1_epi32(~0x80008000);
340
+ __m256i mask_signbit = _mm256_set1_epi32(0x80008000);
341
+ return Vectorized<T>(
342
+ _mm256_or_si256(
343
+ _mm256_and_si256(values, mask_value),
344
+ _mm256_and_si256(sign, mask_signbit)));
345
+ }
346
+ Vectorized<T> erf() const {
347
+ return map(Sleef_erff8_u10);
348
+ }
349
+ Vectorized<T> erfc() const {
350
+ return map(Sleef_erfcf8_u15);
351
+ }
352
+ Vectorized<T> erfinv() const {
353
+ __m256 lo, hi;
354
+ cvt_to_fp32<T>(values, lo, hi);
355
+ __at_align__ float tmp1[size() / 2], tmp2[size() / 2];
356
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
357
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
358
+ for (int64_t i = 0; i < size() / 2; i++) {
359
+ tmp1[i] = calc_erfinv(tmp1[i]);
360
+ tmp2[i] = calc_erfinv(tmp2[i]);
361
+ }
362
+ auto o1 = _mm256_loadu_ps(tmp1);
363
+ auto o2 = _mm256_loadu_ps(tmp2);
364
+ return cvt_from_fp32<T>(o1, o2);
365
+ }
366
+ Vectorized<T> exp() const {
367
+ return map(Sleef_expf8_u10);
368
+ }
369
+ Vectorized<T> exp2() const {
370
+ return map(Sleef_exp2f8_u10);
371
+ }
372
+ Vectorized<T> expm1() const {
373
+ return map(Sleef_expm1f8_u10);
374
+ }
375
+ Vectorized<T> exp_u20() const {
376
+ return exp();
377
+ }
378
+ Vectorized<T> fmod(const Vectorized<T> & q) const {
379
+ __m256 x_lo, x_hi;
380
+ cvt_to_fp32<T>(values, x_lo, x_hi);
381
+ __m256 q_lo, q_hi;
382
+ cvt_to_fp32<T>(q.values, q_lo, q_hi);
383
+ auto o1 = Sleef_fmodf8(x_lo, q_lo);
384
+ auto o2 = Sleef_fmodf8(x_hi, q_hi);
385
+ return cvt_from_fp32<T>(o1, o2);
386
+ }
387
+ Vectorized<T> hypot(const Vectorized<T> &b) const {
388
+ __m256 lo, hi;
389
+ __m256 b1, b2;
390
+ cvt_to_fp32<T>(values, lo, hi);
391
+ cvt_to_fp32<T>(b.values, b1, b2);
392
+ auto o1 = Sleef_hypotf8_u05(lo, b1);
393
+ auto o2 = Sleef_hypotf8_u05(hi, b2);
394
+ return cvt_from_fp32<T>(o1, o2);
395
+ }
396
+ Vectorized<T> i0() const {
397
+ __m256 lo, hi;
398
+ cvt_to_fp32<T>(values, lo, hi);
399
+ __at_align__ float tmp1[size() / 2], tmp2[size() / 2];
400
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
401
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
402
+ for (int64_t i = 0; i < size() / 2; i++) {
403
+ tmp1[i] = calc_i0(tmp1[i]);
404
+ tmp2[i] = calc_i0(tmp2[i]);
405
+ }
406
+ auto o1 = _mm256_loadu_ps(tmp1);
407
+ auto o2 = _mm256_loadu_ps(tmp2);
408
+ return cvt_from_fp32<T>(o1, o2);
409
+ }
410
+ Vectorized<T> i0e() const {
411
+ __m256 lo, hi;
412
+ cvt_to_fp32<T>(values, lo, hi);
413
+ constexpr auto sz = size();
414
+ __at_align__ float tmp1[sz / 2], tmp2[sz / 2];
415
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
416
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
417
+
418
+ for (auto i = decltype(sz){0}; i < sz / 2; i++) {
419
+ tmp1[i] = calc_i0e(tmp1[i]);
420
+ tmp2[i] = calc_i0e(tmp2[i]);
421
+ }
422
+ const auto o1 = _mm256_loadu_ps(tmp1);
423
+ const auto o2 = _mm256_loadu_ps(tmp2);
424
+ return cvt_from_fp32<T>(o1, o2);
425
+ }
426
+ Vectorized<T> digamma() const {
427
+ __m256 lo, hi;
428
+ cvt_to_fp32<T>(values, lo, hi);
429
+ constexpr auto sz = size();
430
+ __at_align__ float tmp1[sz / 2], tmp2[sz / 2];
431
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
432
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
433
+
434
+ for (auto i = decltype(sz){0}; i < sz / 2; i++) {
435
+ tmp1[i] = calc_digamma(tmp1[i]);
436
+ tmp2[i] = calc_digamma(tmp2[i]);
437
+ }
438
+ const auto o1 = _mm256_loadu_ps(tmp1);
439
+ const auto o2 = _mm256_loadu_ps(tmp2);
440
+ return cvt_from_fp32<T>(o1, o2);
441
+ }
442
+ Vectorized<T> igamma(const Vectorized<T> &x) const {
443
+ __m256 lo, hi;
444
+ __m256 xlo, xhi;
445
+ cvt_to_fp32<T>(values, lo, hi);
446
+ cvt_to_fp32<T>(x.values, xlo, xhi);
447
+ __at_align__ float tmp1[size() / 2], tmp2[size() / 2];
448
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
449
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
450
+ __at_align__ float tmpx1[size() / 2], tmpx2[size() / 2];
451
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmpx1), xlo);
452
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmpx2), xhi);
453
+ for (int64_t i = 0; i < size() / 2; ++i) {
454
+ tmp1[i] = calc_igamma(tmp1[i], tmpx1[i]);
455
+ tmp2[i] = calc_igamma(tmp2[i], tmpx2[i]);
456
+ }
457
+ auto o1 = _mm256_loadu_ps(tmp1);
458
+ auto o2 = _mm256_loadu_ps(tmp2);
459
+ return cvt_from_fp32<T>(o1, o2);
460
+ }
461
+
462
+ Vectorized<T> igammac(const Vectorized<T> &x) const {
463
+ __m256 lo, hi;
464
+ __m256 xlo, xhi;
465
+ cvt_to_fp32<T>(values, lo, hi);
466
+ cvt_to_fp32<T>(x.values, xlo, xhi);
467
+ __at_align__ float tmp1[size() / 2], tmp2[size() / 2];
468
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
469
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
470
+ __at_align__ float tmpx1[size() / 2], tmpx2[size() / 2];
471
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmpx1), xlo);
472
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmpx2), xhi);
473
+ for (int64_t i = 0; i < size() / 2; ++i) {
474
+ tmp1[i] = calc_igammac(tmp1[i], tmpx1[i]);
475
+ tmp2[i] = calc_igammac(tmp2[i], tmpx2[i]);
476
+ }
477
+ auto o1 = _mm256_loadu_ps(tmp1);
478
+ auto o2 = _mm256_loadu_ps(tmp2);
479
+ return cvt_from_fp32<T>(o1, o2);
480
+ }
481
+ Vectorized<T> log() const {
482
+ return map(Sleef_logf8_u10);
483
+ }
484
+ Vectorized<T> log2() const {
485
+ return map(Sleef_log2f8_u10);
486
+ }
487
+ Vectorized<T> log10() const {
488
+ return map(Sleef_log10f8_u10);
489
+ }
490
+ Vectorized<T> log1p() const {
491
+ return map(Sleef_log1pf8_u10);
492
+ }
493
+ Vectorized<T> sin() const {
494
+ return map(Sleef_sinf8_u10);
495
+ }
496
+ Vectorized<T> sinh() const {
497
+ return map(Sleef_sinhf8_u10);
498
+ }
499
+ Vectorized<T> cos() const {
500
+ return map(Sleef_cosf8_u10);
501
+ }
502
+ Vectorized<T> cosh() const {
503
+ return map(Sleef_coshf8_u10);
504
+ }
505
+ Vectorized<T> ceil() const {
506
+ __m256 lo, hi;
507
+ cvt_to_fp32<T>(values, lo, hi);
508
+ auto o1 = _mm256_ceil_ps(lo);
509
+ auto o2 = _mm256_ceil_ps(hi);
510
+ return cvt_from_fp32<T>(o1, o2);
511
+ }
512
+ Vectorized<T> floor() const {
513
+ __m256 lo, hi;
514
+ cvt_to_fp32<T>(values, lo, hi);
515
+ auto o1 = _mm256_floor_ps(lo);
516
+ auto o2 = _mm256_floor_ps(hi);
517
+ return cvt_from_fp32<T>(o1, o2);
518
+ }
519
+ Vectorized<T> neg() const {
520
+ return _mm256_xor_si256(values, _mm256_set1_epi16(0x8000));
521
+ }
522
+ Vectorized<T> round() const {
523
+ __m256 lo, hi;
524
+ cvt_to_fp32<T>(values, lo, hi);
525
+ auto o1 = _mm256_round_ps(lo, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
526
+ auto o2 = _mm256_round_ps(hi, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
527
+ return cvt_from_fp32<T>(o1, o2);
528
+ }
529
+ Vectorized<T> tan() const {
530
+ return map(Sleef_tanf8_u10);
531
+ }
532
+ Vectorized<T> tanh() const {
533
+ return map(Sleef_tanhf8_u10);
534
+ }
535
+ Vectorized<T> trunc() const {
536
+ __m256 lo, hi;
537
+ cvt_to_fp32<T>(values, lo, hi);
538
+ auto o1 = _mm256_round_ps(lo, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
539
+ auto o2 = _mm256_round_ps(hi, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
540
+ return cvt_from_fp32<T>(o1, o2);
541
+ }
542
+ Vectorized<T> lgamma() const {
543
+ return map(Sleef_lgammaf8_u10);
544
+ }
545
+ Vectorized<T> sqrt() const {
546
+ __m256 lo, hi;
547
+ cvt_to_fp32<T>(values, lo, hi);
548
+ auto o1 = _mm256_sqrt_ps(lo);
549
+ auto o2 = _mm256_sqrt_ps(hi);
550
+ return cvt_from_fp32<T>(o1, o2);
551
+ }
552
+ Vectorized<T> reciprocal() const {
553
+ __m256 lo, hi;
554
+ cvt_to_fp32<T>(values, lo, hi);
555
+ auto ones = _mm256_set1_ps(1);
556
+ auto o1 = _mm256_div_ps(ones, lo);
557
+ auto o2 = _mm256_div_ps(ones, hi);
558
+ return cvt_from_fp32<T>(o1, o2);
559
+ }
560
+ Vectorized<T> rsqrt() const {
561
+ __m256 lo, hi;
562
+ cvt_to_fp32<T>(values, lo, hi);
563
+ auto ones = _mm256_set1_ps(1);
564
+ auto o1 = _mm256_div_ps(ones, _mm256_sqrt_ps(lo));
565
+ auto o2 = _mm256_div_ps(ones, _mm256_sqrt_ps(hi));
566
+ return cvt_from_fp32<T>(o1, o2);
567
+ }
568
+ Vectorized<T> pow(const Vectorized<T> &b) const {
569
+ __m256 lo, hi;
570
+ __m256 b1, b2;
571
+ cvt_to_fp32<T>(values, lo, hi);
572
+ cvt_to_fp32<T>(b.values, b1, b2);
573
+ auto o1 = Sleef_powf8_u10(lo, b1);
574
+ auto o2 = Sleef_powf8_u10(hi, b2);
575
+ return cvt_from_fp32<T>(o1, o2);
576
+ }
577
+ private:
578
+ template<typename Op>
579
+ Vectorized<T> inline binary_compare(const Vectorized<T>& b, Op op) const {
580
+ __m256 a_lo, a_hi;
581
+ __m256 b_lo, b_hi;
582
+ cvt_to_fp32<T>(values, a_lo, a_hi);
583
+ cvt_to_fp32<T>(b.values, b_lo, b_hi);
584
+ auto o1 = op(a_lo, b_lo);
585
+ auto o2 = op(a_hi, b_hi);
586
+ return cvt_from_fp32<T, /*is_compare_op*/true>(o1, o2);
587
+ }
588
+
589
+ public:
590
+ Vectorized<T> inline operator>(const Vectorized<T>& other) const {
591
+ return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_GT_OQ); });
592
+ }
593
+ Vectorized<T> inline operator<(const Vectorized<T>& other) const {
594
+ return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_LT_OQ); });
595
+ }
596
+ Vectorized<T> inline operator>=(const Vectorized<T>& other) const {
597
+ return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_GE_OQ); });
598
+ }
599
+ Vectorized<T> inline operator<=(const Vectorized<T>& other) const {
600
+ return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_LE_OQ); });
601
+ }
602
+ Vectorized<T> inline operator==(const Vectorized<T>& other) const {
603
+ return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_EQ_OQ); });
604
+ }
605
+ Vectorized<T> inline operator!=(const Vectorized<T>& other) const {
606
+ return binary_compare(other, [](__m256 x, __m256 y) { return _mm256_cmp_ps(x, y, _CMP_NEQ_UQ); });
607
+ }
608
+ };
609
+
610
+ template<typename T, typename Op>
611
+ static inline Vectorized<T> binary_op_as_fp32(const Vectorized<T>& a, const Vectorized<T>& b, Op op) {
612
+ __m256 a_lo, a_hi;
613
+ __m256 b_lo, b_hi;
614
+ cvt_to_fp32<T>(__m256i(a), a_lo, a_hi);
615
+ cvt_to_fp32<T>(__m256i(b), b_lo, b_hi);
616
+ auto o1 = op(a_lo, b_lo);
617
+ auto o2 = op(a_hi, b_hi);
618
+ return cvt_from_fp32<T>(o1, o2);
619
+ }
620
+
621
+ template <>
622
+ class Vectorized<BFloat16>: public Vectorized16<BFloat16> {
623
+ public:
624
+ using Vectorized16::Vectorized16;
625
+
626
+ Vectorized<BFloat16> frac() const;
627
+
628
+ Vectorized<BFloat16> eq(const Vectorized<BFloat16>& other) const;
629
+ Vectorized<BFloat16> ne(const Vectorized<BFloat16>& other) const;
630
+ Vectorized<BFloat16> gt(const Vectorized<BFloat16>& other) const;
631
+ Vectorized<BFloat16> ge(const Vectorized<BFloat16>& other) const;
632
+ Vectorized<BFloat16> lt(const Vectorized<BFloat16>& other) const;
633
+ Vectorized<BFloat16> le(const Vectorized<BFloat16>& other) const;
634
+ };
635
+
636
+ Vectorized<BFloat16> inline operator+(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
637
+ return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_add_ps(x, y); });
638
+ }
639
+ Vectorized<BFloat16> inline operator-(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
640
+ return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_sub_ps(x, y); });
641
+ }
642
+ Vectorized<BFloat16> inline operator*(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
643
+ return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_mul_ps(x, y); });
644
+ }
645
+ Vectorized<BFloat16> inline operator/(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
646
+ return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_div_ps(x, y); });
647
+ }
648
+ Vectorized<BFloat16> inline operator&(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
649
+ return _mm256_and_si256(a, b);
650
+ }
651
+ Vectorized<BFloat16> inline operator|(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
652
+ return _mm256_or_si256(a, b);
653
+ }
654
+ Vectorized<BFloat16> inline operator^(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
655
+ return _mm256_xor_si256(a, b);
656
+ }
657
+
658
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::eq(const Vectorized<BFloat16>& other) const {
659
+ return (*this == other) & Vectorized<BFloat16>(1.0f);
660
+ }
661
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::ne(const Vectorized<BFloat16>& other) const {
662
+ return (*this != other) & Vectorized<BFloat16>(1.0f);
663
+ }
664
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::gt(const Vectorized<BFloat16>& other) const {
665
+ return (*this > other) & Vectorized<BFloat16>(1.0f);
666
+ }
667
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::ge(const Vectorized<BFloat16>& other) const {
668
+ return (*this >= other) & Vectorized<BFloat16>(1.0f);
669
+ }
670
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::lt(const Vectorized<BFloat16>& other) const {
671
+ return (*this < other) & Vectorized<BFloat16>(1.0f);
672
+ }
673
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::le(const Vectorized<BFloat16>& other) const {
674
+ return (*this <= other) & Vectorized<BFloat16>(1.0f);
675
+ }
676
+
677
+ // frac. Implement this here so we can use subtraction
678
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::frac() const {
679
+ return *this - this->trunc();
680
+ }
681
+
682
+ // Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
683
+ // either input is a NaN.
684
+ template <>
685
+ Vectorized<BFloat16> inline maximum(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
686
+ __m256 a_lo, a_hi;
687
+ __m256 b_lo, b_hi;
688
+ cvtbf16_fp32(__m256i(a), a_lo, a_hi);
689
+ cvtbf16_fp32(__m256i(b), b_lo, b_hi);
690
+ auto max_lo = _mm256_max_ps(a_lo, b_lo);
691
+ auto max_hi = _mm256_max_ps(a_hi, b_hi);
692
+ auto nan_lo = _mm256_cmp_ps(a_lo, b_lo, _CMP_UNORD_Q);
693
+ auto nan_hi = _mm256_cmp_ps(a_hi, b_hi, _CMP_UNORD_Q);
694
+ // Exploit the fact that all-ones is a NaN.
695
+ auto o1 = _mm256_or_ps(max_lo, nan_lo);
696
+ auto o2 = _mm256_or_ps(max_hi, nan_hi);
697
+ return cvtfp32_bf16(o1, o2);
698
+ }
699
+
700
+ // Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
701
+ // either input is a NaN.
702
+ template <>
703
+ Vectorized<BFloat16> inline minimum(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
704
+ __m256 a_lo, a_hi;
705
+ __m256 b_lo, b_hi;
706
+ cvtbf16_fp32(__m256i(a), a_lo, a_hi);
707
+ cvtbf16_fp32(__m256i(b), b_lo, b_hi);
708
+ auto min_lo = _mm256_min_ps(a_lo, b_lo);
709
+ auto min_hi = _mm256_min_ps(a_hi, b_hi);
710
+ auto nan_lo = _mm256_cmp_ps(a_lo, b_lo, _CMP_UNORD_Q);
711
+ auto nan_hi = _mm256_cmp_ps(a_hi, b_hi, _CMP_UNORD_Q);
712
+ // Exploit the fact that all-ones is a NaN.
713
+ auto o1 = _mm256_or_ps(min_lo, nan_lo);
714
+ auto o2 = _mm256_or_ps(min_hi, nan_hi);
715
+ return cvtfp32_bf16(o1, o2);
716
+ }
717
+
718
+ template <>
719
+ Vectorized<BFloat16> inline clamp(const Vectorized<BFloat16>& a,
720
+ const Vectorized<BFloat16>& min, const Vectorized<BFloat16>& max) {
721
+ __m256 a_lo, a_hi;
722
+ __m256 min_lo, min_hi;
723
+ __m256 max_lo, max_hi;
724
+ cvtbf16_fp32(__m256i(a), a_lo, a_hi);
725
+ cvtbf16_fp32(__m256i(min), min_lo, min_hi);
726
+ cvtbf16_fp32(__m256i(max), max_lo, max_hi);
727
+ auto o1 = _mm256_min_ps(max_lo, _mm256_max_ps(min_lo, a_lo));
728
+ auto o2 = _mm256_min_ps(max_hi, _mm256_max_ps(min_hi, a_hi));
729
+ return cvtfp32_bf16(o1, o2);
730
+ }
731
+
732
+ template <>
733
+ Vectorized<BFloat16> inline clamp_max(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& max) {
734
+ __m256 a_lo, a_hi;
735
+ __m256 max_lo, max_hi;
736
+ cvtbf16_fp32(__m256i(a), a_lo, a_hi);
737
+ cvtbf16_fp32(__m256i(max), max_lo, max_hi);
738
+ auto o1 = _mm256_min_ps(max_lo, a_lo);
739
+ auto o2 = _mm256_min_ps(max_hi, a_hi);
740
+ return cvtfp32_bf16(o1, o2);
741
+ }
742
+
743
+ template <>
744
+ Vectorized<BFloat16> inline clamp_min(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& min) {
745
+ __m256 a_lo, a_hi;
746
+ __m256 min_lo, min_hi;
747
+ cvtbf16_fp32(__m256i(a), a_lo, a_hi);
748
+ cvtbf16_fp32(__m256i(min), min_lo, min_hi);
749
+ auto o1 = _mm256_max_ps(min_lo, a_lo);
750
+ auto o2 = _mm256_max_ps(min_hi, a_hi);
751
+ return cvtfp32_bf16(o1, o2);
752
+ }
753
+
754
+ template <>
755
+ inline void convert(const BFloat16* src, BFloat16* dst, int64_t n) {
756
+ int64_t i;
757
+ #pragma unroll
758
+ for (i = 0; i <= (n - Vectorized<BFloat16>::size()); i += Vectorized<BFloat16>::size()) {
759
+ auto vsrc = _mm256_loadu_si256(reinterpret_cast<__m256i*>((void*)(src + i)));
760
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>((void*)(dst + i)), vsrc);
761
+ }
762
+ #pragma unroll
763
+ for (; i < n; i++) {
764
+ dst[i] = src[i];
765
+ }
766
+ }
767
+
768
+ template <>
769
+ inline void convert(const float* src, BFloat16* dst, int64_t n) {
770
+ int64_t i;
771
+ for (i = 0; i + Vectorized<BFloat16>::size() <= n; i += Vectorized<BFloat16>::size()) {
772
+ __m256 a = _mm256_loadu_ps(&src[i]);
773
+ __m256 b = _mm256_loadu_ps(&src[i + 8]);
774
+
775
+ __m256i bf = cvtfp32_bf16(a, b);
776
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(&dst[i]), bf);
777
+ }
778
+ for (; i < n; i++) {
779
+ dst[i] = c10::convert<BFloat16>(src[i]);
780
+ }
781
+ }
782
+
783
+ template <>
784
+ inline void convert(const double* src, BFloat16* dst, int64_t n) {
785
+ auto load_float = [](const double *src) -> __m256 {
786
+ // Load one float vector from an array of doubles
787
+ __m128 a = _mm256_cvtpd_ps(_mm256_loadu_pd(src));
788
+ __m128 b = _mm256_cvtpd_ps(_mm256_loadu_pd(src + 4));
789
+ return _mm256_insertf128_ps(_mm256_castps128_ps256(a), b, 1);
790
+ };
791
+
792
+ int64_t i;
793
+ for (i = 0; i + Vectorized<BFloat16>::size() <= n; i += Vectorized<BFloat16>::size()) {
794
+ __m256 a = load_float(&src[i]);
795
+ __m256 b = load_float(&src[i + 8]);
796
+
797
+ __m256i bf = cvtfp32_bf16(a, b);
798
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(&dst[i]), bf);
799
+ }
800
+ for (; i < n; i++) {
801
+ dst[i] = c10::convert<BFloat16>(src[i]);
802
+ }
803
+ }
804
+
805
+ template <>
806
+ Vectorized<BFloat16> inline fmadd(const Vectorized<BFloat16>& a,
807
+ const Vectorized<BFloat16>& b, const Vectorized<BFloat16>& c) {
808
+ __m256 a_lo, a_hi;
809
+ __m256 b_lo, b_hi;
810
+ __m256 c_lo, c_hi;
811
+ cvtbf16_fp32(__m256i(a), a_lo, a_hi);
812
+ cvtbf16_fp32(__m256i(b), b_lo, b_hi);
813
+ cvtbf16_fp32(__m256i(c), c_lo, c_hi);
814
+ auto o1 = _mm256_fmadd_ps(a_lo, b_lo, c_lo);
815
+ auto o2 = _mm256_fmadd_ps(a_hi, b_hi, c_hi);
816
+ return cvtfp32_bf16(o1, o2);
817
+ }
818
+
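A usage sketch for the `fmadd` specialization above (a hypothetical `axpy_bf16` helper, shown only to illustrate the call pattern; it assumes the scalar-broadcast constructor, `loadu`, and `store` that `Vectorized16` provides earlier in this header):

void axpy_bf16(BFloat16 a, const BFloat16* x, BFloat16* y, int64_t n) {
  using Vec = Vectorized<BFloat16>;
  const Vec va(a);                              // broadcast the scalar multiplier
  int64_t i = 0;
  for (; i + Vec::size() <= n; i += Vec::size()) {
    // a*x + y is computed in fp32 lanes, then rounded back to bf16 once per element.
    fmadd(va, Vec::loadu(x + i), Vec::loadu(y + i)).store(y + i);
  }
  for (; i < n; i++) {
    y[i] = static_cast<BFloat16>(
        static_cast<float>(a) * static_cast<float>(x[i]) + static_cast<float>(y[i]));
  }
}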
819
+ template <>
820
+ class Vectorized<Half>: public Vectorized16<Half> {
821
+ public:
822
+ using Vectorized16::Vectorized16;
823
+
824
+ Vectorized<Half> frac() const;
825
+
826
+ Vectorized<Half> eq(const Vectorized<Half>& other) const;
827
+ Vectorized<Half> ne(const Vectorized<Half>& other) const;
828
+ Vectorized<Half> gt(const Vectorized<Half>& other) const;
829
+ Vectorized<Half> ge(const Vectorized<Half>& other) const;
830
+ Vectorized<Half> lt(const Vectorized<Half>& other) const;
831
+ Vectorized<Half> le(const Vectorized<Half>& other) const;
832
+ };
833
+
834
+ Vectorized<Half> inline operator+(const Vectorized<Half>& a, const Vectorized<Half>& b) {
835
+ return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_add_ps(x, y); });
836
+ }
837
+ Vectorized<Half> inline operator-(const Vectorized<Half>& a, const Vectorized<Half>& b) {
838
+ return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_sub_ps(x, y); });
839
+ }
840
+ Vectorized<Half> inline operator*(const Vectorized<Half>& a, const Vectorized<Half>& b) {
841
+ return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_mul_ps(x, y); });
842
+ }
843
+ Vectorized<Half> inline operator/(const Vectorized<Half>& a, const Vectorized<Half>& b) {
844
+ return binary_op_as_fp32(a, b, [](const __m256& x, const __m256& y) { return _mm256_div_ps(x, y); });
845
+ }
846
+ Vectorized<Half> inline operator&(const Vectorized<Half>& a, const Vectorized<Half>& b) {
847
+ return _mm256_and_si256(a, b);
848
+ }
849
+ Vectorized<Half> inline operator|(const Vectorized<Half>& a, const Vectorized<Half>& b) {
850
+ return _mm256_or_si256(a, b);
851
+ }
852
+ Vectorized<Half> inline operator^(const Vectorized<Half>& a, const Vectorized<Half>& b) {
853
+ return _mm256_xor_si256(a, b);
854
+ }
855
+
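The arithmetic operators above (+, -, *, /) go through `binary_op_as_fp32`, which presumably (it is defined earlier in this header) widens both Half operands to two fp32 vectors, applies the lambda, and narrows back to fp16, so each op rounds to half precision exactly once. A scalar analogue, for illustration only:

static inline Half add_half_ref(Half a, Half b) {
  // Same rounding behaviour as operator+ above: compute in float, round once to half.
  return static_cast<Half>(static_cast<float>(a) + static_cast<float>(b));
}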
856
+ inline Vectorized<Half> Vectorized<Half>::eq(const Vectorized<Half>& other) const {
857
+ return (*this == other) & Vectorized<Half>(1.0f);
858
+ }
859
+ inline Vectorized<Half> Vectorized<Half>::ne(const Vectorized<Half>& other) const {
860
+ return (*this != other) & Vectorized<Half>(1.0f);
861
+ }
862
+ inline Vectorized<Half> Vectorized<Half>::gt(const Vectorized<Half>& other) const {
863
+ return (*this > other) & Vectorized<Half>(1.0f);
864
+ }
865
+ inline Vectorized<Half> Vectorized<Half>::ge(const Vectorized<Half>& other) const {
866
+ return (*this >= other) & Vectorized<Half>(1.0f);
867
+ }
868
+ inline Vectorized<Half> Vectorized<Half>::lt(const Vectorized<Half>& other) const {
869
+ return (*this < other) & Vectorized<Half>(1.0f);
870
+ }
871
+ inline Vectorized<Half> Vectorized<Half>::le(const Vectorized<Half>& other) const {
872
+ return (*this <= other) & Vectorized<Half>(1.0f);
873
+ }
874
+
875
+ // frac. Implement this here so we can use subtraction
876
+ inline Vectorized<Half> Vectorized<Half>::frac() const {
877
+ return *this - this->trunc();
878
+ }
879
+
880
+ // Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
881
+ // either input is a NaN.
882
+ template <>
883
+ Vectorized<Half> inline maximum(const Vectorized<Half>& a, const Vectorized<Half>& b) {
884
+ __m256 a_lo, a_hi;
885
+ __m256 b_lo, b_hi;
886
+ cvtfp16_fp32(__m256i(a), a_lo, a_hi);
887
+ cvtfp16_fp32(__m256i(b), b_lo, b_hi);
888
+ auto max_lo = _mm256_max_ps(a_lo, b_lo);
889
+ auto max_hi = _mm256_max_ps(a_hi, b_hi);
890
+ auto nan_lo = _mm256_cmp_ps(a_lo, b_lo, _CMP_UNORD_Q);
891
+ auto nan_hi = _mm256_cmp_ps(a_hi, b_hi, _CMP_UNORD_Q);
892
+ // Exploit the fact that all-ones is a NaN.
893
+ auto o1 = _mm256_or_ps(max_lo, nan_lo);
894
+ auto o2 = _mm256_or_ps(max_hi, nan_hi);
895
+ return cvtfp32_fp16(o1, o2);
896
+ }
897
+
898
+ // Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
899
+ // either input is a NaN.
900
+ template <>
901
+ Vectorized<Half> inline minimum(const Vectorized<Half>& a, const Vectorized<Half>& b) {
902
+ __m256 a_lo, a_hi;
903
+ __m256 b_lo, b_hi;
904
+ cvtfp16_fp32(__m256i(a), a_lo, a_hi);
905
+ cvtfp16_fp32(__m256i(b), b_lo, b_hi);
906
+ auto min_lo = _mm256_min_ps(a_lo, b_lo);
907
+ auto min_hi = _mm256_min_ps(a_hi, b_hi);
908
+ auto nan_lo = _mm256_cmp_ps(a_lo, b_lo, _CMP_UNORD_Q);
909
+ auto nan_hi = _mm256_cmp_ps(a_hi, b_hi, _CMP_UNORD_Q);
910
+ // Exploit the fact that all-ones is a NaN.
911
+ auto o1 = _mm256_or_ps(min_lo, nan_lo);
912
+ auto o2 = _mm256_or_ps(min_hi, nan_hi);
913
+ return cvtfp32_fp16(o1, o2);
914
+ }
915
+
916
+ template <>
917
+ Vectorized<Half> inline clamp(const Vectorized<Half>& a,
918
+ const Vectorized<Half>& min, const Vectorized<Half>& max) {
919
+ __m256 a_lo, a_hi;
920
+ __m256 min_lo, min_hi;
921
+ __m256 max_lo, max_hi;
922
+ cvtfp16_fp32(__m256i(a), a_lo, a_hi);
923
+ cvtfp16_fp32(__m256i(min), min_lo, min_hi);
924
+ cvtfp16_fp32(__m256i(max), max_lo, max_hi);
925
+ auto o1 = _mm256_min_ps(max_lo, _mm256_max_ps(min_lo, a_lo));
926
+ auto o2 = _mm256_min_ps(max_hi, _mm256_max_ps(min_hi, a_hi));
927
+ return cvtfp32_fp16(o1, o2);
928
+ }
929
+
930
+ template <>
931
+ Vectorized<Half> inline clamp_max(const Vectorized<Half>& a, const Vectorized<Half>& max) {
932
+ __m256 a_lo, a_hi;
933
+ __m256 max_lo, max_hi;
934
+ cvtfp16_fp32(__m256i(a), a_lo, a_hi);
935
+ cvtfp16_fp32(__m256i(max), max_lo, max_hi);
936
+ auto o1 = _mm256_min_ps(max_lo, a_lo);
937
+ auto o2 = _mm256_min_ps(max_hi, a_hi);
938
+ return cvtfp32_fp16(o1, o2);
939
+ }
940
+
941
+ template <>
942
+ Vectorized<Half> inline clamp_min(const Vectorized<Half>& a, const Vectorized<Half>& min) {
943
+ __m256 a_lo, a_hi;
944
+ __m256 min_lo, min_hi;
945
+ cvtfp16_fp32(__m256i(a), a_lo, a_hi);
946
+ cvtfp16_fp32(__m256i(min), min_lo, min_hi);
947
+ auto o1 = _mm256_max_ps(min_lo, a_lo);
948
+ auto o2 = _mm256_max_ps(min_hi, a_hi);
949
+ return cvtfp32_fp16(o1, o2);
950
+ }
951
+
952
+ template <>
953
+ inline void convert(const Half* src, Half* dst, int64_t n) {
954
+ int64_t i;
955
+ #pragma unroll
956
+ for (i = 0; i <= (n - Vectorized<Half>::size()); i += Vectorized<Half>::size()) {
957
+ auto vsrc = _mm256_loadu_si256(reinterpret_cast<__m256i*>((void*)(src + i)));
958
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>((void*)(dst + i)), vsrc);
959
+ }
960
+ #pragma unroll
961
+ for (; i < n; i++) {
962
+ dst[i] = src[i];
963
+ }
964
+ }
965
+
966
+ template <>
967
+ inline void convert(const float* src, Half* dst, int64_t n) {
968
+ int64_t i;
969
+ for (i = 0; i + Vectorized<Half>::size() <= n; i += Vectorized<Half>::size()) {
970
+ __m256 a = _mm256_loadu_ps(&src[i]);
971
+ __m256 b = _mm256_loadu_ps(&src[i + 8]);
972
+
973
+ __m256i c = cvtfp32_fp16(a, b);
974
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(&dst[i]), c);
975
+ }
976
+ for (; i < n; i++) {
977
+ dst[i] = c10::convert<Half>(src[i]);
978
+ }
979
+ }
980
+
981
+ template <>
982
+ inline void convert(const double* src, Half* dst, int64_t n) {
983
+ auto load_float = [](const double *src) -> __m256 {
984
+ // Load one float vector from an array of doubles
985
+ __m128 a = _mm256_cvtpd_ps(_mm256_loadu_pd(src));
986
+ __m128 b = _mm256_cvtpd_ps(_mm256_loadu_pd(src + 4));
987
+ return _mm256_insertf128_ps(_mm256_castps128_ps256(a), b, 1);
988
+ };
989
+
990
+ int64_t i;
991
+ for (i = 0; i + Vectorized<Half>::size() <= n; i += Vectorized<Half>::size()) {
992
+ __m256 a = load_float(&src[i]);
993
+ __m256 b = load_float(&src[i + 8]);
994
+
995
+ __m256i c = cvtfp32_fp16(a, b);
996
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(&dst[i]), c);
997
+ }
998
+ for (; i < n; i++) {
999
+ dst[i] = c10::convert<Half>(src[i]);
1000
+ }
1001
+ }
1002
+
1003
+ template <>
1004
+ Vectorized<Half> inline fmadd(const Vectorized<Half>& a,
1005
+ const Vectorized<Half>& b, const Vectorized<Half>& c) {
1006
+ __m256 a_lo, a_hi;
1007
+ __m256 b_lo, b_hi;
1008
+ __m256 c_lo, c_hi;
1009
+ cvtfp16_fp32(__m256i(a), a_lo, a_hi);
1010
+ cvtfp16_fp32(__m256i(b), b_lo, b_hi);
1011
+ cvtfp16_fp32(__m256i(c), c_lo, c_hi);
1012
+ auto o1 = _mm256_fmadd_ps(a_lo, b_lo, c_lo);
1013
+ auto o2 = _mm256_fmadd_ps(a_hi, b_hi, c_hi);
1014
+ return cvtfp32_fp16(o1, o2);
1015
+ }
1016
+
1017
+ #define CONVERT_VECTORIZED_INIT(type, name) \
1018
+ inline std::tuple<Vectorized<float>, Vectorized<float>> convert_##name##_float(const Vectorized<type>& a) { \
1019
+ __m256 o1, o2; \
1020
+ cvt_to_fp32<type>(__m256i(a), o1, o2); \
1021
+ return std::make_tuple(o1, o2); \
1022
+ } \
1023
+ inline Vectorized<type> convert_float_##name(const Vectorized<float>& a, const Vectorized<float>& b) { \
1024
+ return cvt_from_fp32<type>(__m256(a), __m256(b)); \
1025
+ }
1026
+ CONVERT_VECTORIZED_INIT(BFloat16, bfloat16);
1027
+ CONVERT_VECTORIZED_INIT(Half, half);
1028
+
1029
+ #else // defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
1030
+
1031
+ #define CONVERT_NON_VECTORIZED_INIT(type, name) \
1032
+ inline std::tuple<Vectorized<float>, Vectorized<float>> convert_##name##_float(const Vectorized<type>& a) { \
1033
+ constexpr int64_t K = Vectorized<type>::size(); \
1034
+ __at_align__ float arr[K]; \
1035
+ __at_align__ type arr2[K]; \
1036
+ a.store(arr2); \
1037
+ convert(arr2, arr, K); \
1038
+ return std::make_tuple( \
1039
+ Vectorized<float>::loadu(arr), \
1040
+ Vectorized<float>::loadu(arr + Vectorized<float>::size())); \
1041
+ } \
1042
+ inline Vectorized<type> convert_float_##name(const Vectorized<float>& a, const Vectorized<float>& b) { \
1043
+ constexpr int64_t K = Vectorized<type>::size(); \
1044
+ __at_align__ float arr[K]; \
1045
+ __at_align__ type arr2[K]; \
1046
+ a.store(arr); \
1047
+ b.store(arr + Vectorized<float>::size()); \
1048
+ convert(arr, arr2, K); \
1049
+ return Vectorized<type>::loadu(arr2); \
1050
+ }
1051
+ CONVERT_NON_VECTORIZED_INIT(BFloat16, bfloat16);
1052
+ CONVERT_NON_VECTORIZED_INIT(Half, half);
1053
+
1054
+ #endif // defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
1055
+
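A usage sketch for the conversion helpers generated above (a hypothetical `scale_bf16`; it assumes the `Vectorized<float>` arithmetic operators from vec256_float.h):

Vectorized<BFloat16> scale_bf16(const Vectorized<BFloat16>& x, float s) {
  // Widen to two fp32 vectors, scale in fp32, then narrow back to bf16.
  auto [lo, hi] = convert_bfloat16_float(x);
  lo = lo * Vectorized<float>(s);
  hi = hi * Vectorized<float>(s);
  return convert_float_bfloat16(lo, hi);
}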
1056
+ #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
1057
+ #define LOAD_FP32_VECTORIZED_INIT(type, name) \
1058
+ inline void load_fp32_from_##name(const type *data, Vectorized<float>& out) { \
1059
+ auto values = _mm_loadu_si128(reinterpret_cast<const __m128i*>(data)); \
1060
+ __m256 out_values; \
1061
+ cvt_to_fp32<type>(values, out_values); \
1062
+ out = out_values; \
1063
+ } \
1064
+ \
1065
+ inline void load_fp32_from_##name(const type *data, Vectorized<float>& out1, Vectorized<float>& out2) { \
1066
+ auto vec = Vectorized<type>::loadu(data); \
1067
+ __m256 out1_values, out2_values; \
1068
+ cvt_to_fp32<type>(vec, out1_values, out2_values); \
1069
+ out1 = out1_values; \
1070
+ out2 = out2_values; \
1071
+ }
1072
+ LOAD_FP32_VECTORIZED_INIT(BFloat16, bf16);
1073
+ LOAD_FP32_VECTORIZED_INIT(Half, fp16);
1074
+
1075
+ #else // defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
1076
+ #define LOAD_FP32_NON_VECTORIZED_INIT(type, name) \
1077
+ inline void load_fp32_from_##name(const type *data, Vectorized<float>& out) { \
1078
+ __at_align__ float values[Vectorized<float>::size()]; \
1079
+ for (const auto k : c10::irange(Vectorized<float>::size())) { \
1080
+ values[k] = data[k]; \
1081
+ } \
1082
+ out = Vectorized<float>::loadu(values); \
1083
+ } \
1084
+ \
1085
+ inline void load_fp32_from_##name(const type *data, Vectorized<float>& out1, Vectorized<float>& out2) { \
1086
+ load_fp32_from_##name(data, out1); \
1087
+ data += Vectorized<float>::size(); \
1088
+ load_fp32_from_##name(data, out2); \
1089
+ }
1090
+ LOAD_FP32_NON_VECTORIZED_INIT(BFloat16, bf16);
1091
+ LOAD_FP32_NON_VECTORIZED_INIT(Half, fp16);
1092
+
1093
+ #endif
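A small caller sketch for the single-vector `load_fp32_from_bf16` overload defined above (hypothetical `sum_bf16_lanes` helper; it reads `Vectorized<float>::size()` bfloat16 values):

inline float sum_bf16_lanes(const BFloat16* data) {
  Vectorized<float> v;
  load_fp32_from_bf16(data, v);                     // widen bf16 lanes to fp32
  __at_align__ float buf[Vectorized<float>::size()];
  v.store(buf);
  float s = 0.f;
  for (const auto i : c10::irange(Vectorized<float>::size())) {
    s += buf[i];
  }
  return s;
}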
1094
+ }} // namespace at::vec::CPU_CAPABILITY
1095
+
1096
+ #pragma GCC diagnostic pop
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_complex_double.h ADDED
@@ -0,0 +1,431 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <c10/util/complex.h>
7
+ #include <c10/util/irange.h>
8
+ #include <ATen/cpu/vec/intrinsics.h>
9
+ #include <ATen/cpu/vec/vec_base.h>
10
+
11
+ #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
12
+ #include <sleef.h>
13
+ #endif
14
+
15
+ namespace at::vec {
16
+ // See Note [CPU_CAPABILITY namespace]
17
+ inline namespace CPU_CAPABILITY {
18
+
19
+ #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
20
+
21
+ template <> class Vectorized<c10::complex<double>> {
22
+ private:
23
+ __m256d values;
24
+ public:
25
+ using value_type = c10::complex<double>;
26
+ using size_type = int;
27
+ static constexpr size_type size() {
28
+ return 2;
29
+ }
30
+ Vectorized() {}
31
+ Vectorized(__m256d v) : values(v) {}
32
+ Vectorized(c10::complex<double> val) {
33
+ double real_value = val.real();
34
+ double imag_value = val.imag();
35
+ values = _mm256_setr_pd(real_value, imag_value,
36
+ real_value, imag_value);
37
+ }
38
+ Vectorized(c10::complex<double> val1, c10::complex<double> val2) {
39
+ values = _mm256_setr_pd(val1.real(), val1.imag(),
40
+ val2.real(), val2.imag());
41
+ }
42
+ operator __m256d() const {
43
+ return values;
44
+ }
45
+ template <int64_t mask>
46
+ static Vectorized<c10::complex<double>> blend(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
47
+ // convert c10::complex<V> index mask to V index mask: xy -> xxyy
48
+ static_assert (mask > -1 && mask < 4, "Unexpected mask value");
49
+ switch (mask) {
50
+ case 0:
51
+ return a;
52
+ case 1:
53
+ return _mm256_blend_pd(a.values, b.values, 0x03);
54
+ case 2:
55
+ return _mm256_blend_pd(a.values, b.values, 0x0c);
56
+ case 3: break;
57
+ }
58
+ return b;
59
+ }
60
+ static Vectorized<c10::complex<double>> blendv(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b,
61
+ const Vectorized<c10::complex<double>>& mask) {
62
+ // convert c10::complex<V> index mask to V index mask: xy -> xxyy
63
+ auto mask_ = _mm256_unpacklo_pd(mask.values, mask.values);
64
+ return _mm256_blendv_pd(a.values, b.values, mask_);
65
+
66
+ }
67
+ template<typename step_t>
68
+ static Vectorized<c10::complex<double>> arange(c10::complex<double> base = 0., step_t step = static_cast<step_t>(1)) {
69
+ return Vectorized<c10::complex<double>>(base,
70
+ base + step);
71
+ }
72
+ static Vectorized<c10::complex<double>> set(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b,
73
+ int64_t count = size()) {
74
+ switch (count) {
75
+ case 0:
76
+ return a;
77
+ case 1:
78
+ return blend<1>(a, b);
79
+ }
80
+ return b;
81
+ }
82
+ static Vectorized<c10::complex<double>> loadu(const void* ptr, int64_t count = size()) {
83
+ if (count == size())
84
+ return _mm256_loadu_pd(reinterpret_cast<const double*>(ptr));
85
+
86
+ __at_align__ double tmp_values[2*size()];
87
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
88
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
89
+ // instructions while a loop would be compiled to one instruction.
90
+ for (const auto i : c10::irange(2*size())) {
91
+ tmp_values[i] = 0.0;
92
+ }
93
+ std::memcpy(
94
+ tmp_values,
95
+ reinterpret_cast<const double*>(ptr),
96
+ count * sizeof(c10::complex<double>));
97
+ return _mm256_load_pd(tmp_values);
98
+ }
99
+ void store(void* ptr, int count = size()) const {
100
+ if (count == size()) {
101
+ _mm256_storeu_pd(reinterpret_cast<double*>(ptr), values);
102
+ } else if (count > 0) {
103
+ double tmp_values[2*size()];
104
+ _mm256_storeu_pd(reinterpret_cast<double*>(tmp_values), values);
105
+ std::memcpy(ptr, tmp_values, count * sizeof(c10::complex<double>));
106
+ }
107
+ }
108
+ const c10::complex<double>& operator[](int idx) const = delete;
109
+ c10::complex<double>& operator[](int idx) = delete;
110
+ Vectorized<c10::complex<double>> map(c10::complex<double> (*const f)(const c10::complex<double> &)) const {
111
+ __at_align__ c10::complex<double> tmp[size()];
112
+ store(tmp);
113
+ for (const auto i : c10::irange(size())) {
114
+ tmp[i] = f(tmp[i]);
115
+ }
116
+ return loadu(tmp);
117
+ }
118
+ __m256d abs_2_() const {
119
+ auto val_2 = _mm256_mul_pd(values, values); // a*a b*b
120
+ return _mm256_hadd_pd(val_2, val_2); // a*a+b*b a*a+b*b
121
+ }
122
+ __m256d abs_() const {
123
+ auto real = _mm256_movedup_pd(values); // real real
124
+ // movehdup_pd does not exist...
125
+ auto imag = _mm256_permute_pd(values, 0xf); // imag imag
126
+ return Sleef_hypotd4_u05(real, imag); // abs abs
127
+ }
128
+ Vectorized<c10::complex<double>> abs() const {
129
+ const __m256d real_mask = _mm256_castsi256_pd(_mm256_setr_epi64x(0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
130
+ 0xFFFFFFFFFFFFFFFF, 0x0000000000000000));
131
+ return _mm256_and_pd(abs_(), real_mask); // abs 0
132
+ }
133
+ __m256d angle_() const {
134
+ // angle = atan2(b, a)
135
+ auto b_a = _mm256_permute_pd(values, 0x05); // b a
136
+ return Sleef_atan2d4_u10(values, b_a); // 90-angle angle
137
+ }
138
+ Vectorized<c10::complex<double>> angle() const {
139
+ const __m256d real_mask = _mm256_castsi256_pd(_mm256_setr_epi64x(0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
140
+ 0xFFFFFFFFFFFFFFFF, 0x0000000000000000));
141
+ auto angle = _mm256_permute_pd(angle_(), 0x05); // angle 90-angle
142
+ return _mm256_and_pd(angle, real_mask); // angle 0
143
+ }
144
+ Vectorized<c10::complex<double>> sgn() const {
145
+ auto abs = abs_();
146
+ auto zero = _mm256_setzero_pd();
147
+ auto mask = _mm256_cmp_pd(abs, zero, _CMP_EQ_OQ);
148
+ auto div = values / abs;
149
+ return _mm256_blendv_pd(div, zero, mask);
150
+ }
151
+ __m256d real_() const {
152
+ const __m256d real_mask = _mm256_castsi256_pd(_mm256_setr_epi64x(0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
153
+ 0xFFFFFFFFFFFFFFFF, 0x0000000000000000));
154
+ return _mm256_and_pd(values, real_mask);
155
+ }
156
+ Vectorized<c10::complex<double>> real() const {
157
+ return real_();
158
+ }
159
+ __m256d imag_() const {
160
+ const __m256d imag_mask = _mm256_castsi256_pd(_mm256_setr_epi64x(0x0000000000000000, 0xFFFFFFFFFFFFFFFF,
161
+ 0x0000000000000000, 0xFFFFFFFFFFFFFFFF));
162
+ return _mm256_and_pd(values, imag_mask);
163
+ }
164
+ Vectorized<c10::complex<double>> imag() const {
165
+ return _mm256_permute_pd(imag_(), 0x05); //b a
166
+ }
167
+ __m256d conj_() const {
168
+ const __m256d sign_mask = _mm256_setr_pd(0.0, -0.0, 0.0, -0.0);
169
+ return _mm256_xor_pd(values, sign_mask); // a -b
170
+ }
171
+ Vectorized<c10::complex<double>> conj() const {
172
+ return conj_();
173
+ }
174
+ Vectorized<c10::complex<double>> log() const {
175
+ // Most trigonomic ops use the log() op to improve complex number performance.
176
+ return map(std::log);
177
+ }
178
+ Vectorized<c10::complex<double>> log2() const {
179
+ const __m256d log2_ = _mm256_set1_pd(std::log(2));
180
+ return _mm256_div_pd(log(), log2_);
181
+ }
182
+ Vectorized<c10::complex<double>> log10() const {
183
+ const __m256d log10_ = _mm256_set1_pd(std::log(10));
184
+ return _mm256_div_pd(log(), log10_);
185
+ }
186
+ Vectorized<c10::complex<double>> log1p() const {
187
+ return map(std::log1p);
188
+ }
189
+ Vectorized<c10::complex<double>> asin() const {
190
+ // asin(x)
191
+ // = -i*ln(iz + sqrt(1 -z^2))
192
+ // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi)))
193
+ // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi))
194
+ const __m256d one = _mm256_set1_pd(1);
195
+
196
+ auto conj = conj_();
197
+ auto b_a = _mm256_permute_pd(conj, 0x05); //-b a
198
+ auto ab = _mm256_mul_pd(conj, b_a); //-ab -ab
199
+ auto im = _mm256_add_pd(ab, ab); //-2ab -2ab
200
+
201
+ auto val_2 = _mm256_mul_pd(values, values); // a*a b*b
202
+ auto re = _mm256_hsub_pd(val_2, _mm256_permute_pd(val_2, 0x05)); // a*a-b*b b*b-a*a
203
+ re = _mm256_sub_pd(one, re);
204
+
205
+ auto root = Vectorized(_mm256_blend_pd(re, im, 0x0A)).sqrt(); //sqrt(re + i*im)
206
+ auto ln = Vectorized(_mm256_add_pd(b_a, root)).log(); //ln(iz + sqrt())
207
+ return Vectorized(_mm256_permute_pd(ln.values, 0x05)).conj(); //-i*ln()
208
+ }
209
+ Vectorized<c10::complex<double>> acos() const {
210
+ // acos(x) = pi/2 - asin(x)
211
+ constexpr auto pi_2d = c10::pi<double> / 2;
212
+ const __m256d pi_2 = _mm256_setr_pd(pi_2d, 0.0, pi_2d, 0.0);
213
+ return _mm256_sub_pd(pi_2, asin());
214
+ }
215
+ Vectorized<c10::complex<double>> atan() const;
216
+ Vectorized<c10::complex<double>> atanh() const {
217
+ return map(std::atanh);
218
+ }
219
+ Vectorized<c10::complex<double>> exp() const {
220
+ //exp(a + bi)
221
+ // = exp(a)*(cos(b) + sin(b)i)
222
+ auto exp = Sleef_expd4_u10(values); //exp(a) exp(b)
223
+ exp = _mm256_blend_pd(exp, _mm256_permute_pd(exp, 0x05), 0x0A); //exp(a) exp(a)
224
+
225
+ auto sin_cos = Sleef_sincosd4_u10(values); //[sin(a), cos(a)] [sin(b), cos(b)]
226
+ auto cos_sin = _mm256_blend_pd(_mm256_permute_pd(sin_cos.y, 0x05),
227
+ sin_cos.x, 0x0A); //cos(b) sin(b)
228
+ return _mm256_mul_pd(exp, cos_sin);
229
+ }
230
+ Vectorized<c10::complex<double>> exp2() const {
231
+ // Use identity 2**x = exp(log(2) * x)
232
+ const __m256d ln_2 = _mm256_set1_pd(c10::ln_2<double>);
233
+ Vectorized<c10::complex<double>> scaled_values = _mm256_mul_pd(values, ln_2);
234
+ return scaled_values.exp();
235
+ }
236
+ Vectorized<c10::complex<double>> expm1() const {
237
+ return map(std::expm1);
238
+ }
239
+ Vectorized<c10::complex<double>> sin() const {
240
+ return map(std::sin);
241
+ }
242
+ Vectorized<c10::complex<double>> sinh() const {
243
+ return map(std::sinh);
244
+ }
245
+ Vectorized<c10::complex<double>> cos() const {
246
+ return map(std::cos);
247
+ }
248
+ Vectorized<c10::complex<double>> cosh() const {
249
+ return map(std::cosh);
250
+ }
251
+ Vectorized<c10::complex<double>> ceil() const {
252
+ return _mm256_ceil_pd(values);
253
+ }
254
+ Vectorized<c10::complex<double>> floor() const {
255
+ return _mm256_floor_pd(values);
256
+ }
257
+ Vectorized<c10::complex<double>> neg() const {
258
+ auto zero = _mm256_setzero_pd();
259
+ return _mm256_sub_pd(zero, values);
260
+ }
261
+ Vectorized<c10::complex<double>> round() const {
262
+ return _mm256_round_pd(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
263
+ }
264
+ Vectorized<c10::complex<double>> tan() const {
265
+ return map(std::tan);
266
+ }
267
+ Vectorized<c10::complex<double>> tanh() const {
268
+ return map(std::tanh);
269
+ }
270
+ Vectorized<c10::complex<double>> trunc() const {
271
+ return _mm256_round_pd(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
272
+ }
273
+ Vectorized<c10::complex<double>> sqrt() const {
274
+ return map(std::sqrt);
275
+ }
276
+ Vectorized<c10::complex<double>> reciprocal() const;
277
+ Vectorized<c10::complex<double>> rsqrt() const {
278
+ return sqrt().reciprocal();
279
+ }
280
+ Vectorized<c10::complex<double>> pow(const Vectorized<c10::complex<double>> &exp) const {
281
+ __at_align__ c10::complex<double> x_tmp[size()];
282
+ __at_align__ c10::complex<double> y_tmp[size()];
283
+ store(x_tmp);
284
+ exp.store(y_tmp);
285
+ for (const auto i : c10::irange(size())) {
286
+ x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]);
287
+ }
288
+ return loadu(x_tmp);
289
+ }
290
+ // Comparison using the _CMP_**_OQ predicate.
291
+ // `O`: get false if an operand is NaN
292
+ // `Q`: do not raise if an operand is NaN
293
+ Vectorized<c10::complex<double>> operator==(const Vectorized<c10::complex<double>>& other) const {
294
+ return _mm256_cmp_pd(values, other.values, _CMP_EQ_OQ);
295
+ }
296
+ Vectorized<c10::complex<double>> operator!=(const Vectorized<c10::complex<double>>& other) const {
297
+ return _mm256_cmp_pd(values, other.values, _CMP_NEQ_UQ);
298
+ }
299
+ Vectorized<c10::complex<double>> operator<(const Vectorized<c10::complex<double>>&) const {
300
+ TORCH_CHECK(false, "not supported for complex numbers");
301
+ }
302
+ Vectorized<c10::complex<double>> operator<=(const Vectorized<c10::complex<double>>&) const {
303
+ TORCH_CHECK(false, "not supported for complex numbers");
304
+ }
305
+ Vectorized<c10::complex<double>> operator>(const Vectorized<c10::complex<double>>&) const {
306
+ TORCH_CHECK(false, "not supported for complex numbers");
307
+ }
308
+ Vectorized<c10::complex<double>> operator>=(const Vectorized<c10::complex<double>>&) const {
309
+ TORCH_CHECK(false, "not supported for complex numbers");
310
+ }
311
+
312
+ Vectorized<c10::complex<double>> eq(const Vectorized<c10::complex<double>>& other) const;
313
+ Vectorized<c10::complex<double>> ne(const Vectorized<c10::complex<double>>& other) const;
314
+ };
315
+
316
+ template <> Vectorized<c10::complex<double>> inline operator+(const Vectorized<c10::complex<double>> &a, const Vectorized<c10::complex<double>> &b) {
317
+ return _mm256_add_pd(a, b);
318
+ }
319
+
320
+ template <> Vectorized<c10::complex<double>> inline operator-(const Vectorized<c10::complex<double>> &a, const Vectorized<c10::complex<double>> &b) {
321
+ return _mm256_sub_pd(a, b);
322
+ }
323
+
324
+ template <> Vectorized<c10::complex<double>> inline operator*(const Vectorized<c10::complex<double>> &a, const Vectorized<c10::complex<double>> &b) {
325
+ //(a + bi) * (c + di) = (ac - bd) + (ad + bc)i
326
+ const __m256d sign_mask = _mm256_setr_pd(0.0, -0.0, 0.0, -0.0);
327
+ auto ac_bd = _mm256_mul_pd(a, b); //ac bd
328
+
329
+ auto d_c = _mm256_permute_pd(b, 0x05); //d c
330
+ d_c = _mm256_xor_pd(sign_mask, d_c); //d -c
331
+ auto ad_bc = _mm256_mul_pd(a, d_c); //ad -bc
332
+
333
+ auto ret = _mm256_hsub_pd(ac_bd, ad_bc); //ac - bd ad + bc
334
+ return ret;
335
+ }
336
+
337
+ template <> Vectorized<c10::complex<double>> inline operator/(const Vectorized<c10::complex<double>> &a, const Vectorized<c10::complex<double>> &b) {
338
+ //re + im*i = (a + bi) / (c + di)
339
+ auto mask = _mm256_set1_pd(-0.f);
340
+ auto fabs_cd = _mm256_andnot_pd(mask, b); // |c| |d|
341
+ auto fabs_dc = _mm256_permute_pd(fabs_cd, 0x05); // |d| |c|
342
+ auto scale = _mm256_div_pd(_mm256_set1_pd(1.0f), _mm256_max_pd(fabs_cd, fabs_dc)); // 1/sc 1/sc
343
+ auto a2 = _mm256_mul_pd(a, scale); // a/sc b/sc
344
+ auto b2 = _mm256_mul_pd(b, scale); // c/sc d/sc
345
+ auto acbd2 = _mm256_mul_pd(a2, b2);
346
+
347
+ const __m256d sign_mask = _mm256_setr_pd(-0.0, 0.0, -0.0, 0.0);
348
+ auto dc2 = _mm256_permute_pd(b2, 0x05); // d/sc c/sc
349
+ dc2 = _mm256_xor_pd(sign_mask, dc2); // -d/|c,d| c/sc
350
+ auto adbc2 = _mm256_mul_pd(a2, dc2); //-ad/sc^2 bc/sc^2
351
+ auto res2 = _mm256_hadd_pd(acbd2, adbc2); //(ac+bd)/sc^2 (bc-ad)/sc^2
352
+
353
+ // get the denominator
354
+ auto denom2 = Vectorized<c10::complex<double>>(b2).abs_2_(); // (c^2+d^2)/sc^2 (c^2+d^2)/sc^2
355
+ res2 = _mm256_div_pd(res2, denom2);
356
+ return res2;
357
+ }
358
+
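A scalar sketch of the scaling used in operator/ above: both operands are first divided by max(|c|, |d|) so the squared terms cannot overflow or underflow before the final division. Illustrative only (`div_scaled_ref` is not part of this header, and assumes <algorithm> and <cmath>):

static inline c10::complex<double> div_scaled_ref(c10::complex<double> x,
                                                  c10::complex<double> y) {
  double sc = std::max(std::fabs(y.real()), std::fabs(y.imag()));
  double a = x.real() / sc, b = x.imag() / sc;   // a/sc  b/sc
  double c = y.real() / sc, d = y.imag() / sc;   // c/sc  d/sc
  double denom = c * c + d * d;                  // (c^2 + d^2) / sc^2
  return {(a * c + b * d) / denom, (b * c - a * d) / denom};
}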
359
+ // reciprocal. Implement this here so we can use multiplication.
360
+ inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::reciprocal() const {
361
+ //re + im*i = (a + bi) / (c + di)
362
+ //re = (ac + bd)/abs_2() = c/abs_2()
363
+ //im = (bc - ad)/abs_2() = d/abs_2()
364
+ const __m256d sign_mask = _mm256_setr_pd(0.0, -0.0, 0.0, -0.0);
365
+ auto c_d = _mm256_xor_pd(sign_mask, values); //c -d
366
+ return _mm256_div_pd(c_d, abs_2_());
367
+ }
368
+
369
+ inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::atan() const {
370
+ // atan(x) = i/2 * ln((i + z)/(i - z))
371
+ const __m256d i = _mm256_setr_pd(0.0, 1.0, 0.0, 1.0);
372
+ const Vectorized i_half = _mm256_setr_pd(0.0, 0.5, 0.0, 0.5);
373
+
374
+ auto sum = Vectorized(_mm256_add_pd(i, values)); // a 1+b
375
+ auto sub = Vectorized(_mm256_sub_pd(i, values)); // -a 1-b
376
+ auto ln = (sum/sub).log(); // ln((i + z)/(i - z))
377
+ return i_half*ln; // i/2*ln()
378
+ }
379
+
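The identity used by `atan()` above can be checked against std::atan on std::complex values; a small illustrative helper (assumes <complex> is available, and should agree with std::atan up to rounding):

static inline std::complex<double> atan_via_log_ref(std::complex<double> z) {
  const std::complex<double> I(0.0, 1.0);
  // atan(z) = i/2 * ln((i + z) / (i - z)), the same identity as the comment above.
  return 0.5 * I * std::log((I + z) / (I - z));
}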
380
+ template <>
381
+ Vectorized<c10::complex<double>> inline maximum(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
382
+ auto abs_a = a.abs_2_();
383
+ auto abs_b = b.abs_2_();
384
+ auto mask = _mm256_cmp_pd(abs_a, abs_b, _CMP_LT_OQ);
385
+ auto max = _mm256_blendv_pd(a, b, mask);
386
+ // Exploit the fact that all-ones is a NaN.
387
+ auto isnan = _mm256_cmp_pd(abs_a, abs_b, _CMP_UNORD_Q);
388
+ return _mm256_or_pd(max, isnan);
389
+ }
390
+
391
+ template <>
392
+ Vectorized<c10::complex<double>> inline minimum(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
393
+ auto abs_a = a.abs_2_();
394
+ auto abs_b = b.abs_2_();
395
+ auto mask = _mm256_cmp_pd(abs_a, abs_b, _CMP_GT_OQ);
396
+ auto min = _mm256_blendv_pd(a, b, mask);
397
+ // Exploit the fact that all-ones is a NaN.
398
+ auto isnan = _mm256_cmp_pd(abs_a, abs_b, _CMP_UNORD_Q);
399
+ return _mm256_or_pd(min, isnan);
400
+ }
401
+
402
+ template <>
403
+ Vectorized<c10::complex<double>> inline operator&(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
404
+ return _mm256_and_pd(a, b);
405
+ }
406
+
407
+ template <>
408
+ Vectorized<c10::complex<double>> inline operator|(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
409
+ return _mm256_or_pd(a, b);
410
+ }
411
+
412
+ template <>
413
+ Vectorized<c10::complex<double>> inline operator^(const Vectorized<c10::complex<double>>& a, const Vectorized<c10::complex<double>>& b) {
414
+ return _mm256_xor_pd(a, b);
415
+ }
416
+
417
+ inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::eq(const Vectorized<c10::complex<double>>& other) const {
418
+ auto eq = (*this == other); // compares real and imag individually
419
+ // If both real numbers and imag numbers are equal, then the complex numbers are equal
420
+ return (eq.real() & eq.imag()) & Vectorized<c10::complex<double>>(_mm256_set1_pd(1.0));
421
+ }
422
+
423
+ inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::ne(const Vectorized<c10::complex<double>>& other) const {
424
+ auto ne = (*this != other); // compares real and imag individually
425
+ // If either real numbers or imag numbers are not equal, then the complex numbers are not equal
426
+ return (ne.real() | ne.imag()) & Vectorized<c10::complex<double>>(_mm256_set1_pd(1.0));
427
+ }
428
+
429
+ #endif
430
+
431
+ }} // namespace at::vec::CPU_CAPABILITY
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_complex_float.h ADDED
@@ -0,0 +1,468 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <c10/util/complex.h>
7
+ #include <c10/util/irange.h>
8
+ #include <ATen/cpu/vec/intrinsics.h>
9
+ #include <ATen/cpu/vec/vec_base.h>
10
+ #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
11
+ #include <sleef.h>
12
+ #endif
13
+
14
+ namespace at::vec {
15
+ // See Note [CPU_CAPABILITY namespace]
16
+ inline namespace CPU_CAPABILITY {
17
+
18
+ #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
19
+
20
+ template <> class Vectorized<c10::complex<float>> {
21
+ private:
22
+ __m256 values;
23
+ public:
24
+ using value_type = c10::complex<float>;
25
+ using size_type = int;
26
+ static constexpr size_type size() {
27
+ return 4;
28
+ }
29
+ Vectorized() {}
30
+ Vectorized(__m256 v) : values(v) {}
31
+ Vectorized(c10::complex<float> val) {
32
+ float real_value = val.real();
33
+ float imag_value = val.imag();
34
+ values = _mm256_setr_ps(real_value, imag_value,
35
+ real_value, imag_value,
36
+ real_value, imag_value,
37
+ real_value, imag_value
38
+ );
39
+ }
40
+ Vectorized(c10::complex<float> val1, c10::complex<float> val2, c10::complex<float> val3, c10::complex<float> val4) {
41
+ values = _mm256_setr_ps(val1.real(), val1.imag(),
42
+ val2.real(), val2.imag(),
43
+ val3.real(), val3.imag(),
44
+ val4.real(), val4.imag()
45
+ );
46
+ }
47
+ operator __m256() const {
48
+ return values;
49
+ }
50
+ template <int64_t mask>
51
+ static Vectorized<c10::complex<float>> blend(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b) {
52
+ // convert c10::complex<V> index mask to V index mask: xy -> xxyy
53
+ static_assert(mask > -1 && mask < 16, "Unexpected mask range");
54
+ switch (mask) {
55
+ case 0:
56
+ return a;
57
+ case 1:
58
+ return _mm256_blend_ps(a.values, b.values, 0x03); //b0000 0001 = b0000 0011
59
+ case 2:
60
+ return _mm256_blend_ps(a.values, b.values, 0x0C); //b0000 0010 = b0000 1100
61
+ case 3:
62
+ return _mm256_blend_ps(a.values, b.values, 0x0F); //b0000 0011 = b0000 1111
63
+ case 4:
64
+ return _mm256_blend_ps(a.values, b.values, 0x30); //b0000 0100 = b0011 0000
65
+ case 5:
66
+ return _mm256_blend_ps(a.values, b.values, 0x33); //b0000 0101 = b0011 0011
67
+ case 6:
68
+ return _mm256_blend_ps(a.values, b.values, 0x3C); //b0000 0110 = b0011 1100
69
+ case 7:
70
+ return _mm256_blend_ps(a.values, b.values, 0x3F); //b0000 0111 = b0011 1111
71
+ case 8:
72
+ return _mm256_blend_ps(a.values, b.values, 0xC0); //b0000 1000 = b1100 0000
73
+ case 9:
74
+ return _mm256_blend_ps(a.values, b.values, 0xC3); //b0000 1001 = b1100 0011
75
+ case 10:
76
+ return _mm256_blend_ps(a.values, b.values, 0xCC); //b0000 1010 = b1100 1100
77
+ case 11:
78
+ return _mm256_blend_ps(a.values, b.values, 0xCF); //b0000 1011 = b1100 1111
79
+ case 12:
80
+ return _mm256_blend_ps(a.values, b.values, 0xF0); //b0000 1100 = b1111 0000
81
+ case 13:
82
+ return _mm256_blend_ps(a.values, b.values, 0xF3); //b0000 1101 = b1111 0011
83
+ case 14:
84
+ return _mm256_blend_ps(a.values, b.values, 0xFC); //b0000 1110 = b1111 1100
85
+ default: break;
86
+ }
87
+ return b;
88
+ }
89
+ static Vectorized<c10::complex<float>> blendv(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b,
90
+ const Vectorized<c10::complex<float>>& mask) {
91
+ // convert c10::complex<V> index mask to V index mask: xy -> xxyy
92
+ auto mask_ = _mm256_unpacklo_ps(mask.values, mask.values);
93
+ return _mm256_blendv_ps(a.values, b.values, mask_);
94
+
95
+ }
96
+ template<typename step_t>
97
+ static Vectorized<c10::complex<float>> arange(c10::complex<float> base = 0., step_t step = static_cast<step_t>(1)) {
98
+ return Vectorized<c10::complex<float>>(base,
99
+ base + step,
100
+ base + c10::complex<float>(2)*step,
101
+ base + c10::complex<float>(3)*step);
102
+ }
103
+ static Vectorized<c10::complex<float>> set(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b,
104
+ int64_t count = size()) {
105
+ switch (count) {
106
+ case 0:
107
+ return a;
108
+ case 1:
109
+ return blend<1>(a, b);
110
+ case 2:
111
+ return blend<3>(a, b);
112
+ case 3:
113
+ return blend<7>(a, b);
114
+ }
115
+ return b;
116
+ }
117
+ static Vectorized<c10::complex<float>> loadu(const void* ptr, int64_t count = size()) {
118
+ if (count == size())
119
+ return _mm256_loadu_ps(reinterpret_cast<const float*>(ptr));
120
+
121
+ __at_align__ float tmp_values[2*size()];
122
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
123
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
124
+ // instructions while a loop would be compiled to one instruction.
125
+ for (const auto i : c10::irange(2*size())) {
126
+ tmp_values[i] = 0.0;
127
+ }
128
+ std::memcpy(
129
+ tmp_values,
130
+ reinterpret_cast<const float*>(ptr),
131
+ count * sizeof(c10::complex<float>));
132
+ return _mm256_load_ps(tmp_values);
133
+ }
134
+ void store(void* ptr, int count = size()) const {
135
+ if (count == size()) {
136
+ _mm256_storeu_ps(reinterpret_cast<float*>(ptr), values);
137
+ } else if (count > 0) {
138
+ float tmp_values[2*size()];
139
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp_values), values);
140
+ std::memcpy(ptr, tmp_values, count * sizeof(c10::complex<float>));
141
+ }
142
+ }
143
+ const c10::complex<float>& operator[](int idx) const = delete;
144
+ c10::complex<float>& operator[](int idx) = delete;
145
+ Vectorized<c10::complex<float>> map(c10::complex<float> (*const f)(const c10::complex<float> &)) const {
146
+ __at_align__ c10::complex<float> tmp[size()];
147
+ store(tmp);
148
+ for (const auto i : c10::irange(size())) {
149
+ tmp[i] = f(tmp[i]);
150
+ }
151
+ return loadu(tmp);
152
+ }
153
+ __m256 abs_2_() const {
154
+ auto val_2 = _mm256_mul_ps(values, values); // a*a b*b
155
+ auto ret = _mm256_hadd_ps(val_2, val_2); // a*a+b*b a*a+b*b
156
+ return _mm256_permute_ps(ret, 0xD8);
157
+ }
158
+ __m256 abs_() const {
159
+ auto real = _mm256_moveldup_ps(values); // real real
160
+ auto imag = _mm256_movehdup_ps(values); // imag imag
161
+ return Sleef_hypotf8_u05(real, imag); // abs abs
162
+ }
163
+ Vectorized<c10::complex<float>> abs() const {
164
+ const __m256 real_mask = _mm256_castsi256_ps(_mm256_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
165
+ 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000));
166
+ return _mm256_and_ps(abs_(), real_mask); // abs 0
167
+ }
168
+ __m256 angle_() const {
169
+ // angle = atan2(b, a)
170
+ auto b_a = _mm256_permute_ps(values, 0xB1); // b a
171
+ return Sleef_atan2f8_u10(values, b_a); // 90-angle angle
172
+ }
173
+ Vectorized<c10::complex<float>> angle() const {
174
+ const __m256 real_mask = _mm256_castsi256_ps(_mm256_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
175
+ 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000));
176
+ auto angle = _mm256_permute_ps(angle_(), 0xB1); // angle 90-angle
177
+ return _mm256_and_ps(angle, real_mask); // angle 0
178
+ }
179
+ Vectorized<c10::complex<float>> sgn() const {
180
+ auto abs = abs_();
181
+ auto zero = _mm256_setzero_ps();
182
+ auto mask = _mm256_cmp_ps(abs, zero, _CMP_EQ_OQ);
183
+ auto div = values / abs;
184
+ return _mm256_blendv_ps(div, zero, mask);
185
+ }
186
+ __m256 real_() const {
187
+ const __m256 real_mask = _mm256_castsi256_ps(_mm256_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
188
+ 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000));
189
+ return _mm256_and_ps(values, real_mask);
190
+ }
191
+ Vectorized<c10::complex<float>> real() const {
192
+ return real_();
193
+ }
194
+ __m256 imag_() const {
195
+ const __m256 imag_mask = _mm256_castsi256_ps(_mm256_setr_epi32(0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF,
196
+ 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF));
197
+ return _mm256_and_ps(values, imag_mask);
198
+ }
199
+ Vectorized<c10::complex<float>> imag() const {
200
+ return _mm256_permute_ps(imag_(), 0xB1); //b a
201
+ }
202
+ __m256 conj_() const {
203
+ const __m256 sign_mask = _mm256_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
204
+ return _mm256_xor_ps(values, sign_mask); // a -b
205
+ }
206
+ Vectorized<c10::complex<float>> conj() const {
207
+ return conj_();
208
+ }
209
+ Vectorized<c10::complex<float>> log() const {
210
+ // Most trigonomic ops use the log() op to improve complex number performance.
211
+ return map(std::log);
212
+ }
213
+ Vectorized<c10::complex<float>> log2() const {
214
+ const __m256 log2_ = _mm256_set1_ps(std::log(2));
215
+ return _mm256_div_ps(log(), log2_);
216
+ }
217
+ Vectorized<c10::complex<float>> log10() const {
218
+ const __m256 log10_ = _mm256_set1_ps(std::log(10));
219
+ return _mm256_div_ps(log(), log10_);
220
+ }
221
+ Vectorized<c10::complex<float>> log1p() const {
222
+ return map(std::log1p);
223
+ }
224
+ Vectorized<c10::complex<float>> asin() const {
225
+ // asin(x)
226
+ // = -i*ln(iz + sqrt(1 -z^2))
227
+ // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi)))
228
+ // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi))
229
+ const __m256 one = _mm256_set1_ps(1);
230
+
231
+ auto conj = conj_();
232
+ auto b_a = _mm256_permute_ps(conj, 0xB1); //-b a
233
+ auto ab = _mm256_mul_ps(conj, b_a); //-ab -ab
234
+ auto im = _mm256_add_ps(ab, ab); //-2ab -2ab
235
+
236
+ auto val_2 = _mm256_mul_ps(values, values); // a*a b*b
237
+ auto re = _mm256_hsub_ps(val_2, _mm256_permute_ps(val_2, 0xB1)); // a*a-b*b b*b-a*a
238
+ re = _mm256_permute_ps(re, 0xD8);
239
+ re = _mm256_sub_ps(one, re);
240
+
241
+ auto root = Vectorized(_mm256_blend_ps(re, im, 0xAA)).sqrt(); //sqrt(re + i*im)
242
+ auto ln = Vectorized(_mm256_add_ps(b_a, root)).log(); //ln(iz + sqrt())
243
+ return Vectorized(_mm256_permute_ps(ln.values, 0xB1)).conj(); //-i*ln()
244
+ }
245
+ Vectorized<c10::complex<float>> acos() const {
246
+ return map(std::acos);
247
+ }
248
+ Vectorized<c10::complex<float>> atan() const;
249
+ Vectorized<c10::complex<float>> atanh() const {
250
+ return map(std::atanh);
251
+ }
252
+ Vectorized<c10::complex<float>> exp() const {
253
+ //exp(a + bi)
254
+ // = exp(a)*(cos(b) + sin(b)i)
255
+ auto exp = Sleef_expf8_u10(values); //exp(a) exp(b)
256
+ exp = _mm256_blend_ps(exp, _mm256_permute_ps(exp, 0xB1), 0xAA); //exp(a) exp(a)
257
+
258
+ auto sin_cos = Sleef_sincosf8_u10(values); //[sin(a), cos(a)] [sin(b), cos(b)]
259
+ auto cos_sin = _mm256_blend_ps(_mm256_permute_ps(sin_cos.y, 0xB1),
260
+ sin_cos.x, 0xAA); //cos(b) sin(b)
261
+ return _mm256_mul_ps(exp, cos_sin);
262
+ }
263
+ Vectorized<c10::complex<float>> exp2() const {
264
+ // Use identity 2**x = exp(log(2) * x)
265
+ const __m256 ln_2 = _mm256_set1_ps(c10::ln_2<float>);
266
+ Vectorized<c10::complex<float>> scaled_values = _mm256_mul_ps(values, ln_2);
267
+ return scaled_values.exp();
268
+ }
269
+ Vectorized<c10::complex<float>> expm1() const {
270
+ return map(std::expm1);
271
+ }
272
+ Vectorized<c10::complex<float>> sin() const {
273
+ return map(std::sin);
274
+ }
275
+ Vectorized<c10::complex<float>> sinh() const {
276
+ return map(std::sinh);
277
+ }
278
+ Vectorized<c10::complex<float>> cos() const {
279
+ return map(std::cos);
280
+ }
281
+ Vectorized<c10::complex<float>> cosh() const {
282
+ return map(std::cosh);
283
+ }
284
+ Vectorized<c10::complex<float>> ceil() const {
285
+ return _mm256_ceil_ps(values);
286
+ }
287
+ Vectorized<c10::complex<float>> floor() const {
288
+ return _mm256_floor_ps(values);
289
+ }
290
+ Vectorized<c10::complex<float>> neg() const {
291
+ auto zero = _mm256_setzero_ps();
292
+ return _mm256_sub_ps(zero, values);
293
+ }
294
+ Vectorized<c10::complex<float>> round() const {
295
+ return _mm256_round_ps(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
296
+ }
297
+ Vectorized<c10::complex<float>> tan() const {
298
+ return map(std::tan);
299
+ }
300
+ Vectorized<c10::complex<float>> tanh() const {
301
+ return map(std::tanh);
302
+ }
303
+ Vectorized<c10::complex<float>> trunc() const {
304
+ return _mm256_round_ps(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
305
+ }
306
+ Vectorized<c10::complex<float>> sqrt() const {
307
+ return map(std::sqrt);
308
+ }
309
+ Vectorized<c10::complex<float>> reciprocal() const;
310
+ Vectorized<c10::complex<float>> rsqrt() const {
311
+ return sqrt().reciprocal();
312
+ }
313
+ Vectorized<c10::complex<float>> pow(const Vectorized<c10::complex<float>> &exp) const {
314
+ __at_align__ c10::complex<float> x_tmp[size()];
315
+ __at_align__ c10::complex<float> y_tmp[size()];
316
+ store(x_tmp);
317
+ exp.store(y_tmp);
318
+ for (const auto i : c10::irange(size())) {
319
+ x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]);
320
+ }
321
+ return loadu(x_tmp);
322
+ }
323
+ // Comparison using the _CMP_**_OQ predicate.
324
+ // `O`: get false if an operand is NaN
325
+ // `Q`: do not raise if an operand is NaN
326
+ Vectorized<c10::complex<float>> operator==(const Vectorized<c10::complex<float>>& other) const {
327
+ return _mm256_cmp_ps(values, other.values, _CMP_EQ_OQ);
328
+ }
329
+ Vectorized<c10::complex<float>> operator!=(const Vectorized<c10::complex<float>>& other) const {
330
+ return _mm256_cmp_ps(values, other.values, _CMP_NEQ_UQ);
331
+ }
332
+ Vectorized<c10::complex<float>> operator<(const Vectorized<c10::complex<float>>& /*other*/) const {
333
+ TORCH_CHECK(false, "not supported for complex numbers");
334
+ }
335
+ Vectorized<c10::complex<float>> operator<=(const Vectorized<c10::complex<float>>& /*other*/) const {
336
+ TORCH_CHECK(false, "not supported for complex numbers");
337
+ }
338
+ Vectorized<c10::complex<float>> operator>(const Vectorized<c10::complex<float>>& /*other*/) const {
339
+ TORCH_CHECK(false, "not supported for complex numbers");
340
+ }
341
+ Vectorized<c10::complex<float>> operator>=(const Vectorized<c10::complex<float>>& /*other*/) const {
342
+ TORCH_CHECK(false, "not supported for complex numbers");
343
+ }
344
+
345
+ Vectorized<c10::complex<float>> eq(const Vectorized<c10::complex<float>>& other) const;
346
+ Vectorized<c10::complex<float>> ne(const Vectorized<c10::complex<float>>& other) const;
347
+ };
348
+
349
+ template <> Vectorized<c10::complex<float>> inline operator+(const Vectorized<c10::complex<float>> &a, const Vectorized<c10::complex<float>> &b) {
350
+ return _mm256_add_ps(a, b);
351
+ }
352
+
353
+ template <> Vectorized<c10::complex<float>> inline operator-(const Vectorized<c10::complex<float>> &a, const Vectorized<c10::complex<float>> &b) {
354
+ return _mm256_sub_ps(a, b);
355
+ }
356
+
357
+ template <> Vectorized<c10::complex<float>> inline operator*(const Vectorized<c10::complex<float>> &a, const Vectorized<c10::complex<float>> &b) {
358
+ //(a + bi) * (c + di) = (ac - bd) + (ad + bc)i
359
+ const __m256 sign_mask = _mm256_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
360
+ auto ac_bd = _mm256_mul_ps(a, b); //ac bd
361
+
362
+ auto d_c = _mm256_permute_ps(b, 0xB1); //d c
363
+ d_c = _mm256_xor_ps(sign_mask, d_c); //d -c
364
+ auto ad_bc = _mm256_mul_ps(a, d_c); //ad -bc
365
+
366
+ auto ret = _mm256_hsub_ps(ac_bd, ad_bc); //ac - bd ad + bc
367
+ ret = _mm256_permute_ps(ret, 0xD8);
368
+ return ret;
369
+ }
370
+
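A scalar reference for the lane arithmetic in operator* above (illustrative `mul_ref`): `a*b` yields (ac, bd) per complex element, `a*(d, -c)` yields (ad, -bc), and the horizontal subtract, followed by the permute that restores element order, combines them into (ac - bd, ad + bc):

static inline c10::complex<float> mul_ref(c10::complex<float> x, c10::complex<float> y) {
  float ac = x.real() * y.real(), bd = x.imag() * y.imag();
  float ad = x.real() * y.imag(), bc = x.imag() * y.real();
  return {ac - bd, ad + bc};   // (a + bi)(c + di) = (ac - bd) + (ad + bc)i
}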
371
+ template <> Vectorized<c10::complex<float>> inline operator/(const Vectorized<c10::complex<float>> &a, const Vectorized<c10::complex<float>> &b) {
372
+ //re + im*i = (a + bi) / (c + di)
373
+ auto mask = _mm256_set1_ps(-0.f);
374
+ auto fabs_cd = _mm256_andnot_ps(mask, b); // |c| |d|
375
+ auto fabs_dc = _mm256_permute_ps(fabs_cd, 0xB1); // |d| |c|
376
+ auto scale = _mm256_rcp_ps(_mm256_max_ps(fabs_cd, fabs_dc)); // 1/sc 1/sc
377
+ auto a2 = _mm256_mul_ps(a, scale); // a/sc b/sc
378
+ auto b2 = _mm256_mul_ps(b, scale); // c/sc d/sc
379
+ auto acbd2 = _mm256_mul_ps(a2, b2);
380
+
381
+ const __m256 sign_mask = _mm256_setr_ps(-0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0);
382
+ auto dc2 = _mm256_permute_ps(b2, 0xB1); // d/sc c/sc
383
+ dc2 = _mm256_xor_ps(sign_mask, dc2); // -d/|c,d| c/sc
384
+ auto adbc2 = _mm256_mul_ps(a2, dc2); //-ad/sc^2 bc/sc^2
385
+ auto res2 = _mm256_hadd_ps(acbd2, adbc2); //(ac+bd)/sc^2 (bc-ad)/sc^2
386
+ res2 = _mm256_permute_ps(res2, 0xD8);
387
+
388
+ // get the denominator
389
+ auto denom2 = Vectorized<c10::complex<float>>(b2).abs_2_(); // (c^2+d^2)/sc^2 (c^2+d^2)/sc^2
390
+ res2 = _mm256_div_ps(res2, denom2);
391
+ return res2;
392
+ }
393
+
394
+ // reciprocal. Implement this here so we can use multiplication.
395
+ inline Vectorized<c10::complex<float>> Vectorized<c10::complex<float>>::reciprocal() const {
396
+ //re + im*i = (a + bi) / (c + di)
397
+ //re = (ac + bd)/abs_2() = c/abs_2()
398
+ //im = (bc - ad)/abs_2() = d/abs_2()
399
+ const __m256 sign_mask = _mm256_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
400
+ auto c_d = _mm256_xor_ps(sign_mask, values); //c -d
401
+ return _mm256_div_ps(c_d, abs_2_());
402
+ }
403
+
404
+ inline Vectorized<c10::complex<float>> Vectorized<c10::complex<float>>::atan() const {
405
+ // atan(x) = i/2 * ln((i + z)/(i - z))
406
+ const __m256 i = _mm256_setr_ps(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0);
407
+ const Vectorized i_half = _mm256_setr_ps(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5);
408
+
409
+ auto sum = Vectorized(_mm256_add_ps(i, values)); // a 1+b
410
+ auto sub = Vectorized(_mm256_sub_ps(i, values)); // -a 1-b
411
+ auto ln = (sum/sub).log(); // ln((i + z)/(i - z))
412
+ return i_half*ln; // i/2*ln()
413
+ }
414
+
415
+ template <>
416
+ Vectorized<c10::complex<float>> inline maximum(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b) {
417
+ auto abs_a = a.abs_2_();
418
+ auto abs_b = b.abs_2_();
419
+ auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_LT_OQ);
420
+ auto max = _mm256_blendv_ps(a, b, mask);
421
+ // Exploit the fact that all-ones is a NaN.
422
+ auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q);
423
+ return _mm256_or_ps(max, isnan);
424
+ }
425
+
426
+ template <>
427
+ Vectorized<c10::complex<float>> inline minimum(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b) {
428
+ auto abs_a = a.abs_2_();
429
+ auto abs_b = b.abs_2_();
430
+ auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_GT_OQ);
431
+ auto min = _mm256_blendv_ps(a, b, mask);
432
+ // Exploit the fact that all-ones is a NaN.
433
+ auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q);
434
+ return _mm256_or_ps(min, isnan);
435
+ }
436
+
437
+ template <>
438
+ Vectorized<c10::complex<float>> inline operator&(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b) {
439
+ return _mm256_and_ps(a, b);
440
+ }
441
+
442
+ template <>
443
+ Vectorized<c10::complex<float>> inline operator|(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b) {
444
+ return _mm256_or_ps(a, b);
445
+ }
446
+
447
+ template <>
448
+ Vectorized<c10::complex<float>> inline operator^(const Vectorized<c10::complex<float>>& a, const Vectorized<c10::complex<float>>& b) {
449
+ return _mm256_xor_ps(a, b);
450
+ }
451
+
452
+ inline Vectorized<c10::complex<float>> Vectorized<c10::complex<float>>::eq(
453
+ const Vectorized<c10::complex<float>>& other) const {
454
+ auto eq = (*this == other); // compares real and imag individually
455
+ // If both real numbers and imag numbers are equal, then the complex numbers are equal
456
+ return (eq.real() & eq.imag()) & Vectorized<c10::complex<float>>(_mm256_set1_ps(1.0f));
457
+ }
458
+
459
+ inline Vectorized<c10::complex<float>> Vectorized<c10::complex<float>>::ne(
460
+ const Vectorized<c10::complex<float>>& other) const {
461
+ auto ne = (*this != other); // compares real and imag individually
462
+ // If either real numbers or imag numbers are not equal, then the complex numbers are not equal
463
+ return (ne.real() | ne.imag()) & Vectorized<c10::complex<float>>(_mm256_set1_ps(1.0f));
464
+ }
465
+
466
+ #endif
467
+
468
+ }} // namespace at::vec::CPU_CAPABILITY
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_double.h ADDED
@@ -0,0 +1,442 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+ #include <ATen/cpu/vec/vec_base.h>
8
+ #include <c10/util/irange.h>
9
+ #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
10
+ #include <sleef.h>
11
+ #endif
12
+
13
+ namespace at::vec {
14
+ // See Note [CPU_CAPABILITY namespace]
15
+ inline namespace CPU_CAPABILITY {
16
+
17
+
18
+ #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
19
+
20
+ template <> class Vectorized<double> {
21
+ private:
22
+ __m256d values;
23
+ public:
24
+ using value_type = double;
25
+ using size_type = int;
26
+ static constexpr size_type size() {
27
+ return 4;
28
+ }
29
+ Vectorized() {}
30
+ Vectorized(__m256d v) : values(v) {}
31
+ Vectorized(double val) {
32
+ values = _mm256_set1_pd(val);
33
+ }
34
+ Vectorized(double val1, double val2, double val3, double val4) {
35
+ values = _mm256_setr_pd(val1, val2, val3, val4);
36
+ }
37
+ operator __m256d() const {
38
+ return values;
39
+ }
40
+ template <int64_t mask>
41
+ static Vectorized<double> blend(const Vectorized<double>& a, const Vectorized<double>& b) {
42
+ return _mm256_blend_pd(a.values, b.values, mask);
43
+ }
44
+ static Vectorized<double> blendv(const Vectorized<double>& a, const Vectorized<double>& b,
45
+ const Vectorized<double>& mask) {
46
+ return _mm256_blendv_pd(a.values, b.values, mask.values);
47
+ }
48
+ template<typename step_t>
49
+ static Vectorized<double> arange(double base = 0., step_t step = static_cast<step_t>(1)) {
50
+ return Vectorized<double>(base, base + step, base + 2 * step, base + 3 * step);
51
+ }
52
+ static Vectorized<double> set(const Vectorized<double>& a, const Vectorized<double>& b,
53
+ int64_t count = size()) {
54
+ switch (count) {
55
+ case 0:
56
+ return a;
57
+ case 1:
58
+ return blend<1>(a, b);
59
+ case 2:
60
+ return blend<3>(a, b);
61
+ case 3:
62
+ return blend<7>(a, b);
63
+ }
64
+ return b;
65
+ }
66
+ static Vectorized<double> loadu(const void* ptr, int64_t count = size()) {
67
+ if (count == size())
68
+ return _mm256_loadu_pd(reinterpret_cast<const double*>(ptr));
69
+
70
+
71
+ __at_align__ double tmp_values[size()];
72
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
73
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
74
+ // instructions while a loop would be compiled to one instruction.
75
+ for (const auto i : c10::irange(size())) {
76
+ tmp_values[i] = 0.0;
77
+ }
78
+ std::memcpy(
79
+ tmp_values,
80
+ reinterpret_cast<const double*>(ptr),
81
+ count * sizeof(double));
82
+ return _mm256_load_pd(tmp_values);
83
+ }
84
+ void store(void* ptr, int count = size()) const {
85
+ if (count == size()) {
86
+ _mm256_storeu_pd(reinterpret_cast<double*>(ptr), values);
87
+ } else if (count > 0) {
88
+ double tmp_values[size()];
89
+ _mm256_storeu_pd(reinterpret_cast<double*>(tmp_values), values);
90
+ std::memcpy(ptr, tmp_values, count * sizeof(double));
91
+ }
92
+ }
93
+ const double& operator[](int idx) const = delete;
94
+ double& operator[](int idx) = delete;
95
+ int zero_mask() const {
96
+ // Returns an integer mask in which each zero element becomes a 1 bit and every other element a 0 bit.
97
+ __m256d cmp = _mm256_cmp_pd(values, _mm256_set1_pd(0.0), _CMP_EQ_OQ);
98
+ return _mm256_movemask_pd(cmp);
99
+ }
100
+ Vectorized<double> isnan() const {
101
+ return _mm256_cmp_pd(values, _mm256_set1_pd(0.0), _CMP_UNORD_Q);
102
+ }
103
+ bool has_inf_nan() const {
104
+ __m256d self_sub = _mm256_sub_pd(values, values);
105
+ return (_mm256_movemask_epi8(_mm256_castpd_si256(self_sub)) & 0x77777777) != 0;
106
+ }
107
+ Vectorized<double> map(double (*const f)(double)) const {
108
+ __at_align__ double tmp[size()];
109
+ store(tmp);
110
+ for (const auto i : c10::irange(size())) {
111
+ tmp[i] = f(tmp[i]);
112
+ }
113
+ return loadu(tmp);
114
+ }
115
+ Vectorized<double> abs() const {
116
+ auto mask = _mm256_set1_pd(-0.f);
117
+ return _mm256_andnot_pd(mask, values);
118
+ }
119
+ Vectorized<double> angle() const {
120
+ const auto zero_vec = _mm256_set1_pd(0.f);
121
+ const auto nan_vec = _mm256_set1_pd(NAN);
122
+ const auto not_nan_mask = _mm256_cmp_pd(values, values, _CMP_EQ_OQ);
123
+ const auto nan_mask = _mm256_cmp_pd(not_nan_mask, zero_vec, _CMP_EQ_OQ);
124
+ const auto pi = _mm256_set1_pd(c10::pi<double>);
125
+
126
+ const auto neg_mask = _mm256_cmp_pd(values, zero_vec, _CMP_LT_OQ);
127
+ auto angle = _mm256_blendv_pd(zero_vec, pi, neg_mask);
128
+ angle = _mm256_blendv_pd(angle, nan_vec, nan_mask);
129
+ return angle;
130
+ }
131
+ Vectorized<double> real() const {
132
+ return *this;
133
+ }
134
+ Vectorized<double> imag() const {
135
+ return _mm256_set1_pd(0);
136
+ }
137
+ Vectorized<double> conj() const {
138
+ return *this;
139
+ }
140
+ Vectorized<double> acos() const {
141
+ return Vectorized<double>(Sleef_acosd4_u10(values));
142
+ }
143
+ Vectorized<double> acosh() const {
144
+ return Vectorized<double>(Sleef_acoshd4_u10(values));
145
+ }
146
+ Vectorized<double> asin() const {
147
+ return Vectorized<double>(Sleef_asind4_u10(values));
148
+ }
149
+ Vectorized<double> atan() const {
150
+ return Vectorized<double>(Sleef_atand4_u10(values));
151
+ }
152
+ Vectorized<double> atanh() const {
153
+ return Vectorized<double>(Sleef_atanhd4_u10(values));
154
+ }
155
+ Vectorized<double> atan2(const Vectorized<double> &b) const {
156
+ return Vectorized<double>(Sleef_atan2d4_u10(values, b));
157
+ }
158
+ Vectorized<double> copysign(const Vectorized<double> &sign) const {
159
+ return Vectorized<double>(Sleef_copysignd4(values, sign));
160
+ }
161
+ Vectorized<double> erf() const {
162
+ return Vectorized<double>(Sleef_erfd4_u10(values));
163
+ }
164
+ Vectorized<double> erfc() const {
165
+ return Vectorized<double>(Sleef_erfcd4_u15(values));
166
+ }
167
+ Vectorized<double> erfinv() const {
168
+ return map(calc_erfinv);
169
+ }
170
+ Vectorized<double> exp() const {
171
+ return Vectorized<double>(Sleef_expd4_u10(values));
172
+ }
173
+ Vectorized<double> exp2() const {
174
+ return Vectorized<double>(Sleef_exp2d4_u10(values));
175
+ }
176
+ Vectorized<double> expm1() const {
177
+ return Vectorized<double>(Sleef_expm1d4_u10(values));
178
+ }
179
+ Vectorized<double> exp_u20() const {
180
+ return exp();
181
+ }
182
+ Vectorized<double> fmod(const Vectorized<double>& q) const {
183
+ return Vectorized<double>(Sleef_fmodd4(values, q));
184
+ }
185
+ Vectorized<double> hypot(const Vectorized<double> &b) const {
186
+ return Vectorized<double>(Sleef_hypotd4_u05(values, b));
187
+ }
188
+ Vectorized<double> i0() const {
189
+ return map(calc_i0);
190
+ }
191
+ Vectorized<double> i0e() const {
192
+ return map(calc_i0e);
193
+ }
194
+ Vectorized<double> digamma() const {
195
+ return map(calc_digamma);
196
+ }
197
+ Vectorized<double> igamma(const Vectorized<double> &x) const {
198
+ __at_align__ double tmp[size()];
199
+ __at_align__ double tmp_x[size()];
200
+ store(tmp);
201
+ x.store(tmp_x);
202
+ for (const auto i : c10::irange(size())) {
203
+ tmp[i] = calc_igamma(tmp[i], tmp_x[i]);
204
+ }
205
+ return loadu(tmp);
206
+ }
207
+ Vectorized<double> igammac(const Vectorized<double> &x) const {
208
+ __at_align__ double tmp[size()];
209
+ __at_align__ double tmp_x[size()];
210
+ store(tmp);
211
+ x.store(tmp_x);
212
+ for (const auto i : c10::irange(size())) {
213
+ tmp[i] = calc_igammac(tmp[i], tmp_x[i]);
214
+ }
215
+ return loadu(tmp);
216
+ }
217
+ Vectorized<double> log() const {
218
+ return Vectorized<double>(Sleef_logd4_u10(values));
219
+ }
220
+ Vectorized<double> log2() const {
221
+ return Vectorized<double>(Sleef_log2d4_u10(values));
222
+ }
223
+ Vectorized<double> log10() const {
224
+ return Vectorized<double>(Sleef_log10d4_u10(values));
225
+ }
226
+ Vectorized<double> log1p() const {
227
+ return Vectorized<double>(Sleef_log1pd4_u10(values));
228
+ }
229
+ Vectorized<double> sin() const {
230
+ return Vectorized<double>(Sleef_sind4_u10(values));
231
+ }
232
+ Vectorized<double> sinh() const {
233
+ return Vectorized<double>(Sleef_sinhd4_u10(values));
234
+ }
235
+ Vectorized<double> cos() const {
236
+ return Vectorized<double>(Sleef_cosd4_u10(values));
237
+ }
238
+ Vectorized<double> cosh() const {
239
+ return Vectorized<double>(Sleef_coshd4_u10(values));
240
+ }
241
+ Vectorized<double> ceil() const {
242
+ return _mm256_ceil_pd(values);
243
+ }
244
+ Vectorized<double> floor() const {
245
+ return _mm256_floor_pd(values);
246
+ }
247
+ Vectorized<double> frac() const;
248
+ Vectorized<double> neg() const {
249
+ return _mm256_xor_pd(_mm256_set1_pd(-0.), values);
250
+ }
251
+ Vectorized<double> nextafter(const Vectorized<double> &b) const {
252
+ return Vectorized<double>(Sleef_nextafterd4(values, b));
253
+ }
254
+ Vectorized<double> round() const {
255
+ return _mm256_round_pd(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
256
+ }
257
+ Vectorized<double> tan() const {
258
+ return Vectorized<double>(Sleef_tand4_u10(values));
259
+ }
260
+ Vectorized<double> tanh() const {
261
+ return Vectorized<double>(Sleef_tanhd4_u10(values));
262
+ }
263
+ Vectorized<double> trunc() const {
264
+ return _mm256_round_pd(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
265
+ }
266
+ Vectorized<double> lgamma() const {
267
+ return Vectorized<double>(Sleef_lgammad4_u10(values));
268
+ }
269
+ Vectorized<double> sqrt() const {
270
+ return _mm256_sqrt_pd(values);
271
+ }
272
+ Vectorized<double> reciprocal() const {
273
+ return _mm256_div_pd(_mm256_set1_pd(1), values);
274
+ }
275
+ Vectorized<double> rsqrt() const {
276
+ return _mm256_div_pd(_mm256_set1_pd(1), _mm256_sqrt_pd(values));
277
+ }
278
+ Vectorized<double> pow(const Vectorized<double> &b) const {
279
+ return Vectorized<double>(Sleef_powd4_u10(values, b));
280
+ }
281
+ // Comparison using the _CMP_**_OQ predicate.
282
+ // `O`: get false if an operand is NaN
283
+ // `Q`: do not raise if an operand is NaN
284
+ Vectorized<double> operator==(const Vectorized<double>& other) const {
285
+ return _mm256_cmp_pd(values, other.values, _CMP_EQ_OQ);
286
+ }
287
+
288
+ Vectorized<double> operator!=(const Vectorized<double>& other) const {
289
+ return _mm256_cmp_pd(values, other.values, _CMP_NEQ_UQ);
290
+ }
291
+
292
+ Vectorized<double> operator<(const Vectorized<double>& other) const {
293
+ return _mm256_cmp_pd(values, other.values, _CMP_LT_OQ);
294
+ }
295
+
296
+ Vectorized<double> operator<=(const Vectorized<double>& other) const {
297
+ return _mm256_cmp_pd(values, other.values, _CMP_LE_OQ);
298
+ }
299
+
300
+ Vectorized<double> operator>(const Vectorized<double>& other) const {
301
+ return _mm256_cmp_pd(values, other.values, _CMP_GT_OQ);
302
+ }
303
+
304
+ Vectorized<double> operator>=(const Vectorized<double>& other) const {
305
+ return _mm256_cmp_pd(values, other.values, _CMP_GE_OQ);
306
+ }
307
+
308
+ Vectorized<double> eq(const Vectorized<double>& other) const;
309
+ Vectorized<double> ne(const Vectorized<double>& other) const;
310
+ Vectorized<double> lt(const Vectorized<double>& other) const;
311
+ Vectorized<double> le(const Vectorized<double>& other) const;
312
+ Vectorized<double> gt(const Vectorized<double>& other) const;
313
+ Vectorized<double> ge(const Vectorized<double>& other) const;
314
+ };
315
+
316
+ template <>
317
+ Vectorized<double> inline operator+(const Vectorized<double>& a, const Vectorized<double>& b) {
318
+ return _mm256_add_pd(a, b);
319
+ }
320
+
321
+ template <>
322
+ Vectorized<double> inline operator-(const Vectorized<double>& a, const Vectorized<double>& b) {
323
+ return _mm256_sub_pd(a, b);
324
+ }
325
+
326
+ template <>
327
+ Vectorized<double> inline operator*(const Vectorized<double>& a, const Vectorized<double>& b) {
328
+ return _mm256_mul_pd(a, b);
329
+ }
330
+
331
+ template <>
332
+ Vectorized<double> inline operator/(const Vectorized<double>& a, const Vectorized<double>& b) {
333
+ return _mm256_div_pd(a, b);
334
+ }
335
+
336
+ // frac. Implement this here so we can use subtraction.
337
+ inline Vectorized<double> Vectorized<double>::frac() const {
338
+ return *this - this->trunc();
339
+ }
340
+
341
+ // Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
342
+ // either input is a NaN.
343
+ template <>
344
+ Vectorized<double> inline maximum(const Vectorized<double>& a, const Vectorized<double>& b) {
345
+ Vectorized<double> max = _mm256_max_pd(a, b);
346
+ Vectorized<double> isnan = _mm256_cmp_pd(a, b, _CMP_UNORD_Q);
347
+ // Exploit the fact that all-ones is a NaN.
348
+ return _mm256_or_pd(max, isnan);
349
+ }
350
+
351
+ // Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
352
+ // either input is a NaN.
353
+ template <>
354
+ Vectorized<double> inline minimum(const Vectorized<double>& a, const Vectorized<double>& b) {
355
+ Vectorized<double> min = _mm256_min_pd(a, b);
356
+ Vectorized<double> isnan = _mm256_cmp_pd(a, b, _CMP_UNORD_Q);
357
+ // Exploit the fact that all-ones is a NaN.
358
+ return _mm256_or_pd(min, isnan);
359
+ }
360
+
361
+ template <>
362
+ Vectorized<double> inline clamp(const Vectorized<double>& a, const Vectorized<double>& min, const Vectorized<double>& max) {
363
+ return _mm256_min_pd(max, _mm256_max_pd(min, a));
364
+ }
365
+
366
+ template <>
367
+ Vectorized<double> inline clamp_min(const Vectorized<double>& a, const Vectorized<double>& min) {
368
+ return _mm256_max_pd(min, a);
369
+ }
370
+
371
+ template <>
372
+ Vectorized<double> inline clamp_max(const Vectorized<double>& a, const Vectorized<double>& max) {
373
+ return _mm256_min_pd(max, a);
374
+ }
375
+
376
+ template <>
377
+ Vectorized<double> inline operator&(const Vectorized<double>& a, const Vectorized<double>& b) {
378
+ return _mm256_and_pd(a, b);
379
+ }
380
+
381
+ template <>
382
+ Vectorized<double> inline operator|(const Vectorized<double>& a, const Vectorized<double>& b) {
383
+ return _mm256_or_pd(a, b);
384
+ }
385
+
386
+ template <>
387
+ Vectorized<double> inline operator^(const Vectorized<double>& a, const Vectorized<double>& b) {
388
+ return _mm256_xor_pd(a, b);
389
+ }
390
+
391
+ inline Vectorized<double> Vectorized<double>::eq(const Vectorized<double>& other) const {
392
+ return (*this == other) & Vectorized<double>(1.0);
393
+ }
394
+
395
+ inline Vectorized<double> Vectorized<double>::ne(const Vectorized<double>& other) const {
396
+ return (*this != other) & Vectorized<double>(1.0);
397
+ }
398
+
399
+ inline Vectorized<double> Vectorized<double>::gt(const Vectorized<double>& other) const {
400
+ return (*this > other) & Vectorized<double>(1.0);
401
+ }
402
+
403
+ inline Vectorized<double> Vectorized<double>::ge(const Vectorized<double>& other) const {
404
+ return (*this >= other) & Vectorized<double>(1.0);
405
+ }
406
+
407
+ inline Vectorized<double> Vectorized<double>::lt(const Vectorized<double>& other) const {
408
+ return (*this < other) & Vectorized<double>(1.0);
409
+ }
410
+
411
+ inline Vectorized<double> Vectorized<double>::le(const Vectorized<double>& other) const {
412
+ return (*this <= other) & Vectorized<double>(1.0);
413
+ }
414
+
415
+ template <>
416
+ inline void convert(const double* src, double* dst, int64_t n) {
417
+ int64_t i;
418
+ #pragma unroll
419
+ for (i = 0; i <= (n - Vectorized<double>::size()); i += Vectorized<double>::size()) {
420
+ _mm256_storeu_pd(dst + i, _mm256_loadu_pd(src + i));
421
+ }
422
+ #pragma unroll
423
+ for (; i < n; i++) {
424
+ dst[i] = src[i];
425
+ }
426
+ }
427
+
428
+ #ifdef CPU_CAPABILITY_AVX2
429
+ template <>
430
+ Vectorized<double> inline fmadd(const Vectorized<double>& a, const Vectorized<double>& b, const Vectorized<double>& c) {
431
+ return _mm256_fmadd_pd(a, b, c);
432
+ }
433
+
434
+ template <>
435
+ Vectorized<double> inline fmsub(const Vectorized<double>& a, const Vectorized<double>& b, const Vectorized<double>& c) {
436
+ return _mm256_fmsub_pd(a, b, c);
437
+ }
438
+ #endif
439
+
440
+ #endif
441
+
442
+ }} // namespace at::vec::CPU_CAPABILITY
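
A usage illustration for the interface above (not part of the header; a minimal sketch assuming AVX2 and that <ATen/cpu/vec/vec.h> is on the include path): load full lanes with loadu, combine with fmadd, store, and use the count arguments for the tail.

    #include <ATen/cpu/vec/vec.h>
    #include <cstdint>

    // y[i] = a * x[i] + y[i], processing 4 doubles per iteration under AVX2.
    void axpy(double a, const double* x, double* y, int64_t n) {
      using Vec = at::vec::Vectorized<double>;
      const Vec va(a);
      int64_t i = 0;
      for (; i + Vec::size() <= n; i += Vec::size()) {
        Vec vy = at::vec::fmadd(va, Vec::loadu(x + i), Vec::loadu(y + i));
        vy.store(y + i);
      }
      if (i < n) {
        // Tail: partial load (zero-filled) and partial store via the count argument.
        int64_t rest = n - i;
        Vec vy = at::vec::fmadd(va, Vec::loadu(x + i, rest), Vec::loadu(y + i, rest));
        vy.store(y + i, rest);
      }
    }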
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_float.h ADDED
@@ -0,0 +1,636 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+ #include <ATen/cpu/vec/vec_base.h>
8
+ #include <c10/util/irange.h>
9
+ #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
10
+ #include <sleef.h>
11
+ #endif
12
+
13
+ namespace at::vec {
14
+ // See Note [CPU_CAPABILITY namespace]
15
+ inline namespace CPU_CAPABILITY {
16
+
17
+ #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
18
+
19
+ template <> class Vectorized<float> {
20
+ private:
21
+ __m256 values;
22
+ public:
23
+ using value_type = float;
24
+ using size_type = int;
25
+ static constexpr size_type size() {
26
+ return 8;
27
+ }
28
+ Vectorized() {}
29
+ Vectorized(__m256 v) : values(v) {}
30
+ Vectorized(float val) {
31
+ values = _mm256_set1_ps(val);
32
+ }
33
+ Vectorized(float val1, float val2, float val3, float val4,
34
+ float val5, float val6, float val7, float val8) {
35
+ values = _mm256_setr_ps(val1, val2, val3, val4, val5, val6, val7, val8);
36
+ }
37
+ operator __m256() const {
38
+ return values;
39
+ }
40
+ template <int64_t mask>
41
+ static Vectorized<float> blend(const Vectorized<float>& a, const Vectorized<float>& b) {
42
+ return _mm256_blend_ps(a.values, b.values, mask);
43
+ }
44
+ static Vectorized<float> blendv(const Vectorized<float>& a, const Vectorized<float>& b,
45
+ const Vectorized<float>& mask) {
46
+ return _mm256_blendv_ps(a.values, b.values, mask.values);
47
+ }
48
+ template<typename step_t>
49
+ static Vectorized<float> arange(float base = 0.f, step_t step = static_cast<step_t>(1)) {
50
+ return Vectorized<float>(
51
+ base, base + step, base + 2 * step, base + 3 * step,
52
+ base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step);
53
+ }
54
+ static Vectorized<float> set(const Vectorized<float>& a, const Vectorized<float>& b,
55
+ int64_t count = size()) {
56
+ switch (count) {
57
+ case 0:
58
+ return a;
59
+ case 1:
60
+ return blend<1>(a, b);
61
+ case 2:
62
+ return blend<3>(a, b);
63
+ case 3:
64
+ return blend<7>(a, b);
65
+ case 4:
66
+ return blend<15>(a, b);
67
+ case 5:
68
+ return blend<31>(a, b);
69
+ case 6:
70
+ return blend<63>(a, b);
71
+ case 7:
72
+ return blend<127>(a, b);
73
+ }
74
+ return b;
75
+ }
76
+ static Vectorized<float> loadu(const void* ptr, int64_t count = size()) {
77
+ if (count == size())
78
+ return _mm256_loadu_ps(reinterpret_cast<const float*>(ptr));
79
+ __at_align__ float tmp_values[size()];
80
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
81
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
82
+ // instructions while a loop would be compiled to one instruction.
83
+ for (const auto i : c10::irange(size())) {
84
+ tmp_values[i] = 0.0;
85
+ }
86
+ std::memcpy(
87
+ tmp_values, reinterpret_cast<const float*>(ptr), count * sizeof(float));
88
+ return _mm256_loadu_ps(tmp_values);
89
+ }
90
+ void store(void* ptr, int64_t count = size()) const {
91
+ if (count == size()) {
92
+ _mm256_storeu_ps(reinterpret_cast<float*>(ptr), values);
93
+ } else if (count > 0) {
94
+ float tmp_values[size()];
95
+ _mm256_storeu_ps(reinterpret_cast<float*>(tmp_values), values);
96
+ std::memcpy(ptr, tmp_values, count * sizeof(float));
97
+ }
98
+ }
99
+ const float& operator[](int idx) const = delete;
100
+ float& operator[](int idx) = delete;
101
+ int zero_mask() const {
102
+ // Returns an integer mask in which each zero element becomes a 1 bit and every other element a 0 bit.
103
+ __m256 cmp = _mm256_cmp_ps(values, _mm256_set1_ps(0.0f), _CMP_EQ_OQ);
104
+ return _mm256_movemask_ps(cmp);
105
+ }
106
+ Vectorized<float> isnan() const {
107
+ return _mm256_cmp_ps(values, _mm256_set1_ps(0.0f), _CMP_UNORD_Q);
108
+ }
109
+
110
+ bool has_inf_nan() const {
111
+ __m256 self_sub = _mm256_sub_ps(values, values);
112
+ return (_mm256_movemask_epi8(_mm256_castps_si256(self_sub)) & 0x77777777) != 0;
113
+ }
114
+
115
+ Vectorized<float> map(float (*const f)(float)) const {
116
+ __at_align__ float tmp[size()];
117
+ store(tmp);
118
+ for (const auto i : c10::irange(size())) {
119
+ tmp[i] = f(tmp[i]);
120
+ }
121
+ return loadu(tmp);
122
+ }
123
+ Vectorized<float> abs() const {
124
+ auto mask = _mm256_set1_ps(-0.f);
125
+ return _mm256_andnot_ps(mask, values);
126
+ }
127
+ Vectorized<float> angle() const {
128
+ const auto zero_vec = _mm256_set1_ps(0.f);
129
+ const auto nan_vec = _mm256_set1_ps(NAN);
130
+ const auto not_nan_mask = _mm256_cmp_ps(values, values, _CMP_EQ_OQ);
131
+ const auto nan_mask = _mm256_cmp_ps(not_nan_mask, zero_vec, _CMP_EQ_OQ);
132
+ const auto pi = _mm256_set1_ps(c10::pi<float>);
133
+
134
+ const auto neg_mask = _mm256_cmp_ps(values, zero_vec, _CMP_LT_OQ);
135
+ auto angle = _mm256_blendv_ps(zero_vec, pi, neg_mask);
136
+ angle = _mm256_blendv_ps(angle, nan_vec, nan_mask);
137
+ return angle;
138
+ }
139
+ Vectorized<float> real() const {
140
+ return *this;
141
+ }
142
+ Vectorized<float> imag() const {
143
+ return _mm256_set1_ps(0);
144
+ }
145
+ Vectorized<float> conj() const {
146
+ return *this;
147
+ }
148
+ Vectorized<float> acos() const {
149
+ return Vectorized<float>(Sleef_acosf8_u10(values));
150
+ }
151
+ Vectorized<float> acosh() const {
152
+ return Vectorized<float>(Sleef_acoshf8_u10(values));
153
+ }
154
+ Vectorized<float> asin() const {
155
+ return Vectorized<float>(Sleef_asinf8_u10(values));
156
+ }
157
+ Vectorized<float> atan() const {
158
+ return Vectorized<float>(Sleef_atanf8_u10(values));
159
+ }
160
+ Vectorized<float> atanh() const {
161
+ return Vectorized<float>(Sleef_atanhf8_u10(values));
162
+ }
163
+ Vectorized<float> atan2(const Vectorized<float> &b) const {
164
+ return Vectorized<float>(Sleef_atan2f8_u10(values, b));
165
+ }
166
+ Vectorized<float> copysign(const Vectorized<float> &sign) const {
167
+ return Vectorized<float>(Sleef_copysignf8(values, sign));
168
+ }
169
+ Vectorized<float> erf() const {
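+ // Note: the constants below appear to be the coefficients of the
+ // Abramowitz & Stegun 7.1.26 approximation (max error ~1.5e-7):
+ //   erf(x) ~= sign(x) * (1 - (a1*t + a2*t^2 + a3*t^3 + a4*t^4 + a5*t^5) * exp(-x*x)),
+ //   with t = 1 / (1 + p*|x|).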
170
+ // constants
171
+ const auto neg_zero_vec = _mm256_set1_ps(-0.f);
172
+ const auto one_vec = _mm256_set1_ps(1.0f);
173
+ const auto p = _mm256_set1_ps(0.3275911f);
174
+ const auto p1 = _mm256_set1_ps(0.254829592f);
175
+ const auto p2 = _mm256_set1_ps(-0.284496736f);
176
+ const auto p3 = _mm256_set1_ps(1.421413741f);
177
+ const auto p4 = _mm256_set1_ps(-1.453152027f);
178
+ const auto p5 = _mm256_set1_ps(1.061405429f);
179
+ // sign(x)
180
+ auto sign_mask = _mm256_and_ps(neg_zero_vec, values);
181
+ auto abs_vec = _mm256_xor_ps(sign_mask, values);
182
+ // t = 1 / (p * abs(x) + 1)
183
+ auto tmp0 = _mm256_fmadd_ps(p, abs_vec, one_vec);
184
+ auto t = _mm256_div_ps(one_vec, tmp0);
185
+ // r = p5 * t ^ 4 + p4 * t ^ 3 + p3 * t ^ 2 + p2 * t + p1
186
+ auto tmp1 = _mm256_fmadd_ps(p5, t, p4);
187
+ auto tmp2 = _mm256_fmadd_ps(tmp1, t, p3);
188
+ auto tmp3 = _mm256_fmadd_ps(tmp2, t, p2);
189
+ auto r = _mm256_fmadd_ps(tmp3, t, p1);
190
+ // - exp(- x * x)
191
+ auto pow_2 = _mm256_mul_ps(values, values);
192
+ auto neg_pow_2 = _mm256_xor_ps(neg_zero_vec, pow_2);
193
+ // auto tmp4 = exp(neg_pow_2);
194
+ auto tmp4 = Vectorized<float>(Sleef_expf8_u10(neg_pow_2));
195
+ auto tmp5 = _mm256_xor_ps(neg_zero_vec, tmp4);
196
+ // erf(x) = sign(x) * (1 - r * t * exp(- x * x))
197
+ auto tmp6 = _mm256_mul_ps(tmp5, t);
198
+ auto tmp7 = _mm256_fmadd_ps(tmp6, r, one_vec);
199
+ return _mm256_xor_ps(sign_mask, tmp7);
200
+ }
201
+ Vectorized<float> erfc() const {
202
+ return Vectorized<float>(Sleef_erfcf8_u15(values));
203
+ }
204
+ Vectorized<float> erfinv() const {
205
+ return map(calc_erfinv);
206
+ }
207
+ Vectorized<float> exp() const {
208
+ return Vectorized<float>(Sleef_expf8_u10(values));
209
+ }
210
+ Vectorized<float> exp2() const {
211
+ return Vectorized<float>(Sleef_exp2f8_u10(values));
212
+ }
213
+ Vectorized<float> expm1() const {
214
+ return Vectorized<float>(Sleef_expm1f8_u10(values));
215
+ }
216
+ Vectorized<float> exp_u20() const {
217
+ // A faster version of exp with ULP=20
218
+ static __m256 vec_factorial_1 =
219
+ _mm256_set1_ps(0.999999701f); // 1/factorial(1)
220
+ static __m256 vec_factorial_2 =
221
+ _mm256_set1_ps(0.499991506f); // 1/factorial(2)
222
+ static __m256 vec_factorial_3 =
223
+ _mm256_set1_ps(0.166676521f); // 1/factorial(3)
224
+ static __m256 vec_factorial_4 =
225
+ _mm256_set1_ps(0.0418978221f); // 1/factorial(4)
226
+ static __m256 vec_factorial_5 =
227
+ _mm256_set1_ps(0.00828929059f); // 1/factorial(5)
228
+ static __m256 vec_exp_log2ef =
229
+ (__m256)_mm256_set1_epi32(0x3fb8aa3b); // log2(e)
230
+ static __m256 vec_half = _mm256_set1_ps(0.5f);
231
+ static __m256 vec_one = _mm256_set1_ps(1.f);
232
+ static __m256 vec_zero = _mm256_set1_ps(0.f);
233
+ static __m256 vec_two = _mm256_set1_ps(2.f);
234
+ static __m256 vec_ln2f = (__m256)_mm256_set1_epi32(0x3f317218); // ln(2)
235
+ static __m256 vec_ln_flt_min = (__m256)_mm256_set1_epi32(0xc2aeac50);
236
+ static __m256 vec_ln_flt_max = (__m256)_mm256_set1_epi32(0x42b17218);
237
+ static __m256i vec_127 = _mm256_set1_epi32(0x0000007f);
238
+ static int n_mantissa_bits = 23;
239
+
240
+ // exp(x) =
241
+ // = exp(n * ln(2) + r) // divide x by ln(2) and get quot and rem
242
+ // = 2^n * exp(r) // simplify the exp(n*ln(2)) expression
243
+
244
+ auto less_ln_flt_min_mask =
245
+ _mm256_cmp_ps(values, vec_ln_flt_min, 1 /*_CMP_LT_OS*/);
246
+ auto vec_src = _mm256_min_ps(values, vec_ln_flt_max);
247
+ vec_src = _mm256_max_ps(vec_src, vec_ln_flt_min);
248
+
249
+ // fx = floorf(x * log2ef + 0.5)
250
+ auto vec_fx = _mm256_fmadd_ps(vec_src, vec_exp_log2ef, vec_half);
251
+ vec_fx = _mm256_floor_ps(vec_fx);
252
+
253
+ // x = x - fx * ln2
254
+ auto vec_exp_poly = _mm256_fnmadd_ps(vec_fx, vec_ln2f, vec_src);
255
+
256
+ // compute polynomial
257
+ auto vec_res =
258
+ _mm256_fmadd_ps(vec_exp_poly, vec_factorial_5, vec_factorial_4);
259
+ vec_res = _mm256_fmadd_ps(vec_exp_poly, vec_res, vec_factorial_3);
260
+ vec_res = _mm256_fmadd_ps(vec_exp_poly, vec_res, vec_factorial_2);
261
+ vec_res = _mm256_fmadd_ps(vec_exp_poly, vec_res, vec_factorial_1);
262
+ vec_res = _mm256_fmadd_ps(vec_exp_poly, vec_res, vec_one);
263
+
264
+ // compute 2^(n-1)
265
+ auto vec_exp_number = _mm256_sub_ps(vec_fx, vec_one);
266
+ auto vec_exp_number_i = _mm256_cvtps_epi32(vec_exp_number);
267
+ auto vec_two_pow_n_i = _mm256_add_epi32(vec_exp_number_i, vec_127);
268
+ vec_two_pow_n_i = _mm256_slli_epi32(vec_two_pow_n_i, n_mantissa_bits);
269
+ auto vec_two_pow_n = (__m256)vec_two_pow_n_i;
270
+ vec_two_pow_n =
271
+ _mm256_blendv_ps(vec_two_pow_n, vec_zero, less_ln_flt_min_mask);
272
+
273
+ // y = y * 2^n
274
+ vec_res = _mm256_mul_ps(vec_res, vec_two_pow_n);
275
+ vec_res = _mm256_mul_ps(vec_res, vec_two);
276
+ return vec_res;
277
+ }
278
+ Vectorized<float> fmod(const Vectorized<float>& q) const {
279
+ return Vectorized<float>(Sleef_fmodf8(values, q));
280
+ }
281
+ Vectorized<float> log() const {
282
+ return Vectorized<float>(Sleef_logf8_u10(values));
283
+ }
284
+ Vectorized<float> log2() const {
285
+ return Vectorized<float>(Sleef_log2f8_u10(values));
286
+ }
287
+ Vectorized<float> log10() const {
288
+ return Vectorized<float>(Sleef_log10f8_u10(values));
289
+ }
290
+ Vectorized<float> log1p() const {
291
+ return Vectorized<float>(Sleef_log1pf8_u10(values));
292
+ }
293
+ Vectorized<float> frac() const;
294
+ Vectorized<float> sin() const {
295
+ return Vectorized<float>(Sleef_sinf8_u35(values));
296
+ }
297
+ Vectorized<float> sinh() const {
298
+ return Vectorized<float>(Sleef_sinhf8_u10(values));
299
+ }
300
+ Vectorized<float> cos() const {
301
+ return Vectorized<float>(Sleef_cosf8_u35(values));
302
+ }
303
+ Vectorized<float> cosh() const {
304
+ return Vectorized<float>(Sleef_coshf8_u10(values));
305
+ }
306
+ Vectorized<float> ceil() const {
307
+ return _mm256_ceil_ps(values);
308
+ }
309
+ Vectorized<float> floor() const {
310
+ return _mm256_floor_ps(values);
311
+ }
312
+ Vectorized<float> hypot(const Vectorized<float> &b) const {
313
+ return Vectorized<float>(Sleef_hypotf8_u05(values, b));
314
+ }
315
+ Vectorized<float> i0() const {
316
+ return map(calc_i0);
317
+ }
318
+ Vectorized<float> i0e() const {
319
+ return map(calc_i0e);
320
+ }
321
+ Vectorized<float> digamma() const {
322
+ return map(calc_digamma);
323
+ }
324
+ Vectorized<float> igamma(const Vectorized<float> &x) const {
325
+ __at_align__ float tmp[size()];
326
+ __at_align__ float tmp_x[size()];
327
+ store(tmp);
328
+ x.store(tmp_x);
329
+ for (const auto i : c10::irange(size())) {
330
+ tmp[i] = calc_igamma(tmp[i], tmp_x[i]);
331
+ }
332
+ return loadu(tmp);
333
+ }
334
+ Vectorized<float> igammac(const Vectorized<float> &x) const {
335
+ __at_align__ float tmp[size()];
336
+ __at_align__ float tmp_x[size()];
337
+ store(tmp);
338
+ x.store(tmp_x);
339
+ for (const auto i : c10::irange(size())) {
340
+ tmp[i] = calc_igammac(tmp[i], tmp_x[i]);
341
+ }
342
+ return loadu(tmp);
343
+ }
344
+ Vectorized<float> neg() const {
345
+ return _mm256_xor_ps(_mm256_set1_ps(-0.f), values);
346
+ }
347
+ Vectorized<float> nextafter(const Vectorized<float> &b) const {
348
+ return Vectorized<float>(Sleef_nextafterf8(values, b));
349
+ }
350
+ Vectorized<float> round() const {
351
+ return _mm256_round_ps(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
352
+ }
353
+ Vectorized<float> tan() const {
354
+ return Vectorized<float>(Sleef_tanf8_u10(values));
355
+ }
356
+ Vectorized<float> tanh() const {
357
+ return Vectorized<float>(Sleef_tanhf8_u10(values));
358
+ }
359
+ Vectorized<float> trunc() const {
360
+ return _mm256_round_ps(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
361
+ }
362
+ Vectorized<float> lgamma() const {
363
+ return Vectorized<float>(Sleef_lgammaf8_u10(values));
364
+ }
365
+ Vectorized<float> sqrt() const {
366
+ return _mm256_sqrt_ps(values);
367
+ }
368
+ Vectorized<float> reciprocal() const {
369
+ return _mm256_div_ps(_mm256_set1_ps(1), values);
370
+ }
371
+ Vectorized<float> rsqrt() const {
372
+ return _mm256_div_ps(_mm256_set1_ps(1), _mm256_sqrt_ps(values));
373
+ }
374
+ Vectorized<float> pow(const Vectorized<float> &b) const {
375
+ return Vectorized<float>(Sleef_powf8_u10(values, b));
376
+ }
377
+ // Comparison using the _CMP_**_OQ predicate.
378
+ // `O`: get false if an operand is NaN
379
+ // `Q`: do not raise if an operand is NaN
380
+ Vectorized<float> operator==(const Vectorized<float>& other) const {
381
+ return _mm256_cmp_ps(values, other.values, _CMP_EQ_OQ);
382
+ }
383
+
384
+ Vectorized<float> operator!=(const Vectorized<float>& other) const {
385
+ return _mm256_cmp_ps(values, other.values, _CMP_NEQ_UQ);
386
+ }
387
+
388
+ Vectorized<float> operator<(const Vectorized<float>& other) const {
389
+ return _mm256_cmp_ps(values, other.values, _CMP_LT_OQ);
390
+ }
391
+
392
+ Vectorized<float> operator<=(const Vectorized<float>& other) const {
393
+ return _mm256_cmp_ps(values, other.values, _CMP_LE_OQ);
394
+ }
395
+
396
+ Vectorized<float> operator>(const Vectorized<float>& other) const {
397
+ return _mm256_cmp_ps(values, other.values, _CMP_GT_OQ);
398
+ }
399
+
400
+ Vectorized<float> operator>=(const Vectorized<float>& other) const {
401
+ return _mm256_cmp_ps(values, other.values, _CMP_GE_OQ);
402
+ }
403
+
404
+ Vectorized<float> eq(const Vectorized<float>& other) const;
405
+ Vectorized<float> ne(const Vectorized<float>& other) const;
406
+ Vectorized<float> gt(const Vectorized<float>& other) const;
407
+ Vectorized<float> ge(const Vectorized<float>& other) const;
408
+ Vectorized<float> lt(const Vectorized<float>& other) const;
409
+ Vectorized<float> le(const Vectorized<float>& other) const;
410
+ };
411
+
412
+ template <>
413
+ Vectorized<float> inline operator+(const Vectorized<float>& a, const Vectorized<float>& b) {
414
+ return _mm256_add_ps(a, b);
415
+ }
416
+
417
+ template <>
418
+ Vectorized<float> inline operator-(const Vectorized<float>& a, const Vectorized<float>& b) {
419
+ return _mm256_sub_ps(a, b);
420
+ }
421
+
422
+ template <>
423
+ Vectorized<float> inline operator*(const Vectorized<float>& a, const Vectorized<float>& b) {
424
+ return _mm256_mul_ps(a, b);
425
+ }
426
+
427
+ template <>
428
+ Vectorized<float> inline operator/(const Vectorized<float>& a, const Vectorized<float>& b) {
429
+ return _mm256_div_ps(a, b);
430
+ }
431
+
432
+ // frac. Implement this here so we can use subtraction.
433
+ inline Vectorized<float> Vectorized<float>::frac() const {
434
+ return *this - this->trunc();
435
+ }
436
+
437
+ // Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
438
+ // either input is a NaN.
439
+ template <>
440
+ Vectorized<float> inline maximum(const Vectorized<float>& a, const Vectorized<float>& b) {
441
+ Vectorized<float> max = _mm256_max_ps(a, b);
442
+ Vectorized<float> isnan = _mm256_cmp_ps(a, b, _CMP_UNORD_Q);
443
+ // Exploit the fact that all-ones is a NaN.
444
+ return _mm256_or_ps(max, isnan);
445
+ }
446
+
447
+ // Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
448
+ // either input is a NaN.
449
+ template <>
450
+ Vectorized<float> inline minimum(const Vectorized<float>& a, const Vectorized<float>& b) {
451
+ Vectorized<float> min = _mm256_min_ps(a, b);
452
+ Vectorized<float> isnan = _mm256_cmp_ps(a, b, _CMP_UNORD_Q);
453
+ // Exploit the fact that all-ones is a NaN.
454
+ return _mm256_or_ps(min, isnan);
455
+ }
456
+
457
+ template <>
458
+ Vectorized<float> inline clamp(const Vectorized<float>& a, const Vectorized<float>& min, const Vectorized<float>& max) {
459
+ return _mm256_min_ps(max, _mm256_max_ps(min, a));
460
+ }
461
+
462
+ template <>
463
+ Vectorized<float> inline clamp_max(const Vectorized<float>& a, const Vectorized<float>& max) {
464
+ return _mm256_min_ps(max, a);
465
+ }
466
+
467
+ template <>
468
+ Vectorized<float> inline clamp_min(const Vectorized<float>& a, const Vectorized<float>& min) {
469
+ return _mm256_max_ps(min, a);
470
+ }
471
+
472
+ template <>
473
+ Vectorized<float> inline operator&(const Vectorized<float>& a, const Vectorized<float>& b) {
474
+ return _mm256_and_ps(a, b);
475
+ }
476
+
477
+ template <>
478
+ Vectorized<float> inline operator|(const Vectorized<float>& a, const Vectorized<float>& b) {
479
+ return _mm256_or_ps(a, b);
480
+ }
481
+
482
+ template <>
483
+ Vectorized<float> inline operator^(const Vectorized<float>& a, const Vectorized<float>& b) {
484
+ return _mm256_xor_ps(a, b);
485
+ }
486
+
487
+ inline Vectorized<float> Vectorized<float>::eq(const Vectorized<float>& other) const {
488
+ return (*this == other) & Vectorized<float>(1.0f);
489
+ }
490
+
491
+ inline Vectorized<float> Vectorized<float>::ne(const Vectorized<float>& other) const {
492
+ return (*this != other) & Vectorized<float>(1.0f);
493
+ }
494
+
495
+ inline Vectorized<float> Vectorized<float>::gt(const Vectorized<float>& other) const {
496
+ return (*this > other) & Vectorized<float>(1.0f);
497
+ }
498
+
499
+ inline Vectorized<float> Vectorized<float>::ge(const Vectorized<float>& other) const {
500
+ return (*this >= other) & Vectorized<float>(1.0f);
501
+ }
502
+
503
+ inline Vectorized<float> Vectorized<float>::lt(const Vectorized<float>& other) const {
504
+ return (*this < other) & Vectorized<float>(1.0f);
505
+ }
506
+
507
+ inline Vectorized<float> Vectorized<float>::le(const Vectorized<float>& other) const {
508
+ return (*this <= other) & Vectorized<float>(1.0f);
509
+ }
510
+
511
+ template <>
512
+ inline void convert(const float* src, float* dst, int64_t n) {
513
+ int64_t i;
514
+ #pragma unroll
515
+ for (i = 0; i <= (n - Vectorized<float>::size()); i += Vectorized<float>::size()) {
516
+ _mm256_storeu_ps(dst + i, _mm256_loadu_ps(src + i));
517
+ }
518
+ #pragma unroll
519
+ for (; i < n; i++) {
520
+ dst[i] = src[i];
521
+ }
522
+ }
523
+
524
+
525
+ template <>
526
+ Vectorized<float> inline fmadd(const Vectorized<float>& a, const Vectorized<float>& b, const Vectorized<float>& c) {
527
+ return _mm256_fmadd_ps(a, b, c);
528
+ }
529
+
530
+ template <>
531
+ Vectorized<float> inline fmsub(const Vectorized<float>& a, const Vectorized<float>& b, const Vectorized<float>& c) {
532
+ return _mm256_fmsub_ps(a, b, c);
533
+ }
534
+
535
+ // Used by Inductor CPP codegen
536
+ template<>
537
+ inline void transpose_mxn<float, 8, 8>(
538
+ const float* src,
539
+ int64_t ld_src,
540
+ float* dst,
541
+ int64_t ld_dst) {
542
+ // load from src to registers
543
+ // a: a0 a1 a2 a3 a4 a5 a6 a7
544
+ // b: b0 b1 b2 b3 b4 b5 b6 b7
545
+ // c: c0 c1 c2 c3 c4 c5 c6 c7
546
+ // d: d0 d1 d2 d3 d4 d5 d6 d7
547
+ // e: e0 e1 e2 e3 e4 e5 e6 e7
548
+ // f: f0 f1 f2 f3 f4 f5 f6 f7
549
+ // g: g0 g1 g2 g3 g4 g5 g6 g7
550
+ // h: h0 h1 h2 h3 h4 h5 h6 h7
551
+ __m256 a = _mm256_loadu_ps(&src[0 * ld_src]);
552
+ __m256 b = _mm256_loadu_ps(&src[1 * ld_src]);
553
+ __m256 c = _mm256_loadu_ps(&src[2 * ld_src]);
554
+ __m256 d = _mm256_loadu_ps(&src[3 * ld_src]);
555
+ __m256 e = _mm256_loadu_ps(&src[4 * ld_src]);
556
+ __m256 f = _mm256_loadu_ps(&src[5 * ld_src]);
557
+ __m256 g = _mm256_loadu_ps(&src[6 * ld_src]);
558
+ __m256 h = _mm256_loadu_ps(&src[7 * ld_src]);
559
+
560
+ __m256 ta, tb, tc, td, te, tf, tg, th;
561
+ // unpacking and interleaving 32-bit elements
562
+ // a0 b0 a1 b1 a4 b4 a5 b5
563
+ // a2 b2 a3 b3 a6 b6 a7 b7
564
+ // c0 d0 c1 d1 ...
565
+ // c2 d2 c3 d3 ...
566
+ // e0 f0 e1 f1 ...
567
+ // e2 f2 e3 f3 ...
568
+ // g0 h0 g1 h1 ...
569
+ // g2 h2 g3 h3 ...
570
+ ta = _mm256_unpacklo_ps(a, b);
571
+ tb = _mm256_unpackhi_ps(a, b);
572
+ tc = _mm256_unpacklo_ps(c, d);
573
+ td = _mm256_unpackhi_ps(c, d);
574
+ te = _mm256_unpacklo_ps(e, f);
575
+ tf = _mm256_unpackhi_ps(e, f);
576
+ tg = _mm256_unpacklo_ps(g, h);
577
+ th = _mm256_unpackhi_ps(g, h);
578
+
579
+ // unpacking and interleaving 64-bit elements
580
+ // a0 b0 c0 d0 a4 b4 c4 d4
581
+ // a1 b1 c1 d1 ...
582
+ // a2 b2 c2 d2 ...
583
+ // a3 b3 c3 d3 ...
584
+ // e0 f0 g0 h0 e4 f4 g4 h4
585
+ // e1 f1 g1 h1 ...
586
+ // e2 f2 g2 h2 ...
587
+ // e3 f3 g3 h3 ...
588
+ a = _mm256_castpd_ps(
589
+ _mm256_unpacklo_pd(_mm256_castps_pd(ta), _mm256_castps_pd(tc)));
590
+ b = _mm256_castpd_ps(
591
+ _mm256_unpackhi_pd(_mm256_castps_pd(ta), _mm256_castps_pd(tc)));
592
+ c = _mm256_castpd_ps(
593
+ _mm256_unpacklo_pd(_mm256_castps_pd(tb), _mm256_castps_pd(td)));
594
+ d = _mm256_castpd_ps(
595
+ _mm256_unpackhi_pd(_mm256_castps_pd(tb), _mm256_castps_pd(td)));
596
+ e = _mm256_castpd_ps(
597
+ _mm256_unpacklo_pd(_mm256_castps_pd(te), _mm256_castps_pd(tg)));
598
+ f = _mm256_castpd_ps(
599
+ _mm256_unpackhi_pd(_mm256_castps_pd(te), _mm256_castps_pd(tg)));
600
+ g = _mm256_castpd_ps(
601
+ _mm256_unpacklo_pd(_mm256_castps_pd(tf), _mm256_castps_pd(th)));
602
+ h = _mm256_castpd_ps(
603
+ _mm256_unpackhi_pd(_mm256_castps_pd(tf), _mm256_castps_pd(th)));
604
+
605
+ // shuffle 128-bits (composed of 4 32-bit elements)
606
+ // a0 b0 c0 d0 e0 f0 g0 h0
607
+ // a1 b1 c1 d1 ...
608
+ // a2 b2 c2 d2 ...
609
+ // a3 b3 c3 d3 ...
610
+ // a4 b4 c4 d4 ...
611
+ // a5 b5 c5 d5 ...
612
+ // a6 b6 c6 d6 ...
613
+ // a7 b7 c7 d7 ...
614
+ ta = _mm256_permute2f128_ps(a, e, 0x20);
615
+ tb = _mm256_permute2f128_ps(b, f, 0x20);
616
+ tc = _mm256_permute2f128_ps(c, g, 0x20);
617
+ td = _mm256_permute2f128_ps(d, h, 0x20);
618
+ te = _mm256_permute2f128_ps(a, e, 0x31);
619
+ tf = _mm256_permute2f128_ps(b, f, 0x31);
620
+ tg = _mm256_permute2f128_ps(c, g, 0x31);
621
+ th = _mm256_permute2f128_ps(d, h, 0x31);
622
+
623
+ // store from registers to dst
624
+ _mm256_storeu_ps(&dst[0 * ld_dst], ta);
625
+ _mm256_storeu_ps(&dst[1 * ld_dst], tb);
626
+ _mm256_storeu_ps(&dst[2 * ld_dst], tc);
627
+ _mm256_storeu_ps(&dst[3 * ld_dst], td);
628
+ _mm256_storeu_ps(&dst[4 * ld_dst], te);
629
+ _mm256_storeu_ps(&dst[5 * ld_dst], tf);
630
+ _mm256_storeu_ps(&dst[6 * ld_dst], tg);
631
+ _mm256_storeu_ps(&dst[7 * ld_dst], th);
632
+ }
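+ // Usage sketch (illustrative): transpose an 8x8 tile out of a row-major
+ // float block with leading dimension 16 into a contiguous 8x8 block:
+ //   float src[8][16]; float dst[8][8];
+ //   transpose_mxn<float, 8, 8>(&src[0][0], /*ld_src=*/16, &dst[0][0], /*ld_dst=*/8);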
633
+
634
+ #endif
635
+
636
+ }} // namespace at::vec::CPU_CAPABILITY
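
To make the range reduction in exp_u20 above easier to follow, here is a scalar sketch of the same scheme (coefficients taken from the comments above; the input clamping and the underflow blend to zero are omitted, so this is illustrative rather than a drop-in replacement):

    #include <cmath>
    #include <cstdint>
    #include <cstring>

    // exp(x) = 2^n * exp(r), with n = floor(x * log2(e) + 0.5) and r = x - n * ln(2).
    inline float exp_u20_scalar(float x) {
      const float log2e = 1.442695041f;
      const float ln2   = 0.6931471806f;
      float n = std::floor(x * log2e + 0.5f);
      float r = x - n * ln2;
      // Degree-5 polynomial for exp(r): 1 + r/1! + r^2/2! + r^3/3! + r^4/4! + r^5/5!
      float p = 0.00828929059f;          // ~1/5!
      p = p * r + 0.0418978221f;         // ~1/4!
      p = p * r + 0.166676521f;          // ~1/3!
      p = p * r + 0.499991506f;          // ~1/2!
      p = p * r + 0.999999701f;          // ~1/1!
      p = p * r + 1.0f;
      // Build 2^(n-1) from the float exponent bits, then multiply by 2,
      // mirroring the vectorized code above.
      int32_t biased = static_cast<int32_t>(n) - 1 + 127;
      uint32_t bits = static_cast<uint32_t>(biased) << 23;
      float two_pow_n_minus_1;
      std::memcpy(&two_pow_n_minus_1, &bits, sizeof(bits));
      return p * two_pow_n_minus_1 * 2.0f;
    }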
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_float_neon.h ADDED
@@ -0,0 +1,892 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+ #include <ATen/cpu/vec/vec_base.h>
8
+ #include <c10/util/irange.h>
9
+
10
+ #if defined(__aarch64__) && defined(AT_BUILD_ARM_VEC256_WITH_SLEEF)
11
+ #include <sleef.h>
12
+ #endif
13
+
14
+ // Sleef offers vectorized versions of some transcendentals
15
+ // such as sin, cos, tan, etc.
16
+ // However, for now we opt for the STL, since we are not building
17
+ // with Sleef for mobile yet.
18
+
19
+ namespace at::vec {
20
+ // See Note [CPU_CAPABILITY namespace]
21
+ inline namespace CPU_CAPABILITY {
22
+
23
+ // Right now this contains only the aarch64 implementation.
24
+ // aarch32 is not currently supported, for the following two reasons:
25
+ // 1. Due to differences in ISA between aarch32 and aarch64, intrinsics
26
+ // that work for aarch64 don't work for aarch32.
27
+ // 2. Android NDK r21 has problems compiling aarch32:
28
+ // Clang seg faults.
29
+ // https://github.com/android/ndk/issues/1248
30
+ // https://bugs.llvm.org/show_bug.cgi?id=45824
31
+ // Most likely we will do aarch32 support with inline asm.
32
+ #if defined(__aarch64__)
33
+
34
+ #ifdef __BIG_ENDIAN__
35
+ #error "Big endian is not supported."
36
+ #endif
37
+
38
+ #if defined(AT_BUILD_ARM_VEC256_WITH_SLEEF)
39
+ #define USE_SLEEF(sleef_code, non_sleef_code) sleef_code
40
+ #else
41
+ #define USE_SLEEF(sleef_code, non_sleef_code) non_sleef_code
42
+ #endif
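+ // Reading aid: USE_SLEEF(a, b) keeps exactly one of its two arguments.
+ // For example, in cos() below,
+ //   USE_SLEEF(Vectorized<float>(Sleef_cosf4_u10(values.val[0]),
+ //                               Sleef_cosf4_u10(values.val[1])),
+ //             map(std::cos))
+ // compiles to the Sleef calls when AT_BUILD_ARM_VEC256_WITH_SLEEF is
+ // defined and to map(std::cos) otherwise.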
43
+
44
+ template<int index, bool mask_val>
45
+ struct BlendRegs {
46
+ static float32x4_t impl(
47
+ const float32x4_t& a, const float32x4_t& b, float32x4_t& res);
48
+ };
49
+
50
+ template<int index>
51
+ struct BlendRegs<index, true>{
52
+ static float32x4_t impl(
53
+ const float32x4_t& a, const float32x4_t& b, float32x4_t& res) {
54
+ return vsetq_lane_f32(vgetq_lane_f32(b, index), res, index);
55
+ }
56
+ };
57
+
58
+ template<int index>
59
+ struct BlendRegs<index, false>{
60
+ static float32x4_t impl(
61
+ const float32x4_t& a, const float32x4_t& b, float32x4_t& res) {
62
+ return vsetq_lane_f32(vgetq_lane_f32(a, index), res, index);
63
+ }
64
+ };
65
+
66
+ template <> class Vectorized<float> {
67
+ private:
68
+ float32x4x2_t values;
69
+ public:
70
+ using value_type = float;
71
+ using size_type = int;
72
+ static constexpr size_type size() {
73
+ return 8;
74
+ }
75
+ Vectorized() {}
76
+ Vectorized(float32x4x2_t v) : values(v) {}
77
+ Vectorized(float val) : values{vdupq_n_f32(val), vdupq_n_f32(val) } {}
78
+ Vectorized(float val0, float val1, float val2, float val3,
79
+ float val4, float val5, float val6, float val7) :
80
+ values{val0, val1, val2, val3, val4, val5, val6, val7} {}
81
+ Vectorized(float32x4_t val0, float32x4_t val1) : values{val0, val1} {}
82
+ operator float32x4x2_t() const {
83
+ return values;
84
+ }
85
+ template <int64_t mask>
86
+ static Vectorized<float> blend(const Vectorized<float>& a, const Vectorized<float>& b) {
87
+ Vectorized<float> vec;
88
+ // 0.
89
+ vec.values.val[0] =
90
+ BlendRegs<0, (mask & 0x01)!=0>::impl(
91
+ a.values.val[0], b.values.val[0], vec.values.val[0]);
92
+ vec.values.val[0] =
93
+ BlendRegs<1, (mask & 0x02)!=0>::impl(
94
+ a.values.val[0], b.values.val[0], vec.values.val[0]);
95
+ vec.values.val[0] =
96
+ BlendRegs<2, (mask & 0x04)!=0>::impl(
97
+ a.values.val[0], b.values.val[0], vec.values.val[0]);
98
+ vec.values.val[0] =
99
+ BlendRegs<3, (mask & 0x08)!=0>::impl(
100
+ a.values.val[0], b.values.val[0], vec.values.val[0]);
101
+ // 1.
102
+ vec.values.val[1] =
103
+ BlendRegs<0, (mask & 0x10)!=0>::impl(
104
+ a.values.val[1], b.values.val[1], vec.values.val[1]);
105
+ vec.values.val[1] =
106
+ BlendRegs<1, (mask & 0x20)!=0>::impl(
107
+ a.values.val[1], b.values.val[1], vec.values.val[1]);
108
+ vec.values.val[1] =
109
+ BlendRegs<2, (mask & 0x40)!=0>::impl(
110
+ a.values.val[1], b.values.val[1], vec.values.val[1]);
111
+ vec.values.val[1] =
112
+ BlendRegs<3, (mask & 0x80)!=0>::impl(
113
+ a.values.val[1], b.values.val[1], vec.values.val[1]);
114
+ return vec;
115
+ }
116
+ static Vectorized<float> blendv(const Vectorized<float>& a, const Vectorized<float>& b,
117
+ const Vectorized<float>& mask) {
118
+ // TODO
119
+ // NB: This requires that each 32-bit lane of the mask
120
+ // be either all zeros or all ones.
121
+ // We could perhaps add an assert for this,
122
+ // but that would affect performance.
123
+ Vectorized<float> vec(mask.values);
124
+ vec.values.val[0] = vbslq_f32(
125
+ vreinterpretq_u32_f32(vec.values.val[0]),
126
+ b.values.val[0],
127
+ a.values.val[0]);
128
+ vec.values.val[1] = vbslq_f32(
129
+ vreinterpretq_u32_f32(vec.values.val[1]),
130
+ b.values.val[1],
131
+ a.values.val[1]);
132
+ return vec;
133
+ }
134
+ template<typename step_t>
135
+ static Vectorized<float> arange(float base = 0.f, step_t step = static_cast<step_t>(1)) {
136
+ const Vectorized<float> base_vec(base);
137
+ const Vectorized<float> step_vec(step);
138
+ const Vectorized<float> step_sizes(0, 1, 2, 3, 4, 5, 6, 7);
139
+ return fmadd(step_sizes, step_vec, base_vec);
140
+ }
141
+ static Vectorized<float> set(const Vectorized<float>& a, const Vectorized<float>& b,
142
+ int64_t count = size()) {
143
+ switch (count) {
144
+ case 0:
145
+ return a;
146
+ case 1:
147
+ {
148
+ Vectorized<float> vec;
149
+ static uint32x4_t mask_low = {0xFFFFFFFF, 0x0, 0x0, 0x0};
150
+ vec.values.val[0] = vreinterpretq_f32_u32(mask_low);
151
+ vec.values.val[1] = a.values.val[1];
152
+ vec.values.val[0] = vbslq_f32(
153
+ vreinterpretq_u32_f32(vec.values.val[0]),
154
+ b.values.val[0],
155
+ a.values.val[0]);
156
+ return vec;
157
+ }
158
+ case 2:
159
+ {
160
+ Vectorized<float> vec;
161
+ static uint32x4_t mask_low = {0xFFFFFFFF, 0xFFFFFFFF, 0x0, 0x0};
162
+ vec.values.val[0] = vreinterpretq_f32_u32(mask_low);
163
+ vec.values.val[1] = a.values.val[1];
164
+ vec.values.val[0] = vbslq_f32(
165
+ vreinterpretq_u32_f32(vec.values.val[0]),
166
+ b.values.val[0],
167
+ a.values.val[0]);
168
+ return vec;
169
+ }
170
+ case 3:
171
+ {
172
+ Vectorized<float> vec;
173
+ static uint32x4_t mask_low = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x0};
174
+ vec.values.val[0] = vreinterpretq_f32_u32(mask_low);
175
+ vec.values.val[1] = a.values.val[1];
176
+ vec.values.val[0] = vbslq_f32(
177
+ vreinterpretq_u32_f32(vec.values.val[0]),
178
+ b.values.val[0],
179
+ a.values.val[0]);
180
+ return vec;
181
+ }
182
+ case 4:
183
+ return Vectorized<float>(b.values.val[0], a.values.val[1]);
184
+ case 5:
185
+ {
186
+ Vectorized<float> vec;
187
+ static uint32x4_t mask_high = {0xFFFFFFFF, 0x0, 0x0, 0x0};
188
+ vec.values.val[0] = b.values.val[0];
189
+ vec.values.val[1] = vreinterpretq_f32_u32(mask_high);
190
+ vec.values.val[1] = vbslq_f32(
191
+ vreinterpretq_u32_f32(vec.values.val[1]),
192
+ b.values.val[1],
193
+ a.values.val[1]);
194
+ return vec;
195
+ }
196
+ case 6:
197
+ {
198
+ Vectorized<float> vec;
199
+ static uint32x4_t mask_high = {0xFFFFFFFF, 0xFFFFFFFF, 0x0, 0x0};
200
+ vec.values.val[0] = b.values.val[0];
201
+ vec.values.val[1] = vreinterpretq_f32_u32(mask_high);
202
+ vec.values.val[1] = vbslq_f32(
203
+ vreinterpretq_u32_f32(vec.values.val[1]),
204
+ b.values.val[1],
205
+ a.values.val[1]);
206
+ return vec;
207
+ }
208
+ case 7:
209
+ {
210
+ Vectorized<float> vec;
211
+ static uint32x4_t mask_high = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x0};
212
+ vec.values.val[0] = b.values.val[0];
213
+ vec.values.val[1] = vreinterpretq_f32_u32(mask_high);
214
+ vec.values.val[1] = vbslq_f32(
215
+ vreinterpretq_u32_f32(vec.values.val[1]),
216
+ b.values.val[1],
217
+ a.values.val[1]);
218
+ return vec;
219
+ }
220
+ }
221
+ return b;
222
+ }
223
+ static Vectorized<float> loadu(const void* ptr, int64_t count = size()) {
224
+ if (count == size()) {
225
+ return vld1q_f32_x2(reinterpret_cast<const float*>(ptr));
226
+ }
227
+ else if (count == (size() >> 1)) {
228
+ Vectorized<float> res;
229
+ res.values.val[0] = vld1q_f32(reinterpret_cast<const float*>(ptr));
230
+ res.values.val[1] = vdupq_n_f32(0.f);
231
+ return res;
232
+ }
233
+ else {
234
+ __at_align__ float tmp_values[size()];
235
+ for (const auto i : c10::irange(size())) {
236
+ tmp_values[i] = 0.0;
237
+ }
238
+ std::memcpy(
239
+ tmp_values,
240
+ reinterpret_cast<const float*>(ptr),
241
+ count * sizeof(float));
242
+ return vld1q_f32_x2(reinterpret_cast<const float*>(tmp_values));
243
+ }
244
+ }
245
+ void store(void* ptr, int64_t count = size()) const {
246
+ if (count == size()) {
247
+ vst1q_f32_x2(reinterpret_cast<float*>(ptr), values);
248
+ }
249
+ else if (count == (size() >> 1)) {
250
+ vst1q_f32(reinterpret_cast<float*>(ptr), values.val[0]);
251
+ }
252
+ else {
253
+ float tmp_values[size()];
254
+ vst1q_f32_x2(reinterpret_cast<float*>(tmp_values), values);
255
+ std::memcpy(ptr, tmp_values, count * sizeof(float));
256
+ }
257
+ }
258
+ inline const float32x4_t& get_low() const {
259
+ return values.val[0];
260
+ }
261
+ inline float32x4_t& get_low() {
262
+ return values.val[0];
263
+ }
264
+ inline const float32x4_t& get_high() const {
265
+ return values.val[1];
266
+ }
267
+ inline float32x4_t& get_high() {
268
+ return values.val[1];
269
+ }
270
+ // Very slow implementation of indexing.
271
+ // Only required because vec256_qint refers to this.
272
+ // Once we specialize that implementation for ARM
273
+ // this should be removed. TODO (kimishpatel)
274
+ float operator[](int idx) const {
275
+ __at_align__ float tmp[size()];
276
+ store(tmp);
277
+ return tmp[idx];
278
+ }
279
+ float operator[](int idx) {
280
+ __at_align__ float tmp[size()];
281
+ store(tmp);
282
+ return tmp[idx];
283
+ }
284
+ // For boolean version where we want to if any 1/all zero
285
+ // etc. can be done faster in a different way.
286
+ int zero_mask() const {
287
+ __at_align__ float tmp[size()];
288
+ store(tmp);
289
+ int mask = 0;
290
+ for (int i = 0; i < size(); ++ i) {
291
+ if (tmp[i] == 0.f) {
292
+ mask |= (1 << i);
293
+ }
294
+ }
295
+ return mask;
296
+ }
297
+ Vectorized<float> isnan() const {
298
+ __at_align__ float tmp[size()];
299
+ __at_align__ float res[size()];
300
+ store(tmp);
301
+ for (const auto i : c10::irange(size())) {
302
+ if (_isnan(tmp[i])) {
303
+ std::memset(static_cast<void*>(&res[i]), 0xFF, sizeof(float));
304
+ } else {
305
+ std::memset(static_cast<void*>(&res[i]), 0, sizeof(float));
306
+ }
307
+ }
308
+ return loadu(res);
309
+ };
310
+ bool has_inf_nan() const {
311
+ __at_align__ float tmp[size()];
312
+ store(tmp);
313
+ for (const auto i : c10::irange(size())) {
314
+ if(_isnan(tmp[i]) || _isinf(tmp[i])) {
315
+ return true;
316
+ }
317
+ }
318
+ return false;
319
+ }
320
+ Vectorized<float> map(float (*const f)(float)) const {
321
+ __at_align__ float tmp[size()];
322
+ store(tmp);
323
+ for (const auto i : c10::irange(size())) {
324
+ tmp[i] = f(tmp[i]);
325
+ }
326
+ return loadu(tmp);
327
+ }
328
+ Vectorized<float> abs() const {
329
+ return Vectorized<float>(vabsq_f32(values.val[0]), vabsq_f32(values.val[1]));
330
+ }
331
+ Vectorized<float> angle() const {
332
+ auto zero = Vectorized<float>(0);
333
+ auto pi = Vectorized<float>(c10::pi<float>);
334
+ auto tmp = blendv(zero, pi, *this < zero);
335
+ return blendv(tmp, *this, isnan());
336
+ }
337
+ Vectorized<float> real() const {
338
+ return *this;
339
+ }
340
+ Vectorized<float> imag() const {
341
+ return Vectorized<float>(0.f);
342
+ }
343
+ Vectorized<float> conj() const {
344
+ return *this;
345
+ }
346
+ Vectorized<float> acos() const {
347
+ return USE_SLEEF(
348
+ Vectorized<float>(Sleef_acosf4_u10(values.val[0]), Sleef_acosf4_u10(values.val[1])),
349
+ map(std::acos)
350
+ );
351
+ }
352
+ Vectorized<float> asin() const {
353
+ return USE_SLEEF(
354
+ Vectorized<float>(Sleef_asinf4_u10(values.val[0]), Sleef_asinf4_u10(values.val[1])),
355
+ map(std::asin)
356
+ );
357
+ }
358
+ Vectorized<float> atan() const {
359
+ return USE_SLEEF(
360
+ Vectorized<float>(Sleef_atanf4_u10(values.val[0]), Sleef_atanf4_u10(values.val[1])),
361
+ map(std::atan)
362
+ );
363
+ }
364
+ Vectorized<float> atanh() const {
365
+ return USE_SLEEF(
366
+ Vectorized<float>(Sleef_atanhf4_u10(values.val[0]), Sleef_atanhf4_u10(values.val[1])),
367
+ map(std::atanh)
368
+ );
369
+ }
370
+ Vectorized<float> atan2(const Vectorized<float> &exp) const {
371
+ USE_SLEEF(
372
+ {
373
+ return Vectorized<float>(Sleef_atan2f4_u10(values.val[0], exp.values.val[0]),
374
+ Sleef_atan2f4_u10(values.val[1], exp.values.val[1]));
375
+ },
376
+ {
377
+ __at_align__ float tmp[size()];
378
+ __at_align__ float tmp_exp[size()];
379
+ store(tmp);
380
+ exp.store(tmp_exp);
381
+ for (const auto i : c10::irange(size())) {
382
+ tmp[i] = std::atan2(tmp[i], tmp_exp[i]);
383
+ }
384
+ return loadu(tmp);
385
+ }
386
+ )
387
+ }
388
+ Vectorized<float> copysign(const Vectorized<float> &sign) const {
389
+ USE_SLEEF(
390
+ {
391
+ return Vectorized<float>(Sleef_copysignf4(values.val[0], sign.values.val[0]),
392
+ Sleef_copysignf4(values.val[1], sign.values.val[1]));
393
+ },
394
+ {
395
+ __at_align__ float tmp[size()];
396
+ __at_align__ float tmp_sign[size()];
397
+ store(tmp);
398
+ sign.store(tmp_sign);
399
+ for (size_type i = 0; i < size(); i++) {
400
+ tmp[i] = std::copysign(tmp[i], tmp_sign[i]);
401
+ }
402
+ return loadu(tmp);
403
+ }
404
+ )
405
+ }
406
+ Vectorized<float> erf() const;
407
+ Vectorized<float> erfc() const {
408
+ return USE_SLEEF(
409
+ Vectorized<float>(Sleef_erfcf4_u15(values.val[0]), Sleef_erfcf4_u15(values.val[1])),
410
+ map(std::erfc)
411
+ );
412
+ }
413
+ Vectorized<float> erfinv() const {
414
+ return map(calc_erfinv);
415
+ }
416
+ Vectorized<float> exp() const {
417
+ return USE_SLEEF(
418
+ Vectorized<float>(Sleef_expf4_u10(values.val[0]), Sleef_expf4_u10(values.val[1])),
419
+ map(std::exp)
420
+ );
421
+ }
422
+ Vectorized<float> exp2() const {
423
+ return USE_SLEEF(
424
+ Vectorized<float>(Sleef_exp2f4_u10(values.val[0]), Sleef_exp2f4_u10(values.val[1])),
425
+ map(std::exp2)
426
+ );
427
+ }
428
+ Vectorized<float> expm1() const {
429
+ return USE_SLEEF(
430
+ Vectorized<float>(Sleef_expm1f4_u10(values.val[0]), Sleef_expm1f4_u10(values.val[1])),
431
+ map(std::expm1)
432
+ );
433
+ }
434
+ Vectorized<float> exp_u20() const {
435
+ return exp();
436
+ }
437
+ Vectorized<float> fmod(const Vectorized<float>& q) const {
438
+ USE_SLEEF(
439
+ {
440
+ return Vectorized<float>(Sleef_fmodf4(values.val[0], q.values.val[0]),
441
+ Sleef_fmodf4(values.val[1], q.values.val[1]));
442
+ },
443
+ {
444
+ __at_align__ float tmp[size()];
445
+ __at_align__ float tmp_q[size()];
446
+ store(tmp);
447
+ q.store(tmp_q);
448
+ for (const auto i : c10::irange(size())) {
449
+ tmp[i] = std::fmod(tmp[i], tmp_q[i]);
450
+ }
451
+ return loadu(tmp);
452
+ }
453
+ )
454
+ }
455
+ Vectorized<float> hypot(const Vectorized<float> &b) const {
456
+ USE_SLEEF(
457
+ {
458
+ return Vectorized<float>(Sleef_hypotf4_u05(values.val[0], b.values.val[0]),
459
+ Sleef_hypotf4_u05(values.val[1], b.values.val[1]));
460
+ },
461
+ {
462
+ __at_align__ float tmp[size()];
463
+ __at_align__ float tmp_b[size()];
464
+ store(tmp);
465
+ b.store(tmp_b);
466
+ for (const auto i : c10::irange(size())) {
467
+ tmp[i] = std::hypot(tmp[i], tmp_b[i]);
468
+ }
469
+ return loadu(tmp);
470
+ }
471
+ )
472
+ }
473
+ Vectorized<float> i0() const {
474
+ return map(calc_i0);
475
+ }
476
+ Vectorized<float> i0e() const {
477
+ return map(calc_i0e);
478
+ }
479
+ Vectorized<float> digamma() const {
480
+ return map(calc_digamma);
481
+ }
482
+ Vectorized<float> igamma(const Vectorized<float> &x) const {
483
+ __at_align__ float tmp[size()];
484
+ __at_align__ float tmp_x[size()];
485
+ store(tmp);
486
+ x.store(tmp_x);
487
+ for (const auto i : c10::irange(size())) {
488
+ tmp[i] = calc_igamma(tmp[i], tmp_x[i]);
489
+ }
490
+ return loadu(tmp);
491
+ }
492
+ Vectorized<float> igammac(const Vectorized<float> &x) const {
493
+ __at_align__ float tmp[size()];
494
+ __at_align__ float tmp_x[size()];
495
+ store(tmp);
496
+ x.store(tmp_x);
497
+ for (const auto i : c10::irange(size())) {
498
+ tmp[i] = calc_igammac(tmp[i], tmp_x[i]);
499
+ }
500
+ return loadu(tmp);
501
+ }
502
+ Vectorized<float> log() const {
503
+ return USE_SLEEF(
504
+ Vectorized<float>(Sleef_logf4_u10(values.val[0]), Sleef_logf4_u10(values.val[1])),
505
+ map(std::log)
506
+ );
507
+ }
508
+ Vectorized<float> log10() const {
509
+ return USE_SLEEF(
510
+ Vectorized<float>(Sleef_log10f4_u10(values.val[0]), Sleef_log10f4_u10(values.val[1])),
511
+ map(std::log10)
512
+ );
513
+ }
514
+ Vectorized<float> log1p() const {
515
+ return USE_SLEEF(
516
+ Vectorized<float>(Sleef_log1pf4_u10(values.val[0]), Sleef_log1pf4_u10(values.val[1])),
517
+ map(std::log1p)
518
+ );
519
+ }
520
+ Vectorized<float> log2() const {
521
+ return USE_SLEEF(
522
+ Vectorized<float>(Sleef_log2f4_u10(values.val[0]), Sleef_log2f4_u10(values.val[1])),
523
+ map(std::log2)
524
+ );
525
+ }
526
+ Vectorized<float> nextafter(const Vectorized<float> &b) const {
527
+ USE_SLEEF(
528
+ {
529
+ return Vectorized<float>(Sleef_nextafterf4(values.val[0], b.values.val[0]),
530
+ Sleef_nextafterf4(values.val[1], b.values.val[1]));
531
+ },
532
+ {
533
+ __at_align__ float tmp[size()];
534
+ __at_align__ float tmp_b[size()];
535
+ store(tmp);
536
+ b.store(tmp_b);
537
+ for (const auto i : c10::irange(size())) {
538
+ tmp[i] = std::nextafter(tmp[i], tmp_b[i]);
539
+ }
540
+ return loadu(tmp);
541
+ }
542
+ )
543
+ }
544
+ Vectorized<float> frac() const;
545
+ Vectorized<float> sin() const {
546
+ return USE_SLEEF(
547
+ Vectorized<float>(Sleef_sinf4_u10(values.val[0]), Sleef_sinf4_u10(values.val[1])),
548
+ map(std::sin)
549
+ );
550
+ }
551
+ Vectorized<float> sinh() const {
552
+ return USE_SLEEF(
553
+ Vectorized<float>(Sleef_sinhf4_u10(values.val[0]), Sleef_sinhf4_u10(values.val[1])),
554
+ map(std::sinh)
555
+ );
556
+ }
557
+ Vectorized<float> cos() const {
558
+ return USE_SLEEF(
559
+ Vectorized<float>(Sleef_cosf4_u10(values.val[0]), Sleef_cosf4_u10(values.val[1])),
560
+ map(std::cos)
561
+ );
562
+ }
563
+ Vectorized<float> cosh() const {
564
+ return USE_SLEEF(
565
+ Vectorized<float>(Sleef_coshf4_u10(values.val[0]), Sleef_coshf4_u10(values.val[1])),
566
+ map(std::cosh)
567
+ );
568
+ }
569
+ Vectorized<float> ceil() const {
570
+ return map(at::native::ceil_impl);
571
+ }
572
+ Vectorized<float> floor() const {
573
+ return map(at::native::floor_impl);
574
+ }
575
+ Vectorized<float> neg() const {
576
+ return Vectorized<float>(
577
+ vnegq_f32(values.val[0]),
578
+ vnegq_f32(values.val[1]));
579
+ }
580
+ Vectorized<float> round() const {
581
+ // We do not use std::round because we would like to round midway numbers to the nearest even integer.
582
+ return map(at::native::round_impl);
583
+ }
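The comment above captures the key behavioral difference: std::round sends halfway cases away from zero, while round-to-nearest-even keeps them statistically unbiased. A standalone scalar comparison (plain C++, assuming the default FE_TONEAREST rounding mode):

#include <cfenv>
#include <cmath>
#include <cstdio>

int main() {
  std::fesetround(FE_TONEAREST);  // round-to-nearest-even, the usual default
  for (double x : {0.5, 1.5, 2.5, 3.5}) {
    // std::round:     1, 2, 3, 4  (halfway cases go away from zero)
    // std::nearbyint: 0, 2, 2, 4  (halfway cases go to the nearest even)
    std::printf("x=%.1f  round=%.1f  nearbyint=%.1f\n", x, std::round(x), std::nearbyint(x));
  }
  return 0;
}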
584
+ Vectorized<float> tan() const {
585
+ return USE_SLEEF(
586
+ Vectorized<float>(Sleef_tanf4_u10(values.val[0]), Sleef_tanf4_u10(values.val[1])),
587
+ map(std::tan)
588
+ );
589
+ }
590
+ Vectorized<float> tanh() const {
591
+ return USE_SLEEF(
592
+ Vectorized<float>(Sleef_tanhf4_u10(values.val[0]), Sleef_tanhf4_u10(values.val[1])),
593
+ map(std::tanh)
594
+ );
595
+ }
596
+ Vectorized<float> trunc() const {
597
+ float32x4_t r0 = vrndq_f32(values.val[0]);
598
+ float32x4_t r1 = vrndq_f32(values.val[1]);
599
+ return Vectorized<float>(r0, r1);
600
+ }
601
+ Vectorized<float> lgamma() const {
602
+ return USE_SLEEF(
603
+ Vectorized<float>(Sleef_lgammaf4_u10(values.val[0]), Sleef_lgammaf4_u10(values.val[1])),
604
+ map(std::lgamma)
605
+ );
606
+ }
607
+ Vectorized<float> sqrt() const {
608
+ return Vectorized<float>(
609
+ vsqrtq_f32(values.val[0]),
610
+ vsqrtq_f32(values.val[1]));
611
+ }
612
+ Vectorized<float> reciprocal() const {
613
+ auto r0 = vdivq_f32(vdupq_n_f32(1.0f), values.val[0]);
614
+ auto r1 = vdivq_f32(vdupq_n_f32(1.0f), values.val[1]);
615
+ return Vectorized<float>(r0, r1);
616
+ }
617
+ Vectorized<float> rsqrt() const {
618
+ return this->sqrt().reciprocal();
619
+ }
620
+ Vectorized<float> pow(const Vectorized<float> &exp) const {
621
+ USE_SLEEF(
622
+ {
623
+ return Vectorized<float>(Sleef_powf4_u10(values.val[0], exp.values.val[0]),
624
+ Sleef_powf4_u10(values.val[1], exp.values.val[1]));
625
+ },
626
+ {
627
+ __at_align__ float tmp[size()];
628
+ __at_align__ float tmp_exp[size()];
629
+ store(tmp);
630
+ exp.store(tmp_exp);
631
+ for (const auto i : c10::irange(size())) {
632
+ tmp[i] = std::pow(tmp[i], tmp_exp[i]);
633
+ }
634
+ return loadu(tmp);
635
+ }
636
+ )
637
+ }
638
+ Vectorized<float> operator==(const Vectorized<float>& other) const {
639
+ float32x4_t r0 =
640
+ vreinterpretq_f32_u32(vceqq_f32(values.val[0], other.values.val[0]));
641
+ float32x4_t r1 =
642
+ vreinterpretq_f32_u32(vceqq_f32(values.val[1], other.values.val[1]));
643
+ return Vectorized<float>(r0, r1);
644
+ }
645
+
646
+ Vectorized<float> operator!=(const Vectorized<float>& other) const {
647
+ float32x4_t r0 = vreinterpretq_f32_u32(
648
+ vmvnq_u32(vceqq_f32(values.val[0], other.values.val[0])));
649
+ float32x4_t r1 = vreinterpretq_f32_u32(
650
+ vmvnq_u32(vceqq_f32(values.val[1], other.values.val[1])));
651
+ return Vectorized<float>(r0, r1);
652
+ }
653
+
654
+ Vectorized<float> operator<(const Vectorized<float>& other) const {
655
+ float32x4_t r0 =
656
+ vreinterpretq_f32_u32(vcltq_f32(values.val[0], other.values.val[0]));
657
+ float32x4_t r1 =
658
+ vreinterpretq_f32_u32(vcltq_f32(values.val[1], other.values.val[1]));
659
+ return Vectorized<float>(r0, r1);
660
+ }
661
+
662
+ Vectorized<float> operator<=(const Vectorized<float>& other) const {
663
+ float32x4_t r0 =
664
+ vreinterpretq_f32_u32(vcleq_f32(values.val[0], other.values.val[0]));
665
+ float32x4_t r1 =
666
+ vreinterpretq_f32_u32(vcleq_f32(values.val[1], other.values.val[1]));
667
+ return Vectorized<float>(r0, r1);
668
+ }
669
+
670
+ Vectorized<float> operator>(const Vectorized<float>& other) const {
671
+ float32x4_t r0 =
672
+ vreinterpretq_f32_u32(vcgtq_f32(values.val[0], other.values.val[0]));
673
+ float32x4_t r1 =
674
+ vreinterpretq_f32_u32(vcgtq_f32(values.val[1], other.values.val[1]));
675
+ return Vectorized<float>(r0, r1);
676
+ }
677
+
678
+ Vectorized<float> operator>=(const Vectorized<float>& other) const {
679
+ float32x4_t r0 =
680
+ vreinterpretq_f32_u32(vcgeq_f32(values.val[0], other.values.val[0]));
681
+ float32x4_t r1 =
682
+ vreinterpretq_f32_u32(vcgeq_f32(values.val[1], other.values.val[1]));
683
+ return Vectorized<float>(r0, r1);
684
+ }
685
+
686
+ Vectorized<float> eq(const Vectorized<float>& other) const;
687
+ Vectorized<float> ne(const Vectorized<float>& other) const;
688
+ Vectorized<float> gt(const Vectorized<float>& other) const;
689
+ Vectorized<float> ge(const Vectorized<float>& other) const;
690
+ Vectorized<float> lt(const Vectorized<float>& other) const;
691
+ Vectorized<float> le(const Vectorized<float>& other) const;
692
+ };
693
+
694
+ template <>
695
+ Vectorized<float> inline operator+(const Vectorized<float>& a, const Vectorized<float>& b) {
696
+ float32x4_t r0 = vaddq_f32(a.get_low(), b.get_low());
697
+ float32x4_t r1 = vaddq_f32(a.get_high(), b.get_high());
698
+ return Vectorized<float>(r0, r1);
699
+ }
700
+
701
+ template <>
702
+ Vectorized<float> inline operator-(const Vectorized<float>& a, const Vectorized<float>& b) {
703
+ float32x4_t r0 = vsubq_f32(a.get_low(), b.get_low());
704
+ float32x4_t r1 = vsubq_f32(a.get_high(), b.get_high());
705
+ return Vectorized<float>(r0, r1);
706
+ }
707
+
708
+ template <>
709
+ Vectorized<float> inline operator*(const Vectorized<float>& a, const Vectorized<float>& b) {
710
+ float32x4_t r0 = vmulq_f32(a.get_low(), b.get_low());
711
+ float32x4_t r1 = vmulq_f32(a.get_high(), b.get_high());
712
+ return Vectorized<float>(r0, r1);
713
+ }
714
+
715
+ template <>
716
+ Vectorized<float> inline operator/(const Vectorized<float>& a, const Vectorized<float>& b) {
717
+ float32x4_t r0 = vdivq_f32(a.get_low(), b.get_low());
718
+ float32x4_t r1 = vdivq_f32(a.get_high(), b.get_high());
719
+ return Vectorized<float>(r0, r1);
720
+ }
721
+
722
+ // frac. Implement this here so we can use subtraction
723
+ inline Vectorized<float> Vectorized<float>::frac() const {
724
+ return *this - this->trunc();
725
+ }
726
+
727
+ // Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
728
+ // either input is a NaN.
729
+ template <>
730
+ Vectorized<float> inline maximum(const Vectorized<float>& a, const Vectorized<float>& b) {
731
+ float32x4_t r0 = vmaxq_f32(a.get_low(), b.get_low());
732
+ float32x4_t r1 = vmaxq_f32(a.get_high(), b.get_high());
733
+ return Vectorized<float>(r0, r1);
734
+ }
735
+
736
+ // Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
737
+ // either input is a NaN.
738
+ template <>
739
+ Vectorized<float> inline minimum(const Vectorized<float>& a, const Vectorized<float>& b) {
740
+ float32x4_t r0 = vminq_f32(a.get_low(), b.get_low());
741
+ float32x4_t r1 = vminq_f32(a.get_high(), b.get_high());
742
+ return Vectorized<float>(r0, r1);
743
+ }
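This is a different contract from std::fmax/std::fmin, which return the non-NaN operand when exactly one input is NaN. A scalar model of the propagating semantics documented above (not taken from the header, just a reference point):

#include <cmath>
#include <cstdio>
#include <limits>

// Scalar sketch of a NaN-propagating maximum.
float propagating_max(float a, float b) {
  if (std::isnan(a) || std::isnan(b)) {
    return std::numeric_limits<float>::quiet_NaN();
  }
  return a > b ? a : b;
}

int main() {
  float nan = std::numeric_limits<float>::quiet_NaN();
  std::printf("std::fmax(1, NaN)       = %f\n", std::fmax(1.0f, nan));        // 1.000000
  std::printf("propagating_max(1, NaN) = %f\n", propagating_max(1.0f, nan));  // nan
  return 0;
}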
744
+
745
+ template <>
746
+ Vectorized<float> inline clamp(const Vectorized<float>& a, const Vectorized<float>& min, const Vectorized<float>& max) {
747
+ return minimum(max, maximum(min, a));
748
+ }
749
+
750
+ template <>
751
+ Vectorized<float> inline clamp_max(const Vectorized<float>& a, const Vectorized<float>& max) {
752
+ return minimum(max, a);
753
+ }
754
+
755
+ template <>
756
+ Vectorized<float> inline clamp_min(const Vectorized<float>& a, const Vectorized<float>& min) {
757
+ return maximum(min, a);
758
+ }
759
+
760
+ template <>
761
+ Vectorized<float> inline operator&(const Vectorized<float>& a, const Vectorized<float>& b) {
762
+ float32x4_t r0 = vreinterpretq_f32_u32(vandq_u32(
763
+ vreinterpretq_u32_f32(a.get_low()),
764
+ vreinterpretq_u32_f32(b.get_low())));
765
+ float32x4_t r1 = vreinterpretq_f32_u32(vandq_u32(
766
+ vreinterpretq_u32_f32(a.get_high()),
767
+ vreinterpretq_u32_f32(b.get_high())));
768
+ return Vectorized<float>(r0, r1);
769
+ }
770
+
771
+ template <>
772
+ Vectorized<float> inline operator|(const Vectorized<float>& a, const Vectorized<float>& b) {
773
+ float32x4_t r0 = vreinterpretq_f32_u32(vorrq_u32(
774
+ vreinterpretq_u32_f32(a.get_low()),
775
+ vreinterpretq_u32_f32(b.get_low())));
776
+ float32x4_t r1 = vreinterpretq_f32_u32(vorrq_u32(
777
+ vreinterpretq_u32_f32(a.get_high()),
778
+ vreinterpretq_u32_f32(b.get_high())));
779
+ return Vectorized<float>(r0, r1);
780
+ }
781
+
782
+ template <>
783
+ Vectorized<float> inline operator^(const Vectorized<float>& a, const Vectorized<float>& b) {
784
+ float32x4_t r0 = vreinterpretq_f32_u32(veorq_u32(
785
+ vreinterpretq_u32_f32(a.get_low()),
786
+ vreinterpretq_u32_f32(b.get_low())));
787
+ float32x4_t r1 = vreinterpretq_f32_u32(veorq_u32(
788
+ vreinterpretq_u32_f32(a.get_high()),
789
+ vreinterpretq_u32_f32(b.get_high())));
790
+ return Vectorized<float>(r0, r1);
791
+ }
792
+
793
+ inline Vectorized<float> Vectorized<float>::eq(const Vectorized<float>& other) const {
794
+ return (*this == other) & Vectorized<float>(1.0f);
795
+ }
796
+
797
+ inline Vectorized<float> Vectorized<float>::ne(const Vectorized<float>& other) const {
798
+ return (*this != other) & Vectorized<float>(1.0f);
799
+ }
800
+
801
+ inline Vectorized<float> Vectorized<float>::gt(const Vectorized<float>& other) const {
802
+ return (*this > other) & Vectorized<float>(1.0f);
803
+ }
804
+
805
+ inline Vectorized<float> Vectorized<float>::ge(const Vectorized<float>& other) const {
806
+ return (*this >= other) & Vectorized<float>(1.0f);
807
+ }
808
+
809
+ inline Vectorized<float> Vectorized<float>::lt(const Vectorized<float>& other) const {
810
+ return (*this < other) & Vectorized<float>(1.0f);
811
+ }
812
+
813
+ inline Vectorized<float> Vectorized<float>::le(const Vectorized<float>& other) const {
814
+ return (*this <= other) & Vectorized<float>(1.0f);
815
+ }
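The eq/ne/gt/ge/lt/le helpers rely on the comparison operators above returning an all-ones or all-zeros bit pattern per lane; ANDing that mask with 1.0f turns it into a numeric 1.0f or 0.0f. A scalar model of the same bit trick (standard C++, not ATen code):

#include <cstdint>
#include <cstdio>
#include <cstring>

// An all-ones mask ANDed with the bits of 1.0f gives 1.0f; all-zeros gives 0.0f.
float mask_to_float(bool lane_true) {
  uint32_t mask = lane_true ? 0xFFFFFFFFu : 0u;
  float one = 1.0f;
  uint32_t one_bits;
  std::memcpy(&one_bits, &one, sizeof(one_bits));
  uint32_t out_bits = mask & one_bits;
  float out;
  std::memcpy(&out, &out_bits, sizeof(out));
  return out;
}

int main() {
  std::printf("%f %f\n", mask_to_float(true), mask_to_float(false));  // 1.000000 0.000000
  return 0;
}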
816
+
817
+ template <>
818
+ inline void convert(const float* src, int32_t* dst, int64_t n) {
819
+ int64_t i;
820
+ #pragma unroll
821
+ for (i = 0; i <= (n - Vectorized<float>::size()); i += Vectorized<float>::size()) {
822
+ vst1q_s32(dst + i, vcvtq_s32_f32(vld1q_f32(src + i)));
823
+ vst1q_s32(dst + i + 4, vcvtq_s32_f32(vld1q_f32(src + i + 4)));
824
+ }
825
+ #pragma unroll
826
+ for (; i < n; i++) {
827
+ dst[i] = static_cast<int32_t>(src[i]);
828
+ }
829
+ }
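In this float-to-int32 conversion the vector path and the scalar tail agree because both truncate toward zero: vcvtq_s32_f32 uses round-toward-zero, matching static_cast<int32_t>. A quick scalar illustration of the truncation (plain C++):

#include <cstdint>
#include <cstdio>

int main() {
  for (float x : {1.9f, -1.9f, 2.5f, -2.5f}) {
    std::printf("%5.1f -> %d\n", x, static_cast<int32_t>(x));  // 1, -1, 2, -2
  }
  return 0;
}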
830
+
831
+ template <>
832
+ inline void convert(const int32_t* src, float* dst, int64_t n) {
833
+ int64_t i;
834
+ #pragma unroll
835
+ for (i = 0; i <= (n - Vectorized<float>::size()); i += Vectorized<float>::size()) {
836
+ vst1q_f32(dst + i, vcvtq_f32_s32(vld1q_s32(src + i)));
837
+ vst1q_f32(dst + i + 4, vcvtq_f32_s32(vld1q_s32(src + i + 4)));
838
+ }
839
+ #pragma unroll
840
+ for (; i < n; i++) {
841
+ dst[i] = static_cast<float>(src[i]);
842
+ }
843
+ }
844
+
845
+ template <>
846
+ Vectorized<float> inline fmadd(const Vectorized<float>& a, const Vectorized<float>& b, const Vectorized<float>& c) {
847
+ float32x4_t r0 = vfmaq_f32(c.get_low(), a.get_low(), b.get_low());
848
+ float32x4_t r1 = vfmaq_f32(c.get_high(), a.get_high(), b.get_high());
849
+ return Vectorized<float>(r0, r1);
850
+ }
851
+
852
+ template <>
853
+ Vectorized<float> inline fmsub(const Vectorized<float>& a, const Vectorized<float>& b, const Vectorized<float>& c) {
854
+ float32x4_t r0 = vfmsq_f32(c.get_low(), a.get_low(), b.get_low());
855
+ float32x4_t r1 = vfmsq_f32(c.get_high(), a.get_high(), b.get_high());
856
+ return Vectorized<float>(r0, r1);
857
+ }
858
+
859
+ inline Vectorized<float> Vectorized<float>::erf() const{
860
+ // constants
861
+ const Vectorized<float> neg_zero_vec(-0.f);
862
+ const Vectorized<float> one_vec(1.0f);
863
+ const Vectorized<float> p(0.3275911f);
864
+ const Vectorized<float> p1(0.254829592f);
865
+ const Vectorized<float> p2(-0.284496736f);
866
+ const Vectorized<float> p3(1.421413741f);
867
+ const Vectorized<float> p4(-1.453152027f);
868
+ const Vectorized<float> p5(1.061405429f);
869
+ // sign(x)
870
+ auto sign_mask = neg_zero_vec & *this;
871
+ auto abs_vec = this->abs();
872
+ // t = 1 / (p * abs(x) + 1)
873
+ auto tmp0 = fmadd(p, abs_vec, one_vec);
874
+ auto t = one_vec / tmp0;
875
+ // r = p5 * t ^ 4 + p4 * t ^ 3 + p3 * t ^ 2 + p2 * t + p1
876
+ auto tmp1 = fmadd(p5, t, p4);
877
+ auto tmp2 = fmadd(tmp1, t, p3);
878
+ auto tmp3 = fmadd(tmp2, t, p2);
879
+ auto r = fmadd(tmp3, t, p1);
880
+ // - exp(- x * x)
881
+ auto pow_2 = (*this) * (*this);
882
+ auto neg_pow_2 = pow_2 ^ neg_zero_vec;
883
+ auto tmp4 = neg_pow_2.map(std::exp); // This can be swapped for a faster implementation of exp.
884
+ auto tmp5 = tmp4 ^ neg_zero_vec;
885
+ // erf(x) = sign(x) * (1 - r * t * exp(- x * x))
886
+ auto tmp6 = t * tmp5;
887
+ auto tmp7 = fmadd(tmp6, r, one_vec);
888
+ return tmp7 ^ sign_mask;
889
+ }
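The constants above are the classic Abramowitz & Stegun 7.1.26 polynomial approximation of erf (absolute error on the order of 1e-7). A scalar transcription of the same formula, convenient for sanity-checking the vector path against std::erf (illustration only, not part of the header):

#include <cmath>
#include <cstdio>

float erf_approx(float x) {
  const float p  = 0.3275911f;
  const float p1 = 0.254829592f, p2 = -0.284496736f, p3 = 1.421413741f,
              p4 = -1.453152027f, p5 = 1.061405429f;
  float sign = std::copysign(1.0f, x);
  float a = std::fabs(x);
  float t = 1.0f / (p * a + 1.0f);                          // t = 1 / (1 + p*|x|)
  float r = (((p5 * t + p4) * t + p3) * t + p2) * t + p1;   // p5*t^4 + ... + p1
  return sign * (1.0f - r * t * std::exp(-a * a));
}

int main() {
  for (float x : {-2.0f, -0.5f, 0.0f, 0.5f, 2.0f}) {
    std::printf("x=% .2f  approx=% .7f  std::erf=% .7f\n", x, erf_approx(x), std::erf(x));
  }
  return 0;
}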
890
+ #endif /* defined(aarch64) */
891
+
892
+ }} // namespace at::vec::CPU_CAPABILITY
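As a rough usage sketch of the interface defined above (assuming a PyTorch build where <ATen/cpu/vec/vec.h> is on the include path and a NEON or default Vectorized<float> implementation is selected):

#include <ATen/cpu/vec/vec.h>
#include <cstdio>

using at::vec::Vectorized;

int main() {
  constexpr int N = Vectorized<float>::size();
  float in[N];
  for (int i = 0; i < N; ++i) in[i] = 0.25f * i;

  auto v = Vectorized<float>::loadu(in);                  // unaligned load
  auto r = (v * v + Vectorized<float>(1.0f)).sqrt();      // elementwise sqrt(x*x + 1)

  float out[N];
  r.store(out);
  for (int i = 0; i < N; ++i) std::printf("%f\n", out[i]);
  return 0;
}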
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_int.h ADDED
@@ -0,0 +1,1586 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+ #include <ATen/cpu/vec/vec_base.h>
8
+ #include <c10/macros/Macros.h>
9
+ #include <c10/util/irange.h>
10
+
11
+ namespace at::vec {
12
+ inline namespace CPU_CAPABILITY {
13
+
14
+ #ifdef CPU_CAPABILITY_AVX2
15
+
16
+ struct Vectorizedi {
17
+ protected:
18
+ __m256i values;
19
+
20
+ static inline __m256i invert(const __m256i& v) {
21
+ const auto ones = _mm256_set1_epi64x(-1);
22
+ return _mm256_xor_si256(ones, v);
23
+ }
24
+ public:
25
+ Vectorizedi() {}
26
+ Vectorizedi(__m256i v) : values(v) {}
27
+ operator __m256i() const {
28
+ return values;
29
+ }
30
+ };
31
+
32
+ #else
33
+
34
+ struct Vectorizedi {}; // dummy definition to make Vectorizedi always defined
35
+
36
+ #endif // CPU_CAPABILITY_AVX2
37
+
38
+ #ifdef CPU_CAPABILITY_AVX2
39
+
40
+ template <>
41
+ class Vectorized<int64_t> : public Vectorizedi {
42
+ private:
43
+ static const Vectorized<int64_t> ones;
44
+ public:
45
+ using value_type = int64_t;
46
+ using size_type = int;
47
+ static constexpr size_type size() {
48
+ return 4;
49
+ }
50
+ using Vectorizedi::Vectorizedi;
51
+ Vectorized() {}
52
+ Vectorized(int64_t v) { values = _mm256_set1_epi64x(v); }
53
+ Vectorized(int64_t val1, int64_t val2, int64_t val3, int64_t val4) {
54
+ values = _mm256_setr_epi64x(val1, val2, val3, val4);
55
+ }
56
+ template <int64_t mask>
57
+ static Vectorized<int64_t> blend(Vectorized<int64_t> a, Vectorized<int64_t> b) {
58
+ __at_align__ int64_t tmp_values[size()];
59
+ a.store(tmp_values);
60
+ if (mask & 0x01)
61
+ tmp_values[0] = _mm256_extract_epi64(b.values, 0);
62
+ if (mask & 0x02)
63
+ tmp_values[1] = _mm256_extract_epi64(b.values, 1);
64
+ if (mask & 0x04)
65
+ tmp_values[2] = _mm256_extract_epi64(b.values, 2);
66
+ if (mask & 0x08)
67
+ tmp_values[3] = _mm256_extract_epi64(b.values, 3);
68
+ return loadu(tmp_values);
69
+ }
70
+ static Vectorized<int64_t> blendv(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b,
71
+ const Vectorized<int64_t>& mask) {
72
+ return _mm256_blendv_epi8(a.values, b.values, mask.values);
73
+ }
74
+ template <typename step_t>
75
+ static Vectorized<int64_t> arange(int64_t base = 0, step_t step = static_cast<step_t>(1)) {
76
+ return Vectorized<int64_t>(base, base + step, base + 2 * step, base + 3 * step);
77
+ }
78
+ static Vectorized<int64_t>
79
+ set(Vectorized<int64_t> a, Vectorized<int64_t> b, int64_t count = size()) {
80
+ switch (count) {
81
+ case 0:
82
+ return a;
83
+ case 1:
84
+ return blend<1>(a, b);
85
+ case 2:
86
+ return blend<3>(a, b);
87
+ case 3:
88
+ return blend<7>(a, b);
89
+ }
90
+ return b;
91
+ }
92
+ static Vectorized<int64_t> loadu(const void* ptr) {
93
+ return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(ptr));
94
+ }
95
+ static Vectorized<int64_t> loadu(const void* ptr, int64_t count) {
96
+ __at_align__ int64_t tmp_values[size()];
97
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
98
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
99
+ // instructions while a loop would be compiled to one instruction.
100
+ for (const auto i : c10::irange(size())) {
101
+ tmp_values[i] = 0;
102
+ }
103
+ std::memcpy(tmp_values, ptr, count * sizeof(int64_t));
104
+ return loadu(tmp_values);
105
+ }
106
+ void store(void* ptr, int count = size()) const {
107
+ if (count == size()) {
108
+ // ptr need not be aligned here. See
109
+ // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm256-storeu-si256.html
110
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values);
111
+ } else if (count > 0) {
112
+ __at_align__ int64_t tmp_values[size()];
113
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values);
114
+ std::memcpy(ptr, tmp_values, count * sizeof(int64_t));
115
+ }
116
+ }
117
+ const int64_t& operator[](int idx) const = delete;
118
+ int64_t& operator[](int idx) = delete;
119
+ Vectorized<int64_t> abs() const {
120
+ auto zero = _mm256_set1_epi64x(0);
121
+ auto is_larger = _mm256_cmpgt_epi64(zero, values);
122
+ auto inverse = _mm256_xor_si256(values, is_larger);
123
+ return _mm256_sub_epi64(inverse, is_larger);
124
+ }
125
+ Vectorized<int64_t> real() const {
126
+ return *this;
127
+ }
128
+ Vectorized<int64_t> imag() const {
129
+ return _mm256_set1_epi64x(0);
130
+ }
131
+ Vectorized<int64_t> conj() const {
132
+ return *this;
133
+ }
134
+ Vectorized<int64_t> neg() const;
135
+ Vectorized<int64_t> operator==(const Vectorized<int64_t>& other) const {
136
+ return _mm256_cmpeq_epi64(values, other.values);
137
+ }
138
+ Vectorized<int64_t> operator!=(const Vectorized<int64_t>& other) const {
139
+ return invert(_mm256_cmpeq_epi64(values, other.values));
140
+ }
141
+ Vectorized<int64_t> operator<(const Vectorized<int64_t>& other) const {
142
+ return _mm256_cmpgt_epi64(other.values, values);
143
+ }
144
+ Vectorized<int64_t> operator<=(const Vectorized<int64_t>& other) const {
145
+ return invert(_mm256_cmpgt_epi64(values, other.values));
146
+ }
147
+ Vectorized<int64_t> operator>(const Vectorized<int64_t>& other) const {
148
+ return _mm256_cmpgt_epi64(values, other.values);
149
+ }
150
+ Vectorized<int64_t> operator>=(const Vectorized<int64_t>& other) const {
151
+ return invert(_mm256_cmpgt_epi64(other.values, values));
152
+ }
153
+
154
+ Vectorized<int64_t> eq(const Vectorized<int64_t>& other) const;
155
+ Vectorized<int64_t> ne(const Vectorized<int64_t>& other) const;
156
+ Vectorized<int64_t> gt(const Vectorized<int64_t>& other) const;
157
+ Vectorized<int64_t> ge(const Vectorized<int64_t>& other) const;
158
+ Vectorized<int64_t> lt(const Vectorized<int64_t>& other) const;
159
+ Vectorized<int64_t> le(const Vectorized<int64_t>& other) const;
160
+ };
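The count-taking loadu/store overloads above exist mainly for loop tails shorter than a full vector. A rough sketch of that pattern (assumes an AVX2 PyTorch build where Vectorized<int64_t>::size() == 4 and uses the operator+ defined later in this header):

#include <ATen/cpu/vec/vec.h>
#include <cstdio>
#include <vector>

using Vec = at::vec::Vectorized<int64_t>;

int main() {
  std::vector<int64_t> data = {1, 2, 3, 4, 5, 6};          // 6 is not a multiple of Vec::size()
  int64_t i = 0;
  for (; i + Vec::size() <= static_cast<int64_t>(data.size()); i += Vec::size()) {
    auto v = Vec::loadu(data.data() + i);
    (v + Vec(10)).store(data.data() + i);                  // full-width iteration
  }
  if (i < static_cast<int64_t>(data.size())) {
    int64_t rem = static_cast<int64_t>(data.size()) - i;
    auto v = Vec::loadu(data.data() + i, rem);             // zero-fills the unused lanes
    (v + Vec(10)).store(data.data() + i, rem);             // writes back only `rem` elements
  }
  for (auto x : data) std::printf("%lld ", static_cast<long long>(x));
  std::printf("\n");
  return 0;
}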
161
+
162
+ template <>
163
+ class Vectorized<int32_t> : public Vectorizedi {
164
+ private:
165
+ static const Vectorized<int32_t> ones;
166
+ public:
167
+ using value_type = int32_t;
168
+ static constexpr int size() {
169
+ return 8;
170
+ }
171
+ using Vectorizedi::Vectorizedi;
172
+ Vectorized() {}
173
+ Vectorized(int32_t v) { values = _mm256_set1_epi32(v); }
174
+ Vectorized(int32_t val1, int32_t val2, int32_t val3, int32_t val4,
175
+ int32_t val5, int32_t val6, int32_t val7, int32_t val8) {
176
+ values = _mm256_setr_epi32(val1, val2, val3, val4, val5, val6, val7, val8);
177
+ }
178
+ template <int64_t mask>
179
+ static Vectorized<int32_t> blend(Vectorized<int32_t> a, Vectorized<int32_t> b) {
180
+ return _mm256_blend_epi32(a, b, mask);
181
+ }
182
+ static Vectorized<int32_t> blendv(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b,
183
+ const Vectorized<int32_t>& mask) {
184
+ return _mm256_blendv_epi8(a.values, b.values, mask.values);
185
+ }
186
+ template <typename step_t>
187
+ static Vectorized<int32_t> arange(int32_t base = 0, step_t step = static_cast<step_t>(1)) {
188
+ return Vectorized<int32_t>(
189
+ base, base + step, base + 2 * step, base + 3 * step,
190
+ base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step);
191
+ }
192
+ static Vectorized<int32_t>
193
+ set(Vectorized<int32_t> a, Vectorized<int32_t> b, int32_t count = size()) {
194
+ switch (count) {
195
+ case 0:
196
+ return a;
197
+ case 1:
198
+ return blend<1>(a, b);
199
+ case 2:
200
+ return blend<3>(a, b);
201
+ case 3:
202
+ return blend<7>(a, b);
203
+ case 4:
204
+ return blend<15>(a, b);
205
+ case 5:
206
+ return blend<31>(a, b);
207
+ case 6:
208
+ return blend<63>(a, b);
209
+ case 7:
210
+ return blend<127>(a, b);
211
+ }
212
+ return b;
213
+ }
214
+ static Vectorized<int32_t> loadu(const void* ptr) {
215
+ return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(ptr));
216
+ }
217
+ static Vectorized<int32_t> loadu(const void* ptr, int32_t count) {
218
+ __at_align__ int32_t tmp_values[size()];
219
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
220
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
221
+ // instructions while a loop would be compiled to one instruction.
222
+ for (const auto i : c10::irange(size())) {
223
+ tmp_values[i] = 0;
224
+ }
225
+ std::memcpy(tmp_values, ptr, count * sizeof(int32_t));
226
+ return loadu(tmp_values);
227
+ }
228
+ void store(void* ptr, int count = size()) const {
229
+ if (count == size()) {
230
+ // ptr need not be aligned here. See
231
+ // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm256-storeu-si256.html
232
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values);
233
+ } else if (count > 0) {
234
+ __at_align__ int32_t tmp_values[size()];
235
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values);
236
+ std::memcpy(ptr, tmp_values, count * sizeof(int32_t));
237
+ }
238
+ }
239
+ const int32_t& operator[](int idx) const = delete;
240
+ int32_t& operator[](int idx) = delete;
241
+ Vectorized<int32_t> abs() const {
242
+ return _mm256_abs_epi32(values);
243
+ }
244
+ Vectorized<int32_t> real() const {
245
+ return *this;
246
+ }
247
+ Vectorized<int32_t> imag() const {
248
+ return _mm256_set1_epi32(0);
249
+ }
250
+ Vectorized<int32_t> conj() const {
251
+ return *this;
252
+ }
253
+ Vectorized<int32_t> neg() const;
254
+ Vectorized<int32_t> operator==(const Vectorized<int32_t>& other) const {
255
+ return _mm256_cmpeq_epi32(values, other.values);
256
+ }
257
+ Vectorized<int32_t> operator!=(const Vectorized<int32_t>& other) const {
258
+ return invert(_mm256_cmpeq_epi32(values, other.values));
259
+ }
260
+ Vectorized<int32_t> operator<(const Vectorized<int32_t>& other) const {
261
+ return _mm256_cmpgt_epi32(other.values, values);
262
+ }
263
+ Vectorized<int32_t> operator<=(const Vectorized<int32_t>& other) const {
264
+ return invert(_mm256_cmpgt_epi32(values, other.values));
265
+ }
266
+ Vectorized<int32_t> operator>(const Vectorized<int32_t>& other) const {
267
+ return _mm256_cmpgt_epi32(values, other.values);
268
+ }
269
+ Vectorized<int32_t> operator>=(const Vectorized<int32_t>& other) const {
270
+ return invert(_mm256_cmpgt_epi32(other.values, values));
271
+ }
272
+ Vectorized<int32_t> eq(const Vectorized<int32_t>& other) const;
273
+ Vectorized<int32_t> ne(const Vectorized<int32_t>& other) const;
274
+ Vectorized<int32_t> gt(const Vectorized<int32_t>& other) const;
275
+ Vectorized<int32_t> ge(const Vectorized<int32_t>& other) const;
276
+ Vectorized<int32_t> lt(const Vectorized<int32_t>& other) const;
277
+ Vectorized<int32_t> le(const Vectorized<int32_t>& other) const;
278
+ };
279
+
280
+ template <>
281
+ inline void convert(const int32_t *src, float *dst, int64_t n) {
282
+ int64_t i;
283
+ // int32_t and float have same size
284
+ #ifndef _MSC_VER
285
+ # pragma unroll
286
+ #endif
287
+ for (i = 0; i <= (n - Vectorized<int32_t>::size()); i += Vectorized<int32_t>::size()) {
288
+ auto input_vec = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(src + i));
289
+ auto output_vec = _mm256_cvtepi32_ps(input_vec);
290
+ _mm256_storeu_ps(reinterpret_cast<float*>(dst + i), output_vec);
291
+ }
292
+ #ifndef _MSC_VER
293
+ # pragma unroll
294
+ #endif
295
+ for (; i < n; i++) {
296
+ dst[i] = static_cast<float>(src[i]);
297
+ }
298
+ }
299
+
300
+ template <>
301
+ inline void convert(const int32_t *src, double *dst, int64_t n) {
302
+ int64_t i;
303
+ // int32_t has half the size of double
304
+ #ifndef _MSC_VER
305
+ # pragma unroll
306
+ #endif
307
+ for (i = 0; i <= (n - Vectorized<double>::size()); i += Vectorized<double>::size()) {
308
+ auto input_128_vec = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src + i));
309
+ auto output_vec = _mm256_cvtepi32_pd(input_128_vec);
310
+ _mm256_storeu_pd(reinterpret_cast<double*>(dst + i), output_vec);
311
+ }
312
+ #ifndef _MSC_VER
313
+ # pragma unroll
314
+ #endif
315
+ for (; i < n; i++) {
316
+ dst[i] = static_cast<double>(src[i]);
317
+ }
318
+ }
319
+
320
+ template <>
321
+ class Vectorized<int16_t> : public Vectorizedi {
322
+ private:
323
+ static const Vectorized<int16_t> ones;
324
+ public:
325
+ using value_type = int16_t;
326
+ static constexpr int size() {
327
+ return 16;
328
+ }
329
+ using Vectorizedi::Vectorizedi;
330
+ Vectorized() {}
331
+ Vectorized(int16_t v) { values = _mm256_set1_epi16(v); }
332
+ Vectorized(int16_t val1, int16_t val2, int16_t val3, int16_t val4,
333
+ int16_t val5, int16_t val6, int16_t val7, int16_t val8,
334
+ int16_t val9, int16_t val10, int16_t val11, int16_t val12,
335
+ int16_t val13, int16_t val14, int16_t val15, int16_t val16) {
336
+ values = _mm256_setr_epi16(val1, val2, val3, val4, val5, val6, val7, val8,
337
+ val9, val10, val11, val12, val13, val14, val15, val16);
338
+ }
339
+ template <int64_t mask>
340
+ static Vectorized<int16_t> blend(Vectorized<int16_t> a, Vectorized<int16_t> b) {
341
+ __at_align__ int16_t tmp_values[size()];
342
+ a.store(tmp_values);
343
+ if (mask & 0x01)
344
+ tmp_values[0] = _mm256_extract_epi16(b.values, 0);
345
+ if (mask & 0x02)
346
+ tmp_values[1] = _mm256_extract_epi16(b.values, 1);
347
+ if (mask & 0x04)
348
+ tmp_values[2] = _mm256_extract_epi16(b.values, 2);
349
+ if (mask & 0x08)
350
+ tmp_values[3] = _mm256_extract_epi16(b.values, 3);
351
+ if (mask & 0x10)
352
+ tmp_values[4] = _mm256_extract_epi16(b.values, 4);
353
+ if (mask & 0x20)
354
+ tmp_values[5] = _mm256_extract_epi16(b.values, 5);
355
+ if (mask & 0x40)
356
+ tmp_values[6] = _mm256_extract_epi16(b.values, 6);
357
+ if (mask & 0x80)
358
+ tmp_values[7] = _mm256_extract_epi16(b.values, 7);
359
+ if (mask & 0x100)
360
+ tmp_values[8] = _mm256_extract_epi16(b.values, 8);
361
+ if (mask & 0x200)
362
+ tmp_values[9] = _mm256_extract_epi16(b.values, 9);
363
+ if (mask & 0x400)
364
+ tmp_values[10] = _mm256_extract_epi16(b.values, 10);
365
+ if (mask & 0x800)
366
+ tmp_values[11] = _mm256_extract_epi16(b.values, 11);
367
+ if (mask & 0x1000)
368
+ tmp_values[12] = _mm256_extract_epi16(b.values, 12);
369
+ if (mask & 0x2000)
370
+ tmp_values[13] = _mm256_extract_epi16(b.values, 13);
371
+ if (mask & 0x4000)
372
+ tmp_values[14] = _mm256_extract_epi16(b.values, 14);
373
+ if (mask & 0x8000)
374
+ tmp_values[15] = _mm256_extract_epi16(b.values, 15);
375
+ return loadu(tmp_values);
376
+ }
377
+ static Vectorized<int16_t> blendv(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b,
378
+ const Vectorized<int16_t>& mask) {
379
+ return _mm256_blendv_epi8(a.values, b.values, mask.values);
380
+ }
381
+ template <typename step_t>
382
+ static Vectorized<int16_t> arange(int16_t base = 0, step_t step = static_cast<step_t>(1)) {
383
+ return Vectorized<int16_t>(
384
+ base, base + step, base + 2 * step, base + 3 * step,
385
+ base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step,
386
+ base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step,
387
+ base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step);
388
+ }
389
+ static Vectorized<int16_t>
390
+ set(Vectorized<int16_t> a, Vectorized<int16_t> b, int16_t count = size()) {
391
+ switch (count) {
392
+ case 0:
393
+ return a;
394
+ case 1:
395
+ return blend<1>(a, b);
396
+ case 2:
397
+ return blend<3>(a, b);
398
+ case 3:
399
+ return blend<7>(a, b);
400
+ case 4:
401
+ return blend<15>(a, b);
402
+ case 5:
403
+ return blend<31>(a, b);
404
+ case 6:
405
+ return blend<63>(a, b);
406
+ case 7:
407
+ return blend<127>(a, b);
408
+ case 8:
409
+ return blend<255>(a, b);
410
+ case 9:
411
+ return blend<511>(a, b);
412
+ case 10:
413
+ return blend<1023>(a, b);
414
+ case 11:
415
+ return blend<2047>(a, b);
416
+ case 12:
417
+ return blend<4095>(a, b);
418
+ case 13:
419
+ return blend<8191>(a, b);
420
+ case 14:
421
+ return blend<16383>(a, b);
422
+ case 15:
423
+ return blend<32767>(a, b);
424
+ }
425
+ return b;
426
+ }
427
+ static Vectorized<int16_t> loadu(const void* ptr) {
428
+ return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(ptr));
429
+ }
430
+ static Vectorized<int16_t> loadu(const void* ptr, int16_t count) {
431
+ __at_align__ int16_t tmp_values[size()];
432
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
433
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
434
+ // instructions while a loop would be compiled to one instruction.
435
+ for (const auto i : c10::irange(size())) {
436
+ tmp_values[i] = 0;
437
+ }
438
+ std::memcpy(tmp_values, ptr, count * sizeof(int16_t));
439
+ return loadu(tmp_values);
440
+ }
441
+ void store(void* ptr, int count = size()) const {
442
+ if (count == size()) {
443
+ // ptr need not be aligned here. See
444
+ // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm256-storeu-si256.html
445
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values);
446
+ } else if (count > 0) {
447
+ __at_align__ int16_t tmp_values[size()];
448
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values);
449
+ std::memcpy(ptr, tmp_values, count * sizeof(int16_t));
450
+ }
451
+ }
452
+ const int16_t& operator[](int idx) const = delete;
453
+ int16_t& operator[](int idx) = delete;
454
+ Vectorized<int16_t> abs() const {
455
+ return _mm256_abs_epi16(values);
456
+ }
457
+ Vectorized<int16_t> real() const {
458
+ return *this;
459
+ }
460
+ Vectorized<int16_t> imag() const {
461
+ return _mm256_set1_epi16(0);
462
+ }
463
+ Vectorized<int16_t> conj() const {
464
+ return *this;
465
+ }
466
+ Vectorized<int16_t> neg() const;
467
+ Vectorized<int16_t> operator==(const Vectorized<int16_t>& other) const {
468
+ return _mm256_cmpeq_epi16(values, other.values);
469
+ }
470
+ Vectorized<int16_t> operator!=(const Vectorized<int16_t>& other) const {
471
+ return invert(_mm256_cmpeq_epi16(values, other.values));
472
+ }
473
+ Vectorized<int16_t> operator<(const Vectorized<int16_t>& other) const {
474
+ return _mm256_cmpgt_epi16(other.values, values);
475
+ }
476
+ Vectorized<int16_t> operator<=(const Vectorized<int16_t>& other) const {
477
+ return invert(_mm256_cmpgt_epi16(values, other.values));
478
+ }
479
+ Vectorized<int16_t> operator>(const Vectorized<int16_t>& other) const {
480
+ return _mm256_cmpgt_epi16(values, other.values);
481
+ }
482
+ Vectorized<int16_t> operator>=(const Vectorized<int16_t>& other) const {
483
+ return invert(_mm256_cmpgt_epi16(other.values, values));
484
+ }
485
+
486
+ Vectorized<int16_t> eq(const Vectorized<int16_t>& other) const;
487
+ Vectorized<int16_t> ne(const Vectorized<int16_t>& other) const;
488
+ Vectorized<int16_t> gt(const Vectorized<int16_t>& other) const;
489
+ Vectorized<int16_t> ge(const Vectorized<int16_t>& other) const;
490
+ Vectorized<int16_t> lt(const Vectorized<int16_t>& other) const;
491
+ Vectorized<int16_t> le(const Vectorized<int16_t>& other) const;
492
+ };
493
+
494
+ template <typename T>
495
+ class Vectorized8 : public Vectorizedi {
496
+ static_assert(
497
+ std::is_same<T, int8_t>::value || std::is_same<T, uint8_t>::value,
498
+ "Only int8_t/uint8_t are supported");
499
+ protected:
500
+ static const Vectorized<T> ones;
501
+ public:
502
+ using value_type = T;
503
+ static constexpr int size() {
504
+ return 32;
505
+ }
506
+ using Vectorizedi::Vectorizedi;
507
+ Vectorized8() {}
508
+ Vectorized8(T v) { values = _mm256_set1_epi8(v); }
509
+ Vectorized8(T val1, T val2, T val3, T val4,
510
+ T val5, T val6, T val7, T val8,
511
+ T val9, T val10, T val11, T val12,
512
+ T val13, T val14, T val15, T val16,
513
+ T val17, T val18, T val19, T val20,
514
+ T val21, T val22, T val23, T val24,
515
+ T val25, T val26, T val27, T val28,
516
+ T val29, T val30, T val31, T val32) {
517
+ values = _mm256_setr_epi8(val1, val2, val3, val4, val5, val6, val7, val8,
518
+ val9, val10, val11, val12, val13, val14, val15, val16,
519
+ val17, val18, val19, val20, val21, val22, val23, val24,
520
+ val25, val26, val27, val28, val29, val30, val31, val32);
521
+ }
522
+ template <int64_t mask>
523
+ static Vectorized<T> blend(Vectorized<T> a, Vectorized<T> b) {
524
+ __at_align__ T tmp_values[size()];
525
+ a.store(tmp_values);
526
+ if (mask & 0x01)
527
+ tmp_values[0] = _mm256_extract_epi8(b.values, 0);
528
+ if (mask & 0x02)
529
+ tmp_values[1] = _mm256_extract_epi8(b.values, 1);
530
+ if (mask & 0x04)
531
+ tmp_values[2] = _mm256_extract_epi8(b.values, 2);
532
+ if (mask & 0x08)
533
+ tmp_values[3] = _mm256_extract_epi8(b.values, 3);
534
+ if (mask & 0x10)
535
+ tmp_values[4] = _mm256_extract_epi8(b.values, 4);
536
+ if (mask & 0x20)
537
+ tmp_values[5] = _mm256_extract_epi8(b.values, 5);
538
+ if (mask & 0x40)
539
+ tmp_values[6] = _mm256_extract_epi8(b.values, 6);
540
+ if (mask & 0x80)
541
+ tmp_values[7] = _mm256_extract_epi8(b.values, 7);
542
+ if (mask & 0x100)
543
+ tmp_values[8] = _mm256_extract_epi8(b.values, 8);
544
+ if (mask & 0x200)
545
+ tmp_values[9] = _mm256_extract_epi8(b.values, 9);
546
+ if (mask & 0x400)
547
+ tmp_values[10] = _mm256_extract_epi8(b.values, 10);
548
+ if (mask & 0x800)
549
+ tmp_values[11] = _mm256_extract_epi8(b.values, 11);
550
+ if (mask & 0x1000)
551
+ tmp_values[12] = _mm256_extract_epi8(b.values, 12);
552
+ if (mask & 0x2000)
553
+ tmp_values[13] = _mm256_extract_epi8(b.values, 13);
554
+ if (mask & 0x4000)
555
+ tmp_values[14] = _mm256_extract_epi8(b.values, 14);
556
+ if (mask & 0x8000)
557
+ tmp_values[15] = _mm256_extract_epi8(b.values, 15);
558
+ if (mask & 0x010000)
559
+ tmp_values[16] = _mm256_extract_epi8(b.values, 16);
560
+ if (mask & 0x020000)
561
+ tmp_values[17] = _mm256_extract_epi8(b.values, 17);
562
+ if (mask & 0x040000)
563
+ tmp_values[18] = _mm256_extract_epi8(b.values, 18);
564
+ if (mask & 0x080000)
565
+ tmp_values[19] = _mm256_extract_epi8(b.values, 19);
566
+ if (mask & 0x100000)
567
+ tmp_values[20] = _mm256_extract_epi8(b.values, 20);
568
+ if (mask & 0x200000)
569
+ tmp_values[21] = _mm256_extract_epi8(b.values, 21);
570
+ if (mask & 0x400000)
571
+ tmp_values[22] = _mm256_extract_epi8(b.values, 22);
572
+ if (mask & 0x800000)
573
+ tmp_values[23] = _mm256_extract_epi8(b.values, 23);
574
+ if (mask & 0x1000000)
575
+ tmp_values[24] = _mm256_extract_epi8(b.values, 24);
576
+ if (mask & 0x2000000)
577
+ tmp_values[25] = _mm256_extract_epi8(b.values, 25);
578
+ if (mask & 0x4000000)
579
+ tmp_values[26] = _mm256_extract_epi8(b.values, 26);
580
+ if (mask & 0x8000000)
581
+ tmp_values[27] = _mm256_extract_epi8(b.values, 27);
582
+ if (mask & 0x10000000)
583
+ tmp_values[28] = _mm256_extract_epi8(b.values, 28);
584
+ if (mask & 0x20000000)
585
+ tmp_values[29] = _mm256_extract_epi8(b.values, 29);
586
+ if (mask & 0x40000000)
587
+ tmp_values[30] = _mm256_extract_epi8(b.values, 30);
588
+ if (mask & 0x80000000)
589
+ tmp_values[31] = _mm256_extract_epi8(b.values, 31);
590
+ return loadu(tmp_values);
591
+ }
592
+ static Vectorized<T> blendv(const Vectorized<T>& a, const Vectorized<T>& b,
593
+ const Vectorized<T>& mask) {
594
+ return _mm256_blendv_epi8(a.values, b.values, mask.values);
595
+ }
596
+ template <typename step_t>
597
+ static Vectorized<T> arange(T base = 0, step_t step = static_cast<step_t>(1)) {
598
+ return Vectorized<T>(
599
+ base, base + step, base + 2 * step, base + 3 * step,
600
+ base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step,
601
+ base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step,
602
+ base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step,
603
+ base + 16 * step, base + 17 * step, base + 18 * step, base + 19 * step,
604
+ base + 20 * step, base + 21 * step, base + 22 * step, base + 23 * step,
605
+ base + 24 * step, base + 25 * step, base + 26 * step, base + 27 * step,
606
+ base + 28 * step, base + 29 * step, base + 30 * step, base + 31 * step);
607
+ }
608
+ static Vectorized<T>
609
+ set(Vectorized<T> a, Vectorized<T> b, T count = size()) {
610
+ switch (count) {
611
+ case 0:
612
+ return a;
613
+ case 1:
614
+ return blend<0x1>(a, b);
615
+ case 2:
616
+ return blend<0x3>(a, b);
617
+ case 3:
618
+ return blend<0x7>(a, b);
619
+ case 4:
620
+ return blend<0xF>(a, b);
621
+ case 5:
622
+ return blend<0x1F>(a, b);
623
+ case 6:
624
+ return blend<0x3F>(a, b);
625
+ case 7:
626
+ return blend<0x7F>(a, b);
627
+ case 8:
628
+ return blend<0xFF>(a, b);
629
+ case 9:
630
+ return blend<0x1FF>(a, b);
631
+ case 10:
632
+ return blend<0x3FF>(a, b);
633
+ case 11:
634
+ return blend<0x7FF>(a, b);
635
+ case 12:
636
+ return blend<0xFFF>(a, b);
637
+ case 13:
638
+ return blend<0x1FFF>(a, b);
639
+ case 14:
640
+ return blend<0x3FFF>(a, b);
641
+ case 15:
642
+ return blend<0x7FFF>(a, b);
643
+ case 16:
644
+ return blend<0xFFFF>(a, b);
645
+ case 17:
646
+ return blend<0x1FFFF>(a, b);
647
+ case 18:
648
+ return blend<0x3FFFF>(a, b);
649
+ case 19:
650
+ return blend<0x7FFFF>(a, b);
651
+ case 20:
652
+ return blend<0xFFFFF>(a, b);
653
+ case 21:
654
+ return blend<0x1FFFFF>(a, b);
655
+ case 22:
656
+ return blend<0x3FFFFF>(a, b);
657
+ case 23:
658
+ return blend<0x7FFFFF>(a, b);
659
+ case 24:
660
+ return blend<0xFFFFFF>(a, b);
661
+ case 25:
662
+ return blend<0x1FFFFFF>(a, b);
663
+ case 26:
664
+ return blend<0x3FFFFFF>(a, b);
665
+ case 27:
666
+ return blend<0x7FFFFFF>(a, b);
667
+ case 28:
668
+ return blend<0xFFFFFFF>(a, b);
669
+ case 29:
670
+ return blend<0x1FFFFFFF>(a, b);
671
+ case 30:
672
+ return blend<0x3FFFFFFF>(a, b);
673
+ case 31:
674
+ return blend<0x7FFFFFFF>(a, b);
675
+ }
676
+ return b;
677
+ }
678
+ static Vectorized<T> loadu(const void* ptr) {
679
+ return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(ptr));
680
+ }
681
+ static Vectorized<T> loadu_one_fourth(const void* ptr) {
682
+ // Fast path when loading only 8 elements.
683
+ // Note: this is not merged into the fast path of loadu(const void* ptr, T count)
684
+ // because loadu(const void* ptr, T count) requires the upper 128 bits to be zero-initialized,
685
+ // whereas _mm256_castsi128_si256 leaves the upper 128 bits of the result undefined.
686
+ // TODO<leslie> We can use _mm256_zextsi128_si256 in the future,
687
+ // since gcc 9.3 doesn't support it yet.
688
+ __m128i input_128 = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(ptr));
689
+ return _mm256_castsi128_si256(input_128);
690
+ }
691
+ static Vectorized<T> loadu(const void* ptr, T count) {
692
+ __at_align__ T tmp_values[size()];
693
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
694
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
695
+ // instructions while a loop would be compiled to one instruction.
696
+ for (const auto i : c10::irange(size())) {
697
+ tmp_values[i] = 0;
698
+ }
699
+ std::memcpy(tmp_values, ptr, count * sizeof(T));
700
+ return loadu(tmp_values);
701
+ }
702
+ void store(void* ptr, int count = size()) const {
703
+ if (count == size()) {
704
+ // ptr need not be aligned here. See
705
+ // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm256-storeu-si256.html
706
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values);
707
+ } else if (count > 0) {
708
+ if (count == 8) {
709
+ // Fast path when storing only 8 elements
710
+ _mm_storel_epi64(reinterpret_cast<__m128i*>(ptr), _mm256_castsi256_si128(values));
711
+ } else {
712
+ __at_align__ T tmp_values[size()];
713
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(tmp_values), values);
714
+ std::memcpy(ptr, tmp_values, count * sizeof(T));
715
+ }
716
+ }
717
+ }
718
+ const T& operator[](int idx) const = delete;
719
+ T& operator[](int idx) = delete;
720
+ Vectorized<T> real() const {
721
+ return *this;
722
+ }
723
+ Vectorized<T> imag() const {
724
+ return _mm256_set1_epi8(0);
725
+ }
726
+ Vectorized<T> conj() const {
727
+ return *this;
728
+ }
729
+ };
730
+
731
+ template<>
732
+ class Vectorized<int8_t>: public Vectorized8<int8_t> {
733
+ public:
734
+ using Vectorized8::Vectorized8;
735
+
736
+ Vectorized<int8_t> neg() const;
737
+
738
+ Vectorized<int8_t> abs() const {
739
+ return _mm256_abs_epi8(values);
740
+ }
741
+
742
+ Vectorized<int8_t> operator==(const Vectorized<int8_t>& other) const {
743
+ return _mm256_cmpeq_epi8(values, other.values);
744
+ }
745
+ Vectorized<int8_t> operator!=(const Vectorized<int8_t>& other) const {
746
+ return invert(_mm256_cmpeq_epi8(values, other.values));
747
+ }
748
+ Vectorized<int8_t> operator<(const Vectorized<int8_t>& other) const {
749
+ return _mm256_cmpgt_epi8(other.values, values);
750
+ }
751
+ Vectorized<int8_t> operator<=(const Vectorized<int8_t>& other) const {
752
+ return invert(_mm256_cmpgt_epi8(values, other.values));
753
+ }
754
+ Vectorized<int8_t> operator>(const Vectorized<int8_t>& other) const {
755
+ return other < *this;
756
+ }
757
+ Vectorized<int8_t> operator>=(const Vectorized<int8_t>& other) const {
758
+ return other <= *this;
759
+ }
760
+
761
+ Vectorized<int8_t> eq(const Vectorized<int8_t>& other) const;
762
+ Vectorized<int8_t> ne(const Vectorized<int8_t>& other) const;
763
+ Vectorized<int8_t> gt(const Vectorized<int8_t>& other) const;
764
+ Vectorized<int8_t> ge(const Vectorized<int8_t>& other) const;
765
+ Vectorized<int8_t> lt(const Vectorized<int8_t>& other) const;
766
+ Vectorized<int8_t> le(const Vectorized<int8_t>& other) const;
767
+ };
768
+
769
+ template<>
770
+ class Vectorized<uint8_t>: public Vectorized8<uint8_t> {
771
+ public:
772
+ using Vectorized8::Vectorized8;
773
+
774
+ Vectorized<uint8_t> neg() const;
775
+
776
+ Vectorized<uint8_t> abs() const {
777
+ return *this;
778
+ }
779
+
780
+ Vectorized<uint8_t> operator==(const Vectorized<uint8_t>& other) const {
781
+ return _mm256_cmpeq_epi8(values, other.values);
782
+ }
783
+ Vectorized<uint8_t> operator!=(const Vectorized<uint8_t>& other) const {
784
+ return invert(_mm256_cmpeq_epi8(values, other.values));
785
+ }
786
+ Vectorized<uint8_t> operator<(const Vectorized<uint8_t>& other) const {
787
+ __m256i max = _mm256_max_epu8(values, other.values);
788
+ return invert(_mm256_cmpeq_epi8(max, values));
789
+ }
790
+ Vectorized<uint8_t> operator<=(const Vectorized<uint8_t>& other) const {
791
+ __m256i max = _mm256_max_epu8(values, other.values);
792
+ return _mm256_cmpeq_epi8(max, other.values);
793
+ }
794
+ Vectorized<uint8_t> operator>(const Vectorized<uint8_t>& other) const {
795
+ return other < *this;
796
+ }
797
+ Vectorized<uint8_t> operator>=(const Vectorized<uint8_t>& other) const {
798
+ return other <= *this;
799
+ }
800
+
801
+ Vectorized<uint8_t> eq(const Vectorized<uint8_t>& other) const;
802
+ Vectorized<uint8_t> ne(const Vectorized<uint8_t>& other) const;
803
+ Vectorized<uint8_t> gt(const Vectorized<uint8_t>& other) const;
804
+ Vectorized<uint8_t> ge(const Vectorized<uint8_t>& other) const;
805
+ Vectorized<uint8_t> lt(const Vectorized<uint8_t>& other) const;
806
+ Vectorized<uint8_t> le(const Vectorized<uint8_t>& other) const;
807
+ };
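AVX2 only provides signed byte comparisons, so the uint8_t specialization above derives its orderings from _mm256_max_epu8 via the identities "a <= b exactly when max(a, b) == b" and "a < b exactly when max(a, b) != a". A brute-force scalar check of those identities (plain C++):

#include <algorithm>
#include <cstdio>

int main() {
  for (int a = 0; a < 256; ++a) {
    for (int b = 0; b < 256; ++b) {
      bool le_ok = (std::max(a, b) == b) == (a <= b);
      bool lt_ok = (std::max(a, b) != a) == (a < b);
      if (!le_ok || !lt_ok) {
        std::printf("identity fails for a=%d b=%d\n", a, b);
        return 1;
      }
    }
  }
  std::printf("identities hold for all uint8 pairs\n");
  return 0;
}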
808
+
809
+ template <>
810
+ Vectorized<int64_t> inline operator+(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
811
+ return _mm256_add_epi64(a, b);
812
+ }
813
+
814
+ template <>
815
+ Vectorized<int32_t> inline operator+(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
816
+ return _mm256_add_epi32(a, b);
817
+ }
818
+
819
+ template <>
820
+ Vectorized<int16_t> inline operator+(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
821
+ return _mm256_add_epi16(a, b);
822
+ }
823
+
824
+ template <>
825
+ Vectorized<int8_t> inline operator+(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
826
+ return _mm256_add_epi8(a, b);
827
+ }
828
+
829
+ template <>
830
+ Vectorized<uint8_t> inline operator+(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
831
+ return _mm256_add_epi8(a, b);
832
+ }
833
+
834
+ template <>
835
+ Vectorized<int64_t> inline operator-(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
836
+ return _mm256_sub_epi64(a, b);
837
+ }
838
+
839
+ template <>
840
+ Vectorized<int32_t> inline operator-(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
841
+ return _mm256_sub_epi32(a, b);
842
+ }
843
+
844
+ template <>
845
+ Vectorized<int16_t> inline operator-(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
846
+ return _mm256_sub_epi16(a, b);
847
+ }
848
+
849
+ template <>
850
+ Vectorized<int8_t> inline operator-(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
851
+ return _mm256_sub_epi8(a, b);
852
+ }
853
+
854
+ template <>
855
+ Vectorized<uint8_t> inline operator-(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
856
+ return _mm256_sub_epi8(a, b);
857
+ }
858
+
859
+ // Negation. Defined here so we can utilize operator-
860
+ inline Vectorized<int64_t> Vectorized<int64_t>::neg() const {
861
+ return Vectorized<int64_t>(0) - *this;
862
+ }
863
+
864
+ inline Vectorized<int32_t> Vectorized<int32_t>::neg() const {
865
+ return Vectorized<int32_t>(0) - *this;
866
+ }
867
+
868
+ inline Vectorized<int16_t> Vectorized<int16_t>::neg() const {
869
+ return Vectorized<int16_t>(0) - *this;
870
+ }
871
+
872
+ inline Vectorized<int8_t> Vectorized<int8_t>::neg() const {
873
+ return Vectorized<int8_t>(0) - *this;
874
+ }
875
+
876
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::neg() const {
877
+ return Vectorized<uint8_t>(0) - *this;
878
+ }
879
+
880
+ // Emulate operations with no native 64-bit support in avx,
881
+ // by extracting each element, performing the operation pointwise,
882
+ // then combining the results into a vector.
883
+ template <typename op_t>
884
+ Vectorized<int64_t> inline emulate(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b, const op_t& op) {
885
+ int64_t a0 = _mm256_extract_epi64(a, 0);
886
+ int64_t a1 = _mm256_extract_epi64(a, 1);
887
+ int64_t a2 = _mm256_extract_epi64(a, 2);
888
+ int64_t a3 = _mm256_extract_epi64(a, 3);
889
+
890
+ int64_t b0 = _mm256_extract_epi64(b, 0);
891
+ int64_t b1 = _mm256_extract_epi64(b, 1);
892
+ int64_t b2 = _mm256_extract_epi64(b, 2);
893
+ int64_t b3 = _mm256_extract_epi64(b, 3);
894
+
895
+ int64_t c0 = op(a0, b0);
896
+ int64_t c1 = op(a1, b1);
897
+ int64_t c2 = op(a2, b2);
898
+ int64_t c3 = op(a3, b3);
899
+
900
+ return _mm256_set_epi64x(c3, c2, c1, c0);
901
+ }
902
+
903
+ template <typename op_t>
904
+ Vectorized<int64_t> inline emulate(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b, const Vectorized<int64_t>& c, const op_t& op) {
905
+ int64_t a0 = _mm256_extract_epi64(a, 0);
906
+ int64_t a1 = _mm256_extract_epi64(a, 1);
907
+ int64_t a2 = _mm256_extract_epi64(a, 2);
908
+ int64_t a3 = _mm256_extract_epi64(a, 3);
909
+
910
+ int64_t b0 = _mm256_extract_epi64(b, 0);
911
+ int64_t b1 = _mm256_extract_epi64(b, 1);
912
+ int64_t b2 = _mm256_extract_epi64(b, 2);
913
+ int64_t b3 = _mm256_extract_epi64(b, 3);
914
+
915
+ int64_t c0 = _mm256_extract_epi64(c, 0);
916
+ int64_t c1 = _mm256_extract_epi64(c, 1);
917
+ int64_t c2 = _mm256_extract_epi64(c, 2);
918
+ int64_t c3 = _mm256_extract_epi64(c, 3);
919
+
920
+ int64_t d0 = op(a0, b0, c0);
921
+ int64_t d1 = op(a1, b1, c1);
922
+ int64_t d2 = op(a2, b2, c2);
923
+ int64_t d3 = op(a3, b3, c3);
924
+
925
+ return _mm256_set_epi64x(d3, d2, d1, d0);
926
+ }
927
+
928
+ // AVX2 has no intrinsic for int64_t multiply so it needs to be emulated
929
+ // This could be implemented more efficiently using epi32 instructions
930
+ // This is also technically avx compatible, but then we'll need AVX
931
+ // code for add as well.
932
+ // Note: intentionally ignores undefined behavior like (-lowest * -1).
933
+ template <>
934
+ Vectorized<int64_t> inline operator*(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
935
+ return emulate(a, b, [](int64_t a_point, int64_t b_point) __ubsan_ignore_undefined__ {return a_point * b_point;});
936
+ }
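On the "more efficiently using epi32 instructions" note above: the low 64 bits of a 64x64 product need only three 32x32 partial products, since a*b mod 2^64 equals lo(a)*lo(b) + ((lo(a)*hi(b) + hi(a)*lo(b)) << 32). A scalar demonstration of that decomposition (illustration only, not the AVX2 kernel itself):

#include <cstdint>
#include <cstdio>

// Low 64 bits of a 64x64 multiply, built from 32-bit halves.
uint64_t mul_lo64(uint64_t a, uint64_t b) {
  uint64_t a_lo = a & 0xFFFFFFFFu, a_hi = a >> 32;
  uint64_t b_lo = b & 0xFFFFFFFFu, b_hi = b >> 32;
  uint64_t cross = a_lo * b_hi + a_hi * b_lo;      // only its low 32 bits survive the shift
  return a_lo * b_lo + (cross << 32);              // everything is mod 2^64, so wrapping is fine
}

int main() {
  uint64_t a = 0x123456789ABCDEF0ull, b = 0x0FEDCBA987654321ull;
  std::printf("decomposed: %016llx\n", static_cast<unsigned long long>(mul_lo64(a, b)));
  std::printf("direct:     %016llx\n", static_cast<unsigned long long>(a * b));
  return 0;
}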
937
+
938
+ template <>
939
+ Vectorized<int32_t> inline operator*(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
940
+ return _mm256_mullo_epi32(a, b);
941
+ }
942
+
943
+ template <>
944
+ Vectorized<int16_t> inline operator*(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
945
+ return _mm256_mullo_epi16(a, b);
946
+ }
947
+
948
+ template <typename T, typename Op>
949
+ Vectorized<T> inline int_elementwise_binary_256(const Vectorized<T>& a, const Vectorized<T>& b, Op op) {
950
+ T values_a[Vectorized<T>::size()];
951
+ T values_b[Vectorized<T>::size()];
952
+ a.store(values_a);
953
+ b.store(values_b);
954
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
955
+ values_a[i] = op(values_a[i], values_b[i]);
956
+ }
957
+ return Vectorized<T>::loadu(values_a);
958
+ }
959
+
960
+ template <>
961
+ Vectorized<int8_t> inline operator*(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
962
+ // We don't have an instruction for multiplying int8_t
963
+ #ifndef CPU_CAPABILITY_AVX2
964
+ return int_elementwise_binary_256(a, b, std::multiplies<int8_t>());
965
+ #else
966
+ __m256i mask00FF = _mm256_set1_epi16(0x00FF);
967
+ __m256i a_lo = _mm256_srai_epi16(_mm256_slli_epi16(a, 8), 8);
968
+ __m256i b_lo = _mm256_srai_epi16(_mm256_slli_epi16(b, 8), 8);
969
+ __m256i a_hi = _mm256_srai_epi16(a, 8);
970
+ __m256i b_hi = _mm256_srai_epi16(b, 8);
971
+ __m256i res_lo = _mm256_and_si256(_mm256_mullo_epi16(a_lo, b_lo), mask00FF);
972
+ __m256i res_hi = _mm256_slli_epi16(_mm256_mullo_epi16(a_hi, b_hi), 8);
973
+ __m256i res = _mm256_or_si256(res_hi, res_lo);
974
+ return res;
975
+ #endif
976
+ }
977
+
978
+ template <>
979
+ Vectorized<uint8_t> inline operator*(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
980
+ // We don't have an instruction for multiplying uint8_t
981
+ #ifndef CPU_CAPABILITY_AVX2
982
+ return int_elementwise_binary_256(a, b, std::multiplies<uint8_t>());
983
+ #else
984
+ __m256i mask00FF = _mm256_set1_epi16(0x00FF);
985
+ __m256i a_lo = _mm256_and_si256 (a, mask00FF);
986
+ __m256i b_lo = _mm256_and_si256 (b, mask00FF);
987
+ __m256i a_hi = _mm256_srli_epi16(a, 8);
988
+ __m256i b_hi = _mm256_srli_epi16(b, 8);
989
+ __m256i res_lo = _mm256_and_si256(_mm256_mullo_epi16(a_lo, b_lo), mask00FF);
990
+ __m256i res_hi = _mm256_slli_epi16(_mm256_mullo_epi16(a_hi, b_hi), 8);
991
+ __m256i res = _mm256_or_si256(res_hi, res_lo);
992
+ return res;
993
+ #endif
994
+ }
995
+
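+ // [Editorial sketch, not part of this header] Scalar reference for the two
+ // 8-bit operator* overloads above: the even/odd 16-bit lane trick keeps only
+ // the low 8 bits of each product, i.e. multiplication wraps around modulo 256
+ // (the int8_t variant produces the same bit pattern).
+ inline uint8_t mul_u8_lane_ref_sketch(uint8_t a_point, uint8_t b_point) {
+   return static_cast<uint8_t>(a_point * b_point);
+ }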
996
+ template <>
997
+ Vectorized<int64_t> inline minimum(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
998
+ #ifndef CPU_CAPABILITY_AVX2
999
+ return emulate(a, b, [](int64_t a_point, int64_t b_point) {return std::min(a_point, b_point);});
1000
+ #else
1001
+ __m256i cmp = _mm256_cmpgt_epi64(a, b);
1002
+ return _mm256_blendv_epi8(a, b, cmp);
1003
+ #endif
1004
+ }
1005
+
1006
+ template <>
1007
+ Vectorized<int32_t> inline minimum(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
1008
+ return _mm256_min_epi32(a, b);
1009
+ }
1010
+
1011
+ template <>
1012
+ Vectorized<int16_t> inline minimum(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
1013
+ return _mm256_min_epi16(a, b);
1014
+ }
1015
+
1016
+ template <>
1017
+ Vectorized<int8_t> inline minimum(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
1018
+ return _mm256_min_epi8(a, b);
1019
+ }
1020
+
1021
+ template <>
1022
+ Vectorized<uint8_t> inline minimum(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
1023
+ return _mm256_min_epu8(a, b);
1024
+ }
1025
+
1026
+ template <>
1027
+ Vectorized<int64_t> inline maximum(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
1028
+ #ifndef CPU_CAPABILITY_AVX2
1029
+ return emulate(a, b, [](int64_t a_point, int64_t b_point) {return std::max(a_point, b_point);});
1030
+ #else
1031
+ __m256i cmp = _mm256_cmpgt_epi64(a, b);
1032
+ return _mm256_blendv_epi8(b, a, cmp);
1033
+ #endif
1034
+ }
1035
+
1036
+ template <>
1037
+ Vectorized<int32_t> inline maximum(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
1038
+ return _mm256_max_epi32(a, b);
1039
+ }
1040
+
1041
+ template <>
1042
+ Vectorized<int16_t> inline maximum(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
1043
+ return _mm256_max_epi16(a, b);
1044
+ }
1045
+
1046
+ template <>
1047
+ Vectorized<int8_t> inline maximum(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
1048
+ return _mm256_max_epi8(a, b);
1049
+ }
1050
+
1051
+ template <>
1052
+ Vectorized<uint8_t> inline maximum(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
1053
+ return _mm256_max_epu8(a, b);
1054
+ }
1055
+
1056
+ template <>
1057
+ Vectorized<int64_t> inline clamp(const Vectorized<int64_t>& a, const Vectorized<int64_t>& min_val, const Vectorized<int64_t>& max_val) {
1058
+ #ifndef CPU_CAPABILITY_AVX2
1059
+ return emulate(a, min_val, max_val, [](int64_t a_point, int64_t min_point, int64_t max_point) {return std::min(max_point, std::max(a_point, min_point));});
1060
+ #else
1061
+ return minimum(maximum(a, min_val), max_val);
1062
+ #endif
1063
+ }
1064
+
1065
+ template <>
1066
+ Vectorized<int32_t> inline clamp(const Vectorized<int32_t>& a, const Vectorized<int32_t>& min_val, const Vectorized<int32_t>& max_val) {
1067
+ return _mm256_min_epi32(max_val, _mm256_max_epi32(a, min_val));
1068
+ }
1069
+
1070
+ template <>
1071
+ Vectorized<int16_t> inline clamp(const Vectorized<int16_t>& a, const Vectorized<int16_t>& min_val, const Vectorized<int16_t>& max_val) {
1072
+ return _mm256_min_epi16(max_val, _mm256_max_epi16(a, min_val));
1073
+ }
1074
+
1075
+ template <>
1076
+ Vectorized<int8_t> inline clamp(const Vectorized<int8_t>& a, const Vectorized<int8_t>& min_val, const Vectorized<int8_t>& max_val) {
1077
+ return _mm256_min_epi8(max_val, _mm256_max_epi8(a, min_val));
1078
+ }
1079
+
1080
+ template <>
1081
+ Vectorized<uint8_t> inline clamp(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& min_val, const Vectorized<uint8_t>& max_val) {
1082
+ return _mm256_min_epu8(max_val, _mm256_max_epu8(a, min_val));
1083
+ }
1084
+
1085
+ template <>
1086
+ Vectorized<int64_t> inline clamp_max(const Vectorized<int64_t>& a, const Vectorized<int64_t>& max_val) {
1087
+ #ifndef CPU_CAPABILITY_AVX2
1088
+ return emulate(a, max_val, [](int64_t a_point, int64_t max_point) {return std::min(max_point, a_point);});
1089
+ #else
1090
+ return minimum(max_val, a);
1091
+ #endif
1092
+ }
1093
+
1094
+ template <>
1095
+ Vectorized<int32_t> inline clamp_max(const Vectorized<int32_t>& a, const Vectorized<int32_t>& max_val) {
1096
+ return _mm256_min_epi32(max_val, a);
1097
+ }
1098
+
1099
+ template <>
1100
+ Vectorized<int16_t> inline clamp_max(const Vectorized<int16_t>& a, const Vectorized<int16_t>& max_val) {
1101
+ return _mm256_min_epi16(max_val, a);
1102
+ }
1103
+
1104
+ template <>
1105
+ Vectorized<int8_t> inline clamp_max(const Vectorized<int8_t>& a, const Vectorized<int8_t>& max_val) {
1106
+ return _mm256_min_epi8(max_val, a);
1107
+ }
1108
+
1109
+ template <>
1110
+ Vectorized<uint8_t> inline clamp_max(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& max_val) {
1111
+ return _mm256_min_epu8(max_val, a);
1112
+ }
1113
+
1114
+ template <>
1115
+ Vectorized<int64_t> inline clamp_min(const Vectorized<int64_t>& a, const Vectorized<int64_t>& min_val) {
1116
+ #ifndef CPU_CAPABILITY_AVX2
1117
+ return emulate(a, min_val, [](int64_t a_point, int64_t min_point) {return std::max(min_point, a_point);});
1118
+ #else
1119
+ return maximum(min_val, a);
1120
+ #endif
1121
+ }
1122
+
1123
+ template <>
1124
+ Vectorized<int32_t> inline clamp_min(const Vectorized<int32_t>& a, const Vectorized<int32_t>& min_val) {
1125
+ return _mm256_max_epi32(min_val, a);
1126
+ }
1127
+
1128
+ template <>
1129
+ Vectorized<int16_t> inline clamp_min(const Vectorized<int16_t>& a, const Vectorized<int16_t>& min_val) {
1130
+ return _mm256_max_epi16(min_val, a);
1131
+ }
1132
+
1133
+ template <>
1134
+ Vectorized<int8_t> inline clamp_min(const Vectorized<int8_t>& a, const Vectorized<int8_t>& min_val) {
1135
+ return _mm256_max_epi8(min_val, a);
1136
+ }
1137
+
1138
+ template <>
1139
+ Vectorized<uint8_t> inline clamp_min(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& min_val) {
1140
+ return _mm256_max_epu8(min_val, a);
1141
+ }
1142
+
1143
+ template<typename T>
1144
+ Vectorized<int32_t> inline convert_to_int32(const T* ptr) {
1145
+ return Vectorized<int32_t>::loadu(ptr);
1146
+ }
1147
+
1148
+ template<>
1149
+ Vectorized<int32_t> inline convert_to_int32<int8_t>(const int8_t* ptr) {
1150
+ return _mm256_cvtepi8_epi32(_mm_loadl_epi64(reinterpret_cast<const __m128i*>(ptr)));
1151
+ }
1152
+
1153
+ template<>
1154
+ Vectorized<int32_t> inline convert_to_int32<uint8_t>(const uint8_t* ptr) {
1155
+ return _mm256_cvtepu8_epi32(_mm_loadl_epi64(reinterpret_cast<const __m128i*>(ptr)));
1156
+ }
1157
+
1158
+ template <>
1159
+ Vectorized<int64_t> inline operator/(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
1160
+ return int_elementwise_binary_256(a, b, std::divides<int64_t>());
1161
+ }
1162
+ template <>
1163
+ Vectorized<int32_t> inline operator/(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
1164
+ return int_elementwise_binary_256(a, b, std::divides<int32_t>());
1165
+ }
1166
+ template <>
1167
+ Vectorized<int16_t> inline operator/(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
1168
+ return int_elementwise_binary_256(a, b, std::divides<int16_t>());
1169
+ }
1170
+ template <>
1171
+ Vectorized<int8_t> inline operator/(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
1172
+ return int_elementwise_binary_256(a, b, std::divides<int8_t>());
1173
+ }
1174
+ template <>
1175
+ Vectorized<uint8_t> inline operator/(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
1176
+ return int_elementwise_binary_256(a, b, std::divides<uint8_t>());
1177
+ }
1178
+
1179
+ template<class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
1180
+ inline Vectorized<T> operator&(const Vectorized<T>& a, const Vectorized<T>& b) {
1181
+ return _mm256_and_si256(a, b);
1182
+ }
1183
+ template<class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
1184
+ inline Vectorized<T> operator|(const Vectorized<T>& a, const Vectorized<T>& b) {
1185
+ return _mm256_or_si256(a, b);
1186
+ }
1187
+ template<class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
1188
+ inline Vectorized<T> operator^(const Vectorized<T>& a, const Vectorized<T>& b) {
1189
+ return _mm256_xor_si256(a, b);
1190
+ }
1191
+ template<class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
1192
+ inline Vectorized<T> operator~(const Vectorized<T>& a) {
1193
+ return _mm256_xor_si256(a, _mm256_set1_epi32(-1));
1194
+ }
1195
+
1196
+ inline Vectorized<int64_t> Vectorized<int64_t>::eq(const Vectorized<int64_t>& other) const {
1197
+ return (*this == other) & Vectorized<int64_t>(1);
1198
+ }
1199
+
1200
+ inline Vectorized<int64_t> Vectorized<int64_t>::ne(const Vectorized<int64_t>& other) const {
1201
+ return (*this != other) & Vectorized<int64_t>(1);
1202
+ }
1203
+
1204
+ inline Vectorized<int64_t> Vectorized<int64_t>::gt(const Vectorized<int64_t>& other) const {
1205
+ return (*this > other) & Vectorized<int64_t>(1);
1206
+ }
1207
+
1208
+ inline Vectorized<int64_t> Vectorized<int64_t>::ge(const Vectorized<int64_t>& other) const {
1209
+ return (*this >= other) & Vectorized<int64_t>(1);
1210
+ }
1211
+
1212
+ inline Vectorized<int64_t> Vectorized<int64_t>::lt(const Vectorized<int64_t>& other) const {
1213
+ return (*this < other) & Vectorized<int64_t>(1);
1214
+ }
1215
+
1216
+ inline Vectorized<int64_t> Vectorized<int64_t>::le(const Vectorized<int64_t>& other) const {
1217
+ return (*this <= other) & Vectorized<int64_t>(1);
1218
+ }
1219
+
1220
+ inline Vectorized<int32_t> Vectorized<int32_t>::eq(const Vectorized<int32_t>& other) const {
1221
+ return (*this == other) & Vectorized<int32_t>(1);
1222
+ }
1223
+
1224
+ inline Vectorized<int32_t> Vectorized<int32_t>::ne(const Vectorized<int32_t>& other) const {
1225
+ return (*this != other) & Vectorized<int32_t>(1);
1226
+ }
1227
+
1228
+ inline Vectorized<int32_t> Vectorized<int32_t>::gt(const Vectorized<int32_t>& other) const {
1229
+ return (*this > other) & Vectorized<int32_t>(1);
1230
+ }
1231
+
1232
+ inline Vectorized<int32_t> Vectorized<int32_t>::ge(const Vectorized<int32_t>& other) const {
1233
+ return (*this >= other) & Vectorized<int32_t>(1);
1234
+ }
1235
+
1236
+ inline Vectorized<int32_t> Vectorized<int32_t>::lt(const Vectorized<int32_t>& other) const {
1237
+ return (*this < other) & Vectorized<int32_t>(1);
1238
+ }
1239
+
1240
+ inline Vectorized<int32_t> Vectorized<int32_t>::le(const Vectorized<int32_t>& other) const {
1241
+ return (*this <= other) & Vectorized<int32_t>(1);
1242
+ }
1243
+
1244
+ inline Vectorized<int16_t> Vectorized<int16_t>::eq(const Vectorized<int16_t>& other) const {
1245
+ return (*this == other) & Vectorized<int16_t>(1);
1246
+ }
1247
+
1248
+ inline Vectorized<int16_t> Vectorized<int16_t>::ne(const Vectorized<int16_t>& other) const {
1249
+ return (*this != other) & Vectorized<int16_t>(1);
1250
+ }
1251
+
1252
+ inline Vectorized<int16_t> Vectorized<int16_t>::gt(const Vectorized<int16_t>& other) const {
1253
+ return (*this > other) & Vectorized<int16_t>(1);
1254
+ }
1255
+
1256
+ inline Vectorized<int16_t> Vectorized<int16_t>::ge(const Vectorized<int16_t>& other) const {
1257
+ return (*this >= other) & Vectorized<int16_t>(1);
1258
+ }
1259
+
1260
+ inline Vectorized<int16_t> Vectorized<int16_t>::lt(const Vectorized<int16_t>& other) const {
1261
+ return (*this < other) & Vectorized<int16_t>(1);
1262
+ }
1263
+
1264
+ inline Vectorized<int16_t> Vectorized<int16_t>::le(const Vectorized<int16_t>& other) const {
1265
+ return (*this <= other) & Vectorized<int16_t>(1);
1266
+ }
1267
+
1268
+ inline Vectorized<int8_t> Vectorized<int8_t>::eq(const Vectorized<int8_t>& other) const {
1269
+ return (*this == other) & Vectorized<int8_t>(1);
1270
+ }
1271
+
1272
+ inline Vectorized<int8_t> Vectorized<int8_t>::ne(const Vectorized<int8_t>& other) const {
1273
+ return (*this != other) & Vectorized<int8_t>(1);
1274
+ }
1275
+
1276
+ inline Vectorized<int8_t> Vectorized<int8_t>::gt(const Vectorized<int8_t>& other) const {
1277
+ return (*this > other) & Vectorized<int8_t>(1);
1278
+ }
1279
+
1280
+ inline Vectorized<int8_t> Vectorized<int8_t>::ge(const Vectorized<int8_t>& other) const {
1281
+ return (*this >= other) & Vectorized<int8_t>(1);
1282
+ }
1283
+
1284
+ inline Vectorized<int8_t> Vectorized<int8_t>::lt(const Vectorized<int8_t>& other) const {
1285
+ return (*this < other) & Vectorized<int8_t>(1);
1286
+ }
1287
+
1288
+ inline Vectorized<int8_t> Vectorized<int8_t>::le(const Vectorized<int8_t>& other) const {
1289
+ return (*this <= other) & Vectorized<int8_t>(1);
1290
+ }
1291
+
1292
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::eq(const Vectorized<uint8_t>& other) const {
1293
+ return (*this == other) & Vectorized<uint8_t>(1);
1294
+ }
1295
+
1296
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::ne(const Vectorized<uint8_t>& other) const {
1297
+ return (*this != other) & Vectorized<uint8_t>(1);
1298
+ }
1299
+
1300
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::gt(const Vectorized<uint8_t>& other) const {
1301
+ return (*this > other) & Vectorized<uint8_t>(1);
1302
+ }
1303
+
1304
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::ge(const Vectorized<uint8_t>& other) const {
1305
+ return (*this >= other) & Vectorized<uint8_t>(1);
1306
+ }
1307
+
1308
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::lt(const Vectorized<uint8_t>& other) const {
1309
+ return (*this < other) & Vectorized<uint8_t>(1);
1310
+ }
1311
+
1312
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::le(const Vectorized<uint8_t>& other) const {
1313
+ return (*this <= other) & Vectorized<uint8_t>(1);
1314
+ }
1315
+
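+ // [Editorial sketch, not part of this header] The eq/ne/gt/ge/lt/le methods
+ // above turn the all-ones comparison masks produced by the corresponding
+ // operators into numeric 0/1 lanes by AND-ing with a broadcast 1:
+ //
+ //   Vectorized<int32_t> a(3), b(3);
+ //   auto mask = (a == b);   // every lane 0xFFFFFFFF
+ //   auto bit  = a.eq(b);    // every lane 1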
1316
+ template <bool left_shift>
1317
+ Vectorized<int16_t> inline shift_256_16(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
1318
+ // No vector instruction for shifting int16_t, so emulating it instead.
1319
+
1320
+ // Control masks for shuffle operation, treating 256 bits as an
1321
+ // array of 16-bit elements, and considering pairs of neighboring
1322
+ // elements. Specifically, a mask named "ctl_M_N" (M,N in [0,1], and
1323
+ // M!=N) is set so that shuffle will move element with index M from
1324
+ // input pair into element with index N in output pair, and element
1325
+ // with index M in output pair will be set to all 0s.
1326
+ __m256i ctl_0_1 = _mm256_set_epi8(29, 28, 0x80, 0x80, 25, 24, 0x80, 0x80,
1327
+ 21, 20, 0x80, 0x80, 17, 16, 0x80, 0x80,
1328
+ 13, 12, 0x80, 0x80, 9, 8, 0x80, 0x80,
1329
+ 5, 4, 0x80, 0x80, 1, 0, 0x80, 0x80);
1330
+ __m256i ctl_1_0 = _mm256_set_epi8(0x80, 0x80, 31, 30, 0x80, 0x80, 27, 26,
1331
+ 0x80, 0x80, 23, 22, 0x80, 0x80, 19, 18,
1332
+ 0x80, 0x80, 15, 14, 0x80, 0x80, 11, 10,
1333
+ 0x80, 0x80, 7, 6, 0x80, 0x80, 3, 2);
1334
+
1335
+ // Masks for bitwise and operation, treating 256 bits as an array of
1336
+ // 16-bit elements, and considering them in pairs of neighboring
1337
+ // elements. A mask named "keep_M" (M in [0,1]) is set so that
1338
+ // bitwise and will copy element with index M from input pair into
1339
+ // element with the same index in output pair, while the other
1340
+ // element in output pair will be set to all 0s.
1341
+ __m256i keep_0 = _mm256_set1_epi32(0xFFFF);
1342
+ __m256i keep_1 = _mm256_set1_epi32(0xFFFF0000);
1343
+
1344
+ // Take each 16-bit element with idx%2==0 from input array to be
1345
+ // shifted and extend it to 32 bits so that 0s are added to the
1346
+ // right. Then, perform shifting on this 32-bit number. Upper 16
1347
+ // bits will be proper result of shifting original 16-bit number, so
1348
+ // write them to result array, into the same position from which
1349
+ // corresponding input element is taken. Also, make sure that
1350
+ // result array elements with idx%2!=0 are set to all 0s.
1351
+ //
1352
+ // Note that number of bits to shift for is extended to 32 bits by
1353
+ // adding 0s to the left. That means this number is not properly
1354
+ // sign-extended for negative values. However, number of bits to
1355
+ // shift is treated as an unsigned integer by respective shift
1356
+ // intrinsics anyway so if negative then either with or without
1357
+ // proper sign extension, it will be interpreted as a number greater
1358
+ // than 32, and the shifting result will be the same.
1359
+ __m256i a0 = _mm256_shuffle_epi8(a, ctl_0_1);
1360
+ __m256i b0 = _mm256_and_si256(b, keep_0);
1361
+ __m256i c0;
1362
+ if (left_shift)
1363
+ c0 = _mm256_sllv_epi32(a0, b0);
1364
+ else
1365
+ c0 = _mm256_srav_epi32(a0, b0);
1366
+ c0 = _mm256_shuffle_epi8(c0, ctl_1_0);
1367
+
1368
+ // Perform shifting the same way for input array elements with
1369
+ // idx%2==1.
1370
+ __m256i a1 = _mm256_and_si256(a, keep_1);
1371
+ __m256i b1 = _mm256_shuffle_epi8(b, ctl_1_0);
1372
+ __m256i c1;
1373
+ if (left_shift)
1374
+ c1 = _mm256_sllv_epi32(a1, b1);
1375
+ else
1376
+ c1 = _mm256_srav_epi32(a1, b1);
1377
+ c1 = _mm256_and_si256(c1, keep_1);
1378
+
1379
+ // Merge partial results into the final result.
1380
+ __m256i c = _mm256_or_si256(c0, c1);
1381
+
1382
+ return c;
1383
+ }
1384
+
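+ // [Editorial sketch, not part of this header] Per-lane scalar reference for the
+ // emulation above; shift counts are treated as unsigned, so negative or
+ // too-large counts fall into the >= 16 branch (0 for left shifts, sign
+ // replication for arithmetic right shifts).
+ template <bool left_shift>
+ inline int16_t shift_16_lane_ref_sketch(int16_t x, int16_t s) {
+   uint16_t count = static_cast<uint16_t>(s);
+   if (count >= 16) {
+     return left_shift ? int16_t(0) : int16_t(x < 0 ? -1 : 0);
+   }
+   return left_shift ? static_cast<int16_t>(static_cast<uint16_t>(x) << count)
+                     : static_cast<int16_t>(x >> count);
+ }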
1385
+ template <bool left_shift, typename T, typename std::enable_if_t<std::is_same<T, int8_t>::value || std::is_same<T, uint8_t>::value, int> = 0>
1386
+ Vectorized<T> inline shift_256_8(const Vectorized<T>& a, const Vectorized<T>& b) {
1387
+ // No vector instruction for shifting int8_t/uint8_t, so emulating
1388
+ // it instead.
1389
+
1390
+ // Control masks for shuffle operation, treating 256 bits as an
1391
+ // array of 8-bit elements, and considering quadruples of
1392
+ // neighboring elements. Specifically, a mask named "ctl_M_N" (M,N
1393
+ // in [0,1,2,3], and M!=N) is set so that shuffle will move element
1394
+ // with index M from input quadruple into element with index N in
1395
+ // output quadruple, and other elements in output quadruple will be
1396
+ // set to all 0s.
1397
+ __m256i ctl_0_3 = _mm256_set_epi8(28, 0x80, 0x80, 0x80, 24, 0x80, 0x80, 0x80,
1398
+ 20, 0x80, 0x80, 0x80, 16, 0x80, 0x80, 0x80,
1399
+ 12, 0x80, 0x80, 0x80, 8, 0x80, 0x80, 0x80,
1400
+ 4, 0x80, 0x80, 0x80, 0, 0x80, 0x80, 0x80);
1401
+ __m256i ctl_1_0 = _mm256_set_epi8(0x80, 0x80, 0x80, 29, 0x80, 0x80, 0x80, 25,
1402
+ 0x80, 0x80, 0x80, 21, 0x80, 0x80, 0x80, 17,
1403
+ 0x80, 0x80, 0x80, 13, 0x80, 0x80, 0x80, 9,
1404
+ 0x80, 0x80, 0x80, 5, 0x80, 0x80, 0x80, 1);
1405
+ __m256i ctl_1_3 = _mm256_set_epi8(29, 0x80, 0x80, 0x80, 25, 0x80, 0x80, 0x80,
1406
+ 21, 0x80, 0x80, 0x80, 17, 0x80, 0x80, 0x80,
1407
+ 13, 0x80, 0x80, 0x80, 9, 0x80, 0x80, 0x80,
1408
+ 5, 0x80, 0x80, 0x80, 1, 0x80, 0x80, 0x80);
1409
+ __m256i ctl_2_0 = _mm256_set_epi8(0x80, 0x80, 0x80, 30, 0x80, 0x80, 0x80, 26,
1410
+ 0x80, 0x80, 0x80, 22, 0x80, 0x80, 0x80, 18,
1411
+ 0x80, 0x80, 0x80, 14, 0x80, 0x80, 0x80, 10,
1412
+ 0x80, 0x80, 0x80, 6, 0x80, 0x80, 0x80, 2);
1413
+ __m256i ctl_2_3 = _mm256_set_epi8(30, 0x80, 0x80, 0x80, 26, 0x80, 0x80, 0x80,
1414
+ 22, 0x80, 0x80, 0x80, 18, 0x80, 0x80, 0x80,
1415
+ 14, 0x80, 0x80, 0x80, 10, 0x80, 0x80, 0x80,
1416
+ 6, 0x80, 0x80, 0x80, 2, 0x80, 0x80, 0x80);
1417
+ __m256i ctl_3_0 = _mm256_set_epi8(0x80, 0x80, 0x80, 31, 0x80, 0x80, 0x80, 27,
1418
+ 0x80, 0x80, 0x80, 23, 0x80, 0x80, 0x80, 19,
1419
+ 0x80, 0x80, 0x80, 15, 0x80, 0x80, 0x80, 11,
1420
+ 0x80, 0x80, 0x80, 7, 0x80, 0x80, 0x80, 3);
1421
+ __m256i ctl_3_1 = _mm256_set_epi8(0x80, 0x80, 31, 0x80, 0x80, 0x80, 27, 0x80,
1422
+ 0x80, 0x80, 23, 0x80, 0x80, 0x80, 19, 0x80,
1423
+ 0x80, 0x80, 15, 0x80, 0x80, 0x80, 11, 0x80,
1424
+ 0x80, 0x80, 7, 0x80, 0x80, 0x80, 3, 0x80);
1425
+ __m256i ctl_3_2 = _mm256_set_epi8(0x80, 31, 0x80, 0x80, 0x80, 27, 0x80, 0x80,
1426
+ 0x80, 23, 0x80, 0x80, 0x80, 19, 0x80, 0x80,
1427
+ 0x80, 15, 0x80, 0x80, 0x80, 11, 0x80, 0x80,
1428
+ 0x80, 7, 0x80, 0x80, 0x80, 3, 0x80, 0x80);
1429
+
1430
+ // Masks for bitwise and operation, treating 256 bits as an array of
1431
+ // 8-bit elements, and considering them in quadruples of neighboring
1432
+ // elements. A mask named "keep_M" (M in [0,1,2,3]) is set so that
1433
+ // bitwise and will copy element with index M from input quadruple
1434
+ // into element with the same index in output quadruple, while the
1435
+ // other elements in output quadruple will be set to all 0s.
1436
+ __m256i keep_0 = _mm256_set1_epi32(0xFF);
1437
+ __m256i keep_3 = _mm256_set1_epi32(0xFF000000);
1438
+
1439
+ // Take each 8-bit element with idx%4==0 from input array to be
1440
+ // shifted and extend it to 32 bits so that 0s are added to the
1441
+ // right. Then, perform shifting on this 32-bit number. Upper 8
1442
+ // bits will be proper result of shifting original 8-bit number, so
1443
+ // write them to result array, into the same position from which
1444
+ // corresponding input element is taken. Also, make sure that
1445
+ // result array elements with idx%4!=0 are set to all 0s.
1446
+ //
1447
+ // Note that number of bits to shift for is extended to 32 bits by
1448
+ // adding 0s to the left. That means this number is not properly
1449
+ // sign-extended for negative values. However, number of bits to
1450
+ // shift is treated as an unsigned integer by respective shift
1451
+ // intrinsics anyway so if negative then either with or without
1452
+ // proper sign extension, it will be interpreted as a number greater
1453
+ // than 32, and the shifting result will be the same.
1454
+ __m256i a0 = _mm256_shuffle_epi8(a, ctl_0_3);
1455
+ __m256i b0 = _mm256_and_si256(b, keep_0);
1456
+ __m256i c0;
1457
+ if (left_shift)
1458
+ c0 = _mm256_sllv_epi32(a0, b0);
1459
+ else
1460
+ if constexpr (std::is_same_v<T, int8_t>)
1461
+ c0 = _mm256_srav_epi32(a0, b0);
1462
+ else
1463
+ c0 = _mm256_srlv_epi32(a0, b0);
1464
+ c0 = _mm256_shuffle_epi8(c0, ctl_3_0);
1465
+
1466
+ // Perform shifting the same way for input array elements with
1467
+ // idx%4==1.
1468
+ __m256i a1 = _mm256_shuffle_epi8(a, ctl_1_3);
1469
+ __m256i b1 = _mm256_shuffle_epi8(b, ctl_1_0);
1470
+ __m256i c1;
1471
+ if (left_shift)
1472
+ c1 = _mm256_sllv_epi32(a1, b1);
1473
+ else
1474
+ if constexpr (std::is_same_v<T, int8_t>)
1475
+ c1 = _mm256_srav_epi32(a1, b1);
1476
+ else
1477
+ c1 = _mm256_srlv_epi32(a1, b1);
1478
+ c1 = _mm256_shuffle_epi8(c1, ctl_3_1);
1479
+
1480
+ // Perform shifting the same way for input array elements with
1481
+ // idx%4==2.
1482
+ __m256i a2 = _mm256_shuffle_epi8(a, ctl_2_3);
1483
+ __m256i b2 = _mm256_shuffle_epi8(b, ctl_2_0);
1484
+ __m256i c2;
1485
+ if (left_shift)
1486
+ c2 = _mm256_sllv_epi32(a2, b2);
1487
+ else
1488
+ if constexpr (std::is_same_v<T, int8_t>)
1489
+ c2 = _mm256_srav_epi32(a2, b2);
1490
+ else
1491
+ c2 = _mm256_srlv_epi32(a2, b2);
1492
+ c2 = _mm256_shuffle_epi8(c2, ctl_3_2);
1493
+
1494
+ // Perform shifting the same way for input array elements with
1495
+ // idx%4==3.
1496
+ __m256i a3 = _mm256_and_si256(a, keep_3);
1497
+ __m256i b3 = _mm256_shuffle_epi8(b, ctl_3_0);
1498
+ __m256i c3;
1499
+ if (left_shift)
1500
+ c3 = _mm256_sllv_epi32(a3, b3);
1501
+ else
1502
+ if constexpr (std::is_same_v<T, int8_t>)
1503
+ c3 = _mm256_srav_epi32(a3, b3);
1504
+ else
1505
+ c3 = _mm256_srlv_epi32(a3, b3);
1506
+ c3 = _mm256_and_si256(c3, keep_3);
1507
+
1508
+ // Merge partial results into the final result.
1509
+ __m256i c01 = _mm256_or_si256(c0, c1);
1510
+ __m256i c23 = _mm256_or_si256(c2, c3);
1511
+ __m256i c = _mm256_or_si256(c01, c23);
1512
+
1513
+ return c;
1514
+ }
1515
+
1516
+ template <>
1517
+ Vectorized<int64_t> inline operator<<(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
1518
+ return _mm256_sllv_epi64(a, b);
1519
+ }
1520
+
1521
+ template <>
1522
+ Vectorized<int32_t> inline operator<<(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
1523
+ return _mm256_sllv_epi32(a, b);
1524
+ }
1525
+
1526
+ template <>
1527
+ Vectorized<int16_t> inline operator<<(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
1528
+ return shift_256_16<true>(a, b);
1529
+ }
1530
+
1531
+ template <>
1532
+ Vectorized<int8_t> inline operator<<(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
1533
+ return shift_256_8<true>(a, b);
1534
+ }
1535
+
1536
+ template <>
1537
+ Vectorized<uint8_t> inline operator<<(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
1538
+ return shift_256_8<true>(a, b);
1539
+ }
1540
+
1541
+ template <>
1542
+ Vectorized<int64_t> inline operator>>(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
1543
+ // No vector instruction for right arithmetic shifting int64_t, so emulating it
1544
+ // instead.
1545
+
1546
+ // Clamp the shift values such that shift values < 0 or > 64 are changed to 64,
1547
+ // which results in -1 for negative input and 0 for non-negative input.
1548
+ __m256i zero = _mm256_set1_epi64x(0);
1549
+ __m256i max_shift = _mm256_set1_epi64x(64);
1550
+ __m256i mask = _mm256_or_si256(_mm256_cmpgt_epi64(zero, b), _mm256_cmpgt_epi64(b, max_shift));
1551
+ __m256i shift = _mm256_blendv_epi8(b, max_shift, mask);
1552
+ // Shift the number logically to the right, thus filling the most
1553
+ // significant bits with 0s. Then, replace these bits with the sign
1554
+ // bit.
1555
+ __m256i sign_bits = _mm256_cmpgt_epi64(zero, a);
1556
+ __m256i sign_shift = _mm256_sub_epi64(max_shift, shift);
1557
+ __m256i sign_ext = _mm256_sllv_epi64(sign_bits, sign_shift);
1558
+ __m256i c = _mm256_srlv_epi64(a, shift);
1559
+ c = _mm256_or_si256(c, sign_ext);
1560
+
1561
+ return c;
1562
+ }
1563
+
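+ // [Editorial sketch, not part of this header] Per-lane scalar reference for the
+ // emulation above: out-of-range counts (negative or greater than 64) behave
+ // like a shift by 64, i.e. -1 for negative inputs and 0 otherwise.
+ inline int64_t sra_64_lane_ref_sketch(int64_t x, int64_t count) {
+   if (count < 0 || count >= 64) {
+     return x < 0 ? -1 : 0;
+   }
+   // Arithmetic shift on the two's-complement targets this header assumes.
+   return x >> count;
+ }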
1564
+ template <>
1565
+ Vectorized<int32_t> inline operator>>(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
1566
+ return _mm256_srav_epi32(a, b);
1567
+ }
1568
+
1569
+ template <>
1570
+ Vectorized<int16_t> inline operator>>(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
1571
+ return shift_256_16<false>(a, b);
1572
+ }
1573
+
1574
+ template <>
1575
+ Vectorized<int8_t> inline operator>>(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
1576
+ return shift_256_8<false>(a, b);
1577
+ }
1578
+
1579
+ template <>
1580
+ Vectorized<uint8_t> inline operator>>(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
1581
+ return shift_256_8<false>(a, b);
1582
+ }
1583
+
1584
+ #endif
1585
+
1586
+ }} // namespace at::vec::CPU_CAPABILITY
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vec256_qint.h ADDED
@@ -0,0 +1,1335 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+ #include <ATen/cpu/vec/vec_base.h>
8
+ #include <ATen/native/quantized/AffineQuantizerBase.h>
9
+
10
+ #include <c10/util/irange.h>
11
+ #include <c10/util/qint32.h>
12
+ #include <c10/util/qint8.h>
13
+ #include <c10/util/quint8.h>
14
+
15
+ #include <array>
16
+ #include <cmath>
17
+
18
+ // This file defines Vectorized<> for the quantized types.
19
+ //
20
+ //
21
+ // Currently, we simply use these classes as efficient converters between
22
+ // the quantized types and Vectorized<float>, usually in bandwidth-bound cases
23
+ // where doing the arithmetic in full-precision is acceptable (e.g.
24
+ // elementwise operators).
25
+ //
26
+ //
27
+ // Conversions are as follows:
28
+ // Vectorized<qint8> -> 4x Vectorized<float>
29
+ // Vectorized<quint8> -> 4x Vectorized<float>
30
+ // Vectorized<qint32> -> 1x Vectorized<float>
31
+ //
32
+ // The size of the returned float vector is specified by the special
33
+ // constexpr function float_num_vecs. The type of the value returned
34
+ // from dequantize (and expected as an argument to quantize) is
35
+ // specified by float_vec_return_type.
36
+ //
37
+ // When writing kernels with these vectors, it is expected that floating-
38
+ // point operations will be carried out in a loop over Vectorized<T>::float_num_vecs
39
+ // iterations.
40
+
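+ // A minimal usage sketch (editorial illustration, not part of this header),
+ // assuming the Vectorized<c10::qint8> specialization defined below and
+ // caller-provided scale/zero-point values:
+ //
+ //   auto qx = Vectorized<c10::qint8>::loadu(src);
+ //   auto dq = qx.dequantize(scale_vec, zero_point_vec);  // 4x Vectorized<float>
+ //   for (int i = 0; i < Vectorized<c10::qint8>::float_num_vecs(); ++i) {
+ //     dq[i] = dq[i] * dq[i];                             // any float math
+ //   }
+ //   auto qy = Vectorized<c10::qint8>::quantize(dq, scale, zero_point, inv_scale);
+ //   qy.store(dst);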
41
+ namespace at::vec {
42
+ inline namespace CPU_CAPABILITY {
43
+
44
+ #if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
45
+
46
+ struct Vectorizedqi {
47
+ protected:
48
+ __m256i vals __attribute__((aligned(64)));
49
+
50
+ public:
51
+ Vectorizedqi() {}
52
+ Vectorizedqi(__m256i v) : vals(v) {}
53
+ operator __m256i() const {
54
+ return vals;
55
+ }
56
+ };
57
+
58
+ template <typename T>
59
+ __m256i pack_saturate_and_clamp(
60
+ __m256i first,
61
+ __m256i second,
62
+ T min_val,
63
+ T max_val);
64
+
65
+ template <>
66
+ inline __m256i pack_saturate_and_clamp<int32_t>(
67
+ __m256i /*first*/,
68
+ __m256i /*second*/,
69
+ int32_t /*min_val*/,
70
+ int32_t /*max_val*/) {
71
+ // This function is for linkage only, will not be used
72
+ AT_ERROR("pack_saturate_and_clamp<int32_t> is not supported");
73
+ }
74
+
75
+ template <>
76
+ inline __m256i pack_saturate_and_clamp<int8_t>(
77
+ __m256i first,
78
+ __m256i second,
79
+ int8_t min_val,
80
+ int8_t max_val) {
81
+ __m256i packed_and_sat = _mm256_packs_epi16(first, second);
82
+ return _mm256_max_epi8(
83
+ _mm256_set1_epi8(min_val),
84
+ _mm256_min_epi8(packed_and_sat, _mm256_set1_epi8(max_val)));
85
+ }
86
+
87
+ template <>
88
+ inline __m256i pack_saturate_and_clamp<uint8_t>(
89
+ __m256i first,
90
+ __m256i second,
91
+ uint8_t min_val,
92
+ uint8_t max_val) {
93
+ __m256i packed_and_sat = _mm256_packus_epi16(first, second);
94
+ return _mm256_max_epu8(
95
+ _mm256_set1_epi8(min_val),
96
+ _mm256_min_epu8(packed_and_sat, _mm256_set1_epi8(max_val)));
97
+ }
98
+
99
+ template <typename T>
100
+ typename std::enable_if<std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, at::vec::Vectorized<float>>::type
101
+ inline convert_int8_to_float(at::vec::Vectorized<T> src) {
102
+ // Note: this function only converts as many input elements as at::vec::Vectorized<float>::size()
103
+ // Only handle first 8*8 bits
104
+ __m128i input_128 = _mm256_castsi256_si128(src);
105
+ // Convert from 8*uint8/int8 to 8*int32
106
+ __m256i input_256_int32;
107
+ if constexpr (std::is_same_v<T, uint8_t>)
108
+ input_256_int32 = _mm256_cvtepu8_epi32(input_128);
109
+ else
110
+ input_256_int32 = _mm256_cvtepi8_epi32(input_128);
111
+ // Convert from 8*int32 to 8*float
112
+ return _mm256_cvtepi32_ps(input_256_int32);
113
+ }
114
+
115
+ template <typename T>
116
+ typename std::enable_if<std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, at::vec::Vectorized<T>>::type
117
+ inline convert_float_to_int8(at::vec::Vectorized<float> src) {
118
+ // Convert from float32 to int32 with truncation
119
+ __m256i x_values_int32 = _mm256_cvttps_epi32(src);
120
+
121
+ // Convert from int32 to int16 using signed saturation
122
+ __m256i xy_packed_v = _mm256_packs_epi32(x_values_int32, x_values_int32);
123
+
124
+ constexpr auto min_val = std::numeric_limits<T>::min();
125
+ constexpr auto max_val = std::numeric_limits<T>::max();
126
+
127
+ // Convert from int16 to uint8/int8 using unsigned saturation
128
+ __m256i xyzw_clamped_v = pack_saturate_and_clamp<T>(
129
+ xy_packed_v, xy_packed_v, min_val, max_val);
130
+ __m256i permute_mask_v =
131
+ _mm256_set_epi32(0x07, 0x03, 0x06, 0x02, 0x05, 0x01, 0x04, 0x00);
132
+ return _mm256_permutevar8x32_epi32(xyzw_clamped_v, permute_mask_v);
133
+ }
134
+
135
+ template <typename T>
136
+ inline void __attribute__((always_inline)) QuantizeAvx2(
137
+ const float* src,
138
+ T* dst,
139
+ int len,
140
+ float inverse_scale,
141
+ int64_t zero_point) {
142
+ constexpr int VLEN = 8;
143
+ constexpr auto min_val = std::numeric_limits<T>::min();
144
+ constexpr auto max_val = std::numeric_limits<T>::max();
145
+ const __m256i min_v = _mm256_set1_epi32(min_val);
146
+ const __m256i max_v = _mm256_set1_epi32(max_val);
147
+ // This is the largest int32 value < int32_max exactly representable in float
148
+ constexpr int32_t int32_float_max_val =
149
+ std::numeric_limits<int32_t>::max() - 127;
150
+ int i = 0;
151
+ __m256 inverse_scale_v = _mm256_set1_ps(inverse_scale);
152
+ // clang-format off
153
+ static const __m256i shuffle_mask_v = _mm256_set_epi8(
154
+ 0xff, 0xff, 0xff, 0xff,
155
+ 0xff, 0xff, 0xff, 0xff,
156
+ 0xff, 0xff, 0xff, 0xff,
157
+ 0x0c, 0x08, 0x04, 0x00,
158
+ 0xff, 0xff, 0xff, 0xff,
159
+ 0xff, 0xff, 0xff, 0xff,
160
+ 0xff, 0xff, 0xff, 0xff,
161
+ 0x0c, 0x08, 0x04, 0x00);
162
+ // clang-format on
163
+ __m256i permute_mask_v =
164
+ _mm256_set_epi32(0x07, 0x03, 0x06, 0x02, 0x05, 0x01, 0x04, 0x00);
165
+ __m256i permute_mask_l8_v =
166
+ _mm256_set_epi32(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00);
167
+ int len_aligned = len / (VLEN * 4) * (VLEN * 4);
168
+ for (; i < len_aligned; i += 4 * VLEN) {
169
+ // x
170
+ __m256 x_vals = _mm256_load_ps(src + i);
171
+ __m256 x_transformed_v = _mm256_mul_ps(x_vals, inverse_scale_v);
172
+ // If the floating point value is greater than int32_max,
173
+ // _mm256_cvtps_epi32 converts them to negative values. Clip at
175
+ // int32_float_max_val to avoid this.
175
+ x_transformed_v =
176
+ _mm256_min_ps(x_transformed_v, _mm256_set1_ps(int32_float_max_val));
177
+ // y
178
+ __m256 y_vals = _mm256_load_ps(src + i + VLEN);
179
+ __m256 y_transformed_v = _mm256_mul_ps(y_vals, inverse_scale_v);
180
+ y_transformed_v =
181
+ _mm256_min_ps(y_transformed_v, _mm256_set1_ps(int32_float_max_val));
182
+ // z
183
+ __m256 z_vals = _mm256_load_ps(src + i + 2 * VLEN);
184
+ __m256 z_transformed_v = _mm256_mul_ps(z_vals, inverse_scale_v);
185
+ z_transformed_v =
186
+ _mm256_min_ps(z_transformed_v, _mm256_set1_ps(int32_float_max_val));
187
+ // w
188
+ __m256 w_vals = _mm256_load_ps(src + i + 3 * VLEN);
189
+ __m256 w_transformed_v = _mm256_mul_ps(w_vals, inverse_scale_v);
190
+ w_transformed_v =
191
+ _mm256_min_ps(w_transformed_v, _mm256_set1_ps(int32_float_max_val));
192
+
193
+ __m256i x_rounded_v = _mm256_cvtps_epi32(x_transformed_v);
194
+ __m256i y_rounded_v = _mm256_cvtps_epi32(y_transformed_v);
195
+ __m256i z_rounded_v = _mm256_cvtps_epi32(z_transformed_v);
196
+ __m256i w_rounded_v = _mm256_cvtps_epi32(w_transformed_v);
197
+
198
+ // add zero point
199
+ x_rounded_v = _mm256_add_epi32(x_rounded_v, _mm256_set1_epi32(zero_point));
200
+ y_rounded_v = _mm256_add_epi32(y_rounded_v, _mm256_set1_epi32(zero_point));
201
+ z_rounded_v = _mm256_add_epi32(z_rounded_v, _mm256_set1_epi32(zero_point));
202
+ w_rounded_v = _mm256_add_epi32(w_rounded_v, _mm256_set1_epi32(zero_point));
203
+
204
+ __m256i xy_packed_v = _mm256_packs_epi32(x_rounded_v, y_rounded_v);
205
+ __m256i zw_packed_v = _mm256_packs_epi32(z_rounded_v, w_rounded_v);
206
+ __m256i xyzw_clamped_v =
207
+ pack_saturate_and_clamp<T>(xy_packed_v, zw_packed_v, min_val, max_val);
208
+
209
+ xyzw_clamped_v =
210
+ _mm256_permutevar8x32_epi32(xyzw_clamped_v, permute_mask_v);
211
+ _mm256_storeu_si256(reinterpret_cast<__m256i*>(dst + i), xyzw_clamped_v);
212
+ }
213
+
214
+ // Additional 8-lane AVX2 loop to still use vectorization when the remaining len is smaller,
215
+ // based on fbgemm::QuantizeAvx2 (https://github.com/pytorch/FBGEMM)
216
+ for (; i < len / VLEN * VLEN; i += VLEN) {
217
+ __m256 x_vals = _mm256_load_ps(src + i);
218
+ __m256 x_transformed_v = _mm256_mul_ps(x_vals, inverse_scale_v);
219
+ x_transformed_v =
220
+ _mm256_min_ps(x_transformed_v, _mm256_set1_ps(int32_float_max_val));
221
+ __m256i x_rounded_v = _mm256_cvtps_epi32(x_transformed_v);
222
+ x_rounded_v = _mm256_add_epi32(x_rounded_v, _mm256_set1_epi32(zero_point));
223
+ __m256i x_clipped_v =
224
+ _mm256_max_epi32(min_v, _mm256_min_epi32(max_v, x_rounded_v));
225
+
226
+ x_clipped_v = _mm256_shuffle_epi8(x_clipped_v, shuffle_mask_v);
227
+ x_clipped_v = _mm256_permutevar8x32_epi32(x_clipped_v, permute_mask_l8_v);
228
+ _mm_storel_epi64(
229
+ reinterpret_cast<__m128i*>(dst + i),
230
+ _mm256_castsi256_si128(x_clipped_v));
231
+ }
232
+
233
+ for (; i < len; ++i) {
234
+ float transformed = src[i] * inverse_scale;
235
+
236
+ // Not exactly the same behavior as the vectorized code.
237
+ // The vectorized code above always rounds to even in halfway cases
238
+ // (https://software.intel.com/en-us/node/523819), but std::nearbyint
239
+ // does the same only when the current rounding mode is FE_TONEAREST.
240
+ // However, in practice, this should not be a problem because most cases
241
+ // use the default rounding mode FE_TONEAREST.
242
+ // Note that we cannot implement the same behavior as the vectorized code
243
+ // using std::round because it does rounding away from zero in halfway
244
+ // cases.
245
+ transformed = zero_point + std::nearbyint(transformed);
246
+ float clipped =
247
+ std::min(std::max(transformed, float(min_val)), float(max_val));
248
+ dst[i] = clipped;
249
+ }
250
+ }
251
+
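+ // [Editorial note] Concrete halfway-case example for the comment above: with
+ // src[i] * inverse_scale == 2.5f, the vectorized path (_mm256_cvtps_epi32,
+ // round-to-nearest-even) and std::nearbyint under FE_TONEAREST both yield 2,
+ // while std::round would yield 3.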
252
+ template<>
253
+ struct Vectorized<c10::qint32> : public Vectorizedqi {
254
+ using size_type = int;
255
+ static constexpr size_type size() {
256
+ return 8;
257
+ }
258
+
259
+ static constexpr int float_num_vecs() {
260
+ return 1;
261
+ }
262
+
263
+ static constexpr int int_num_vecs() {
264
+ return 1;
265
+ }
266
+
267
+ using float_vec_return_type = std::array<Vectorized<float>, 1>;
268
+ using int_vec_return_type = std::array<Vectorized<c10::qint32>, 1>;
269
+ using value_type = c10::qint32::underlying;
270
+
271
+ public:
272
+ using Vectorizedqi::Vectorizedqi;
273
+ Vectorized() {}
274
+
275
+ Vectorized(__m256i vals_) { vals = vals_;}
276
+
277
+ // Broadcast constructor
278
+ Vectorized(const c10::qint32& val) {
279
+ value_type uw = val.val_;
280
+ vals = _mm256_set1_epi32(uw);
281
+ }
282
+
283
+ void store(void* ptr, int count = size()) const {
284
+ if (count != size()) {
285
+ memcpy(ptr, &vals, count * sizeof(value_type));
286
+ } else {
287
+ _mm256_storeu_si256((__m256i*)ptr, vals);
288
+ }
289
+ }
290
+
291
+ static Vectorized<c10::qint32> loadu(const void* ptr) {
292
+ return Vectorized<c10::qint32>(ptr);
293
+ }
294
+
295
+ static Vectorized<c10::qint32> loadu(const void* ptr, int64_t count) {
296
+ __at_align__ value_type tmp_values[size()];
297
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
298
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
299
+ // instructions while a loop would be compiled to one instruction.
300
+ for (const auto i : c10::irange(size())) {
301
+ tmp_values[i] = 0;
302
+ }
303
+ std::memcpy(
304
+ tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
305
+ return _mm256_loadu_si256((const __m256i*)tmp_values);
306
+ }
307
+
308
+ float_vec_return_type dequantize(
309
+ Vectorized<float> scale,
310
+ Vectorized<float> /*zero_point*/,
311
+ Vectorized<float> scale_zp_premul) const {
312
+ __m256 float_vals = _mm256_cvtepi32_ps(vals);
313
+ return {vec::fmadd(scale, Vectorized<float>(float_vals), scale_zp_premul)};
314
+ }
315
+
316
+ float_vec_return_type dequantize(
317
+ Vectorized<float> scale,
318
+ Vectorized<float> zero_point) const {
319
+ __m256 float_vals = _mm256_cvtepi32_ps(vals);
320
+ return {(Vectorized<float>(float_vals) - zero_point) * scale};
321
+ }
322
+
323
+ static Vectorized<c10::qint32> quantize(
324
+ const float_vec_return_type& rhs,
325
+ float scale,
326
+ int32_t zero_point,
327
+ float /*inverse_scale*/) {
328
+ Vectorized<c10::qint32> retval;
329
+ auto rhs_data = (__m256)rhs[0];
330
+ at::native::quantize_vec<c10::qint32, /*precision=*/32>(
331
+ scale, zero_point, (float*)&rhs_data, (c10::qint32*)&retval.vals, 8);
332
+ return retval;
333
+ }
334
+
335
+ Vectorized<c10::qint32> maximum(Vectorized<c10::qint32> b) const {
336
+ return _mm256_max_epi32(vals, b.vals);
337
+ }
338
+
339
+ Vectorized<c10::qint32> minimum(Vectorized<c10::qint32> b) const {
340
+ return _mm256_min_epi32(vals, b.vals);
341
+ }
342
+
343
+ Vectorized<c10::qint32> relu(Vectorized<c10::qint32> zero_point) const {
344
+ return maximum(zero_point);
345
+ }
346
+
347
+ Vectorized<c10::qint32> relu6(
348
+ Vectorized<c10::qint32> zero_point,
349
+ Vectorized<c10::qint32> q_six) {
350
+ return _mm256_min_epi32(
351
+ _mm256_max_epi32(vals, zero_point.vals), q_six.vals);
352
+ }
353
+
354
+ int_vec_return_type widening_subtract(Vectorized<c10::qint32> b) const {
355
+ return {_mm256_sub_epi32(vals, b)};
356
+ }
357
+
358
+ static Vectorized<c10::qint32> requantize_from_int(
359
+ const int_vec_return_type& inp,
360
+ float multiplier,
361
+ int32_t zero_point) {
362
+ __m256 multiplier_v = _mm256_set1_ps(multiplier);
363
+ __m256i zero_point_v = _mm256_set1_epi32(zero_point);
364
+
365
+ __m256 scaled = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[0]), multiplier_v);
366
+ __m256i rounded = _mm256_cvtps_epi32(scaled);
367
+ return _mm256_add_epi32(rounded, zero_point_v);
368
+ }
369
+
370
+ private:
371
+ // Load from memory constructor
372
+ Vectorized(const void* ptr) {
373
+ vals = _mm256_loadu_si256((const __m256i*)ptr);
374
+ }
375
+ };
376
+
377
+ template <>
378
+ Vectorized<c10::qint32> inline maximum(const Vectorized<c10::qint32>& a, const Vectorized<c10::qint32>& b) {
379
+ return a.maximum(b);
380
+ }
381
+
382
+ template <>
383
+ Vectorized<c10::qint32> inline operator*(
384
+ const Vectorized<c10::qint32>& a,
385
+ const Vectorized<c10::qint32>& b) {
386
+ return _mm256_mullo_epi32(a, b);
387
+ }
388
+
389
+ template <>
390
+ Vectorized<c10::qint32> inline operator+(
391
+ const Vectorized<c10::qint32>& a,
392
+ const Vectorized<c10::qint32>& b) {
393
+ return _mm256_add_epi32(a, b);
394
+ }
395
+
396
+ /*
397
+ * Convert values from int32 back to int8/uint8
398
+ */
399
+ template <typename T>
400
+ __m256i RequantizeAvx2(
401
+ const std::array<Vectorized<c10::qint32>, 4>& inp,
402
+ __m256 multiplier,
403
+ __m256i zp) {
404
+ static_assert(
405
+ std::is_same<T, int8_t>::value || std::is_same<T, uint8_t>::value,
406
+ "Only int8_t/uint8_t are supported");
407
+ constexpr auto min_val = std::numeric_limits<T>::min();
408
+ constexpr auto max_val = std::numeric_limits<T>::max();
409
+ __m256i permute_mask_v =
410
+ _mm256_set_epi32(0x07, 0x03, 0x06, 0x02, 0x05, 0x01, 0x04, 0x00);
411
+ __m256 x_scaled_v = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[0]), multiplier);
412
+ __m256 y_scaled_v = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[1]), multiplier);
413
+ __m256 z_scaled_v = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[2]), multiplier);
414
+ __m256 w_scaled_v = _mm256_mul_ps(_mm256_cvtepi32_ps(inp[3]), multiplier);
415
+
416
+ __m256i x_rounded_v = _mm256_cvtps_epi32(x_scaled_v);
417
+ __m256i y_rounded_v = _mm256_cvtps_epi32(y_scaled_v);
418
+ __m256i z_rounded_v = _mm256_cvtps_epi32(z_scaled_v);
419
+ __m256i w_rounded_v = _mm256_cvtps_epi32(w_scaled_v);
420
+
421
+ /* Add zero point */
422
+ __m256i x_v = _mm256_add_epi32(x_rounded_v, zp);
423
+ __m256i y_v = _mm256_add_epi32(y_rounded_v, zp);
424
+ __m256i z_v = _mm256_add_epi32(z_rounded_v, zp);
425
+ __m256i w_v = _mm256_add_epi32(w_rounded_v, zp);
426
+
427
+ /* Pack to int16_t and saturate */
428
+ __m256i xy_packed_v = _mm256_packs_epi32(x_v, y_v);
429
+ __m256i zw_packed_v = _mm256_packs_epi32(z_v, w_v);
430
+
431
+ __m256i xyzw_clamped_v =
432
+ pack_saturate_and_clamp<T>(xy_packed_v, zw_packed_v, min_val, max_val);
433
+
434
+ /*
435
+ * xyzw_clamped_v has results in the following layout so we need to
436
+ * permute: x0-3 y0-3 z0-3 w0-3 x4-7 y4-7 z4-7 w4-7
437
+ */
438
+ xyzw_clamped_v = _mm256_permutevar8x32_epi32(xyzw_clamped_v, permute_mask_v);
439
+ return xyzw_clamped_v;
440
+ }
441
+
442
+ template<>
443
+ struct Vectorized<c10::qint8> : public Vectorizedqi {
444
+ static constexpr int size() {
445
+ return 32;
446
+ }
447
+
448
+ static constexpr int float_num_vecs() {
449
+ return 4;
450
+ }
451
+
452
+ static constexpr int int_num_vecs() {
453
+ return 4;
454
+ }
455
+
456
+ using float_vec_return_type = std::array<Vectorized<float>, 4>;
457
+ using int_vec_return_type = std::array<Vectorized<c10::qint32>, 4>;
458
+ using value_type = typename c10::qint8::underlying;
459
+
460
+ public:
461
+ using Vectorizedqi::Vectorizedqi;
462
+
463
+ Vectorized() {}
464
+ Vectorized(__m256i vals_) { vals = vals_;}
465
+
466
+ // Broadcast constructor
467
+ Vectorized(const c10::qint8& val) {
468
+ value_type uw = val.val_;
469
+ vals = _mm256_set1_epi8(uw);
470
+ }
471
+
472
+ // This is needed because the compiler emits awful code for the default
473
+ // constructor for moving the enum
474
+ // NOLINTNEXTLINE(clang-diagnostic-deprecated-copy)
475
+ C10_CLANG_DIAGNOSTIC_PUSH()
476
+ #if C10_CLANG_HAS_WARNING("-Wdeprecated-copy")
477
+ C10_CLANG_DIAGNOSTIC_IGNORE("-Wdeprecated-copy")
478
+ #endif
479
+ Vectorized(const Vectorized<c10::qint8>& other) : Vectorizedqi(other.vals) { }
480
+ C10_CLANG_DIAGNOSTIC_POP()
481
+
482
+ void store(void* ptr, int count = size()) const {
483
+ if (count != size()) {
484
+ memcpy(ptr, &vals, count * sizeof(value_type));
485
+ } else {
486
+ _mm256_storeu_si256((__m256i*)ptr, vals);
487
+ }
488
+ }
489
+
490
+ static Vectorized<c10::qint8> loadu(const void* ptr) {
491
+ return Vectorized<c10::qint8>(ptr);
492
+ }
493
+
494
+ static Vectorized<c10::qint8> loadu(const void* ptr, int64_t count) {
495
+ __at_align__ value_type tmp_values[size()];
496
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
497
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
498
+ // instructions while a loop would be compiled to one instruction.
499
+ for (const auto i : c10::irange(size())) {
500
+ tmp_values[i] = 0;
501
+ }
502
+ std::memcpy(
503
+ tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
504
+ return _mm256_loadu_si256((const __m256i*)tmp_values);
505
+ }
506
+
507
+ private:
508
+ __m256i cvtepi8_epi32(__m128i epi8_vals) const {
509
+ return _mm256_cvtepi8_epi32(epi8_vals);
510
+ }
511
+
512
+ public:
513
+ float_vec_return_type dequantize(
514
+ Vectorized<float> scale,
515
+ Vectorized<float> /*zero_point*/,
516
+ Vectorized<float> scale_neg_zp_premul) const {
517
+ __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0));
518
+ __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1));
519
+ __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2));
520
+ __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3));
521
+
522
+ __m256 float_val0 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val0));
523
+ __m256 float_val1 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val1));
524
+ __m256 float_val2 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val2));
525
+ __m256 float_val3 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val3));
526
+
527
+ auto val0 =
528
+ vec::fmadd(scale, Vectorized<float>(float_val0), scale_neg_zp_premul);
529
+ auto val1 =
530
+ vec::fmadd(scale, Vectorized<float>(float_val1), scale_neg_zp_premul);
531
+ auto val2 =
532
+ vec::fmadd(scale, Vectorized<float>(float_val2), scale_neg_zp_premul);
533
+ auto val3 =
534
+ vec::fmadd(scale, Vectorized<float>(float_val3), scale_neg_zp_premul);
535
+ return {val0, val1, val2, val3};
536
+ }
537
+
538
+ float_vec_return_type dequantize(
539
+ Vectorized<float> scale,
540
+ Vectorized<float> zero_point) const {
541
+ __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0));
542
+ __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1));
543
+ __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2));
544
+ __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3));
545
+
546
+ __m256 float_val0 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val0));
547
+ __m256 float_val1 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val1));
548
+ __m256 float_val2 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val2));
549
+ __m256 float_val3 = _mm256_cvtepi32_ps(cvtepi8_epi32(int_val3));
550
+
551
+ auto val0 = (Vectorized<float>(float_val0) - zero_point) * scale;
552
+ auto val1 = (Vectorized<float>(float_val1) - zero_point) * scale;
553
+ auto val2 = (Vectorized<float>(float_val2) - zero_point) * scale;
554
+ auto val3 = (Vectorized<float>(float_val3) - zero_point) * scale;
555
+ return {val0, val1, val2, val3};
556
+ }
557
+
558
+ static Vectorized<c10::qint8> quantize(
559
+ const float_vec_return_type& rhs,
560
+ float /*scale*/,
561
+ int32_t zero_point,
562
+ float inverse_scale) {
563
+ auto* rhs_data = (float*)rhs.data();
564
+ int8_t quantized_values[32];
565
+ QuantizeAvx2<value_type>(
566
+ rhs_data, quantized_values, 32, inverse_scale, zero_point);
567
+ return Vectorized<c10::qint8>::loadu(quantized_values);
568
+ }
569
+
570
+ Vectorized<c10::qint8> maximum(Vectorized<c10::qint8> b) const {
571
+ return _mm256_max_epi8(vals, b.vals);
572
+ }
573
+
574
+ Vectorized<c10::qint8> minimum(Vectorized<c10::qint8> b) const {
575
+ return _mm256_min_epi8(vals, b.vals);
576
+ }
577
+
578
+ Vectorized<c10::qint8> relu(Vectorized<c10::qint8> zero_point) const {
579
+ return maximum(zero_point);
580
+ }
581
+
582
+ Vectorized<c10::qint8> relu6(
583
+ Vectorized<c10::qint8> zero_point,
584
+ Vectorized<c10::qint8> q_six) {
585
+ return _mm256_min_epi8(
586
+ _mm256_max_epi8(vals, zero_point.vals), q_six.vals);
587
+ }
588
+
589
+ int_vec_return_type widening_subtract(Vectorized<c10::qint8> b) const {
590
+ __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0));
591
+ __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1));
592
+ __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2));
593
+ __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3));
594
+
595
+ __m256i int32_val0 = cvtepi8_epi32(int_val0);
596
+ __m256i int32_val1 = cvtepi8_epi32(int_val1);
597
+ __m256i int32_val2 = cvtepi8_epi32(int_val2);
598
+ __m256i int32_val3 = cvtepi8_epi32(int_val3);
599
+
600
+ __m128i int_b0 = _mm_set1_epi64x(_mm256_extract_epi64(b, 0));
601
+ __m128i int_b1 = _mm_set1_epi64x(_mm256_extract_epi64(b, 1));
602
+ __m128i int_b2 = _mm_set1_epi64x(_mm256_extract_epi64(b, 2));
603
+ __m128i int_b3 = _mm_set1_epi64x(_mm256_extract_epi64(b, 3));
604
+
605
+ __m256i int32_b0 = cvtepi8_epi32(int_b0);
606
+ __m256i int32_b1 = cvtepi8_epi32(int_b1);
607
+ __m256i int32_b2 = cvtepi8_epi32(int_b2);
608
+ __m256i int32_b3 = cvtepi8_epi32(int_b3);
609
+
610
+ __m256i res_0 = _mm256_sub_epi32(int32_val0, int32_b0);
611
+ __m256i res_1 = _mm256_sub_epi32(int32_val1, int32_b1);
612
+ __m256i res_2 = _mm256_sub_epi32(int32_val2, int32_b2);
613
+ __m256i res_3 = _mm256_sub_epi32(int32_val3, int32_b3);
614
+
615
+ return {Vectorized<c10::qint32>(res_0),
616
+ Vectorized<c10::qint32>(res_1),
617
+ Vectorized<c10::qint32>(res_2),
618
+ Vectorized<c10::qint32>(res_3)};
619
+ }
620
+
621
+ static Vectorized<c10::qint8> requantize_from_int(
622
+ const int_vec_return_type& inp,
623
+ float multiplier,
624
+ int32_t zero_point) {
625
+ __m256 multiplier_v = _mm256_set1_ps(multiplier);
626
+ __m256i zero_point_v = _mm256_set1_epi32(zero_point);
627
+ return RequantizeAvx2<value_type>(inp, multiplier_v, zero_point_v);
628
+ }
629
+
630
+ private:
631
+ // Load from memory constructor
632
+ Vectorized(const void* ptr) {
633
+ vals = _mm256_loadu_si256((const __m256i*)ptr);
634
+ }
635
+ };
636
+
637
+ template <>
638
+ Vectorized<c10::qint8> inline maximum(const Vectorized<c10::qint8>& a, const Vectorized<c10::qint8>& b) {
639
+ return a.maximum(b);
640
+ }
641
+
642
+ template<>
643
+ struct Vectorized<c10::quint8> : public Vectorizedqi {
644
+ static constexpr int size() {
645
+ return 32;
646
+ }
647
+
648
+ static constexpr int float_num_vecs() {
649
+ return 4;
650
+ }
651
+
652
+ static constexpr int int_num_vecs() {
653
+ return 4;
654
+ }
655
+
656
+ using float_vec_return_type = std::array<Vectorized<float>, 4>;
657
+ using int_vec_return_type = std::array<Vectorized<c10::qint32>, 4>;
658
+ using value_type = typename c10::quint8::underlying;
659
+
660
+ public:
661
+ using Vectorizedqi::Vectorizedqi;
662
+ Vectorized() {}
663
+
664
+ Vectorized(__m256i vals_) { vals = vals_;}
665
+
666
+ // Broadcast constructor
667
+ Vectorized(const c10::quint8& val) {
668
+ value_type uw = val.val_;
669
+ vals = _mm256_set1_epi8(uw);
670
+ }
671
+
672
+ // NOLINTNEXTLINE(clang-diagnostic-deprecated-copy)
673
+ C10_CLANG_DIAGNOSTIC_PUSH()
674
+ #if C10_CLANG_HAS_WARNING("-Wdeprecated-copy")
675
+ C10_CLANG_DIAGNOSTIC_IGNORE("-Wdeprecated-copy")
676
+ #endif
677
+ Vectorized(const Vectorized<c10::quint8>& other) : Vectorizedqi(other.vals) { }
678
+ C10_CLANG_DIAGNOSTIC_POP()
679
+
680
+ void store(void* ptr, int count = size()) const {
681
+ if (count != size()) {
682
+ memcpy(ptr, &vals, count * sizeof(value_type));
683
+ } else {
684
+ _mm256_storeu_si256((__m256i*)ptr, vals);
685
+ }
686
+ }
687
+
688
+ static Vectorized<c10::quint8> loadu(const void* ptr) {
689
+ return Vectorized<c10::quint8>(ptr);
690
+ }
691
+
692
+ static Vectorized<c10::quint8> loadu(const void* ptr, int64_t count) {
693
+ __at_align__ value_type tmp_values[size()];
694
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
695
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
696
+ // instructions while a loop would be compiled to one instruction.
697
+ for (const auto i : c10::irange(size())) {
698
+ tmp_values[i] = 0;
699
+ }
700
+ std::memcpy(
701
+ tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
702
+ return _mm256_loadu_si256((const __m256i*)tmp_values);
703
+ }
704
+
705
+ private:
706
+ __m256i cvtepu8_epi32(__m128i epu8_vals) const {
707
+ return _mm256_cvtepu8_epi32(epu8_vals);
708
+ }
709
+
710
+ public:
711
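+ // Dequantization path: the 32 uint8 lanes are split into four groups of 8,
+ // widened to int32 via cvtepu8_epi32, converted to float, and combined with
+ // fmadd. scale_zp_premul is expected to carry scale * -zero_point, so each
+ // lane becomes scale * q + scale * -zero_point == scale * (q - zero_point).
+ // Example (conceptual): q = 130, scale = 0.5f, zero_point = 128 gives
+ // scale_zp_premul = -64.0f and a dequantized value of 0.5f * 130 - 64.0f = 1.0f.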
+ float_vec_return_type dequantize(
712
+ Vectorized<float> scale,
713
+ Vectorized<float> /*zero_point*/,
714
+ Vectorized<float> scale_zp_premul) const {
715
+ __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0));
716
+ __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1));
717
+ __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2));
718
+ __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3));
719
+
720
+ __m256 float_val0 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val0));
721
+ __m256 float_val1 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val1));
722
+ __m256 float_val2 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val2));
723
+ __m256 float_val3 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val3));
724
+
725
+ auto val0 =
726
+ vec::fmadd(scale, Vectorized<float>(float_val0), scale_zp_premul);
727
+ auto val1 =
728
+ vec::fmadd(scale, Vectorized<float>(float_val1), scale_zp_premul);
729
+ auto val2 =
730
+ vec::fmadd(scale, Vectorized<float>(float_val2), scale_zp_premul);
731
+ auto val3 =
732
+ vec::fmadd(scale, Vectorized<float>(float_val3), scale_zp_premul);
733
+ return {val0, val1, val2, val3};
734
+ }
735
+
736
+ float_vec_return_type dequantize(
737
+ Vectorized<float> scale,
738
+ Vectorized<float> zero_point) const {
739
+ __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0));
740
+ __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1));
741
+ __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2));
742
+ __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3));
743
+
744
+ __m256 float_val0 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val0));
745
+ __m256 float_val1 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val1));
746
+ __m256 float_val2 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val2));
747
+ __m256 float_val3 = _mm256_cvtepi32_ps(cvtepu8_epi32(int_val3));
748
+
749
+ auto val0 = (Vectorized<float>(float_val0) - zero_point) * scale;
750
+ auto val1 = (Vectorized<float>(float_val1) - zero_point) * scale;
751
+ auto val2 = (Vectorized<float>(float_val2) - zero_point) * scale;
752
+ auto val3 = (Vectorized<float>(float_val3) - zero_point) * scale;
753
+ return {val0, val1, val2, val3};
754
+ }
755
+
756
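+ // Quantization path: the four float vectors (32 values) are treated as one
+ // contiguous buffer and handed to QuantizeAvx2, which is expected to multiply
+ // by inverse_scale, round to nearest, add zero_point, and saturate to the
+ // uint8 range. The scale parameter itself is unused here because
+ // inverse_scale is passed in directly.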
+ static Vectorized<c10::quint8> quantize(
757
+ const float_vec_return_type& rhs,
758
+ float /*scale*/,
759
+ int32_t zero_point,
760
+ float inverse_scale) {
761
+ auto* rhs_data = (float*)rhs.data();
762
+ uint8_t quantized_values[32];
763
+ QuantizeAvx2<value_type>(
764
+ rhs_data, quantized_values, 32, inverse_scale, zero_point);
765
+ return Vectorized<c10::quint8>::loadu(quantized_values);
766
+ }
767
+
768
+ Vectorized<c10::quint8> maximum(Vectorized<c10::quint8> b) const {
769
+ return _mm256_max_epu8(vals, b.vals);
770
+ }
771
+
772
+ Vectorized<c10::quint8> minimum(Vectorized<c10::quint8> b) const {
773
+ return _mm256_min_epu8(vals, b.vals);
774
+ }
775
+
776
+ Vectorized<c10::quint8> relu(Vectorized<c10::quint8> zero_point) const {
777
+ return maximum(zero_point);
778
+ }
779
+
780
+ Vectorized<c10::quint8> relu6(
781
+ Vectorized<c10::quint8> zero_point,
782
+ Vectorized<c10::quint8> q_six) {
783
+ return _mm256_min_epu8(
784
+ _mm256_max_epu8(vals, zero_point.vals), q_six.vals);
785
+ }
786
+
787
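+ // Widening subtraction: both operands are widened from uint8 to int32 in
+ // groups of 8 lanes before subtracting, so intermediate differences that
+ // fall outside the 8-bit range are preserved in the qint32 results.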
+ int_vec_return_type widening_subtract(Vectorized<c10::quint8> b) const {
788
+ __m128i int_val0 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 0));
789
+ __m128i int_val1 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 1));
790
+ __m128i int_val2 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 2));
791
+ __m128i int_val3 = _mm_set1_epi64x(_mm256_extract_epi64(vals, 3));
792
+
793
+ __m256i int32_val0 = cvtepu8_epi32(int_val0);
794
+ __m256i int32_val1 = cvtepu8_epi32(int_val1);
795
+ __m256i int32_val2 = cvtepu8_epi32(int_val2);
796
+ __m256i int32_val3 = cvtepu8_epi32(int_val3);
797
+
798
+ __m128i int_b0 = _mm_set1_epi64x(_mm256_extract_epi64(b, 0));
799
+ __m128i int_b1 = _mm_set1_epi64x(_mm256_extract_epi64(b, 1));
800
+ __m128i int_b2 = _mm_set1_epi64x(_mm256_extract_epi64(b, 2));
801
+ __m128i int_b3 = _mm_set1_epi64x(_mm256_extract_epi64(b, 3));
802
+
803
+ __m256i int32_b0 = cvtepu8_epi32(int_b0);
804
+ __m256i int32_b1 = cvtepu8_epi32(int_b1);
805
+ __m256i int32_b2 = cvtepu8_epi32(int_b2);
806
+ __m256i int32_b3 = cvtepu8_epi32(int_b3);
807
+
808
+ __m256i res_0 = _mm256_sub_epi32(int32_val0, int32_b0);
809
+ __m256i res_1 = _mm256_sub_epi32(int32_val1, int32_b1);
810
+ __m256i res_2 = _mm256_sub_epi32(int32_val2, int32_b2);
811
+ __m256i res_3 = _mm256_sub_epi32(int32_val3, int32_b3);
812
+ return {Vectorized<c10::qint32>(res_0),
813
+ Vectorized<c10::qint32>(res_1),
814
+ Vectorized<c10::qint32>(res_2),
815
+ Vectorized<c10::qint32>(res_3)};
816
+ }
817
+
818
+ static Vectorized<c10::quint8> requantize_from_int(
819
+ const int_vec_return_type& inp,
820
+ float multiplier,
821
+ int32_t zero_point) {
822
+ __m256 multiplier_v = _mm256_set1_ps(multiplier);
823
+ __m256i zero_point_v = _mm256_set1_epi32(zero_point);
824
+ return RequantizeAvx2<value_type>(inp, multiplier_v, zero_point_v);
825
+ }
826
+
827
+ private:
828
+
829
+ // Load from memory constructor
830
+ Vectorized(const void* ptr) {
831
+ vals = _mm256_loadu_si256((const __m256i*)ptr);
832
+ }
833
+ };
834
+
835
+ template <>
836
+ Vectorized<c10::quint8> inline maximum(const Vectorized<c10::quint8>& a, const Vectorized<c10::quint8>& b) {
837
+ return a.maximum(b);
838
+ }
839
+
840
+ #else
841
+
842
+ // NOTE: These are low-performance implementations that we fall back on
843
+ // if we are not building with AVX2. This may not be an issue, because
844
+ // currently for quantization we assume the user has at least AVX512
845
+ // installed, so these can simply act as a reference implementation.
846
+ //
847
+ // If in the future we relax this requirement (AVX2+), we should probably
848
+ // revisit these implementations
849
+
850
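+ // Scalar reference layout: VectorizedQuantizedConverter keeps size_ underlying
+ // values in a std::array and implements dequantize by calling
+ // at::native::dequantize_val element by element, packing each group of 8
+ // floats back into a Vectorized<float>.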
+ template <
851
+ typename T,
852
+ typename float_vec_return_type_,
853
+ typename int_vec_return_type_,
854
+ int size_>
855
+ struct VectorizedQuantizedConverter {
856
+ static constexpr int size() {
857
+ return size_;
858
+ }
859
+
860
+ static constexpr int float_num_vecs() {
861
+ return size() / 8;
862
+ }
863
+
864
+ static constexpr int int_num_vecs() {
865
+ return size() / 8;
866
+ }
867
+
868
+ using float_vec_return_type = float_vec_return_type_;
869
+ using int_vec_return_type = int_vec_return_type_;
870
+
871
+ using value_type = typename T::underlying;
872
+ std::array<value_type, size_> vals;
873
+
874
+ VectorizedQuantizedConverter(T val) {
875
+ for (const auto i : c10::irange(size())) {
876
+ vals[i] = val.val_;
877
+ }
878
+ }
879
+
880
+ VectorizedQuantizedConverter(const void* ptr) {
881
+ memcpy(vals.data(), ptr, sizeof(value_type) * size());
882
+ }
883
+
884
+ void store(void* ptr, int count = size()) const {
885
+ memcpy(ptr, vals.data(), count * sizeof(value_type));
886
+ }
887
+
888
+ float_vec_return_type dequantize(
889
+ Vectorized<float> scale,
890
+ Vectorized<float> zero_point,
891
+ Vectorized<float> /*scale_zp_premul*/) const {
892
+ float_vec_return_type rv;
893
+ for (const auto i : c10::irange(float_num_vecs())) {
894
+ float tmp_vals[8];
895
+ for (const auto j : c10::irange(8)) {
896
+ tmp_vals[j] = at::native::dequantize_val<T>(
897
+ scale[j], zero_point[j], T(vals[8 * i + j]));
898
+ }
899
+ rv[i] = Vectorized<float>(tmp_vals[0],
900
+ tmp_vals[1],
901
+ tmp_vals[2],
902
+ tmp_vals[3],
903
+ tmp_vals[4],
904
+ tmp_vals[5],
905
+ tmp_vals[6],
906
+ tmp_vals[7]);
907
+ }
908
+ return rv;
909
+ }
910
+
911
+ float_vec_return_type dequantize(
912
+ Vectorized<float> scale,
913
+ Vectorized<float> zero_point) const {
914
+ Vectorized<float> scale_zp_premul;
915
+ return dequantize(scale, zero_point, scale_zp_premul);
916
+ }
917
+
918
+ protected:
919
+ VectorizedQuantizedConverter() {}
920
+ };
921
+
922
+ template <>
923
+ struct Vectorized<c10::qint32> : public VectorizedQuantizedConverter<
924
+ c10::qint32,
925
+ std::array<Vectorized<float>, 1>,
926
+ std::array<Vectorized<c10::qint32>, 1>,
927
+ 8> {
928
+ Vectorized()
929
+ : VectorizedQuantizedConverter<
930
+ c10::qint32,
931
+ std::array<Vectorized<float>, 1>,
932
+ std::array<Vectorized<c10::qint32>, 1>,
933
+ 8>() {}
934
+ Vectorized(c10::qint32 val)
935
+ : VectorizedQuantizedConverter<
936
+ c10::qint32,
937
+ std::array<Vectorized<float>, 1>,
938
+ std::array<Vectorized<c10::qint32>, 1>,
939
+ 8>(val) {}
940
+ Vectorized(const void* ptr)
941
+ : VectorizedQuantizedConverter<
942
+ c10::qint32,
943
+ std::array<Vectorized<float>, 1>,
944
+ std::array<Vectorized<c10::qint32>, 1>,
945
+ 8>(ptr) {}
946
+
947
+ static Vectorized<c10::qint32> loadu(const void* ptr) {
948
+ return Vectorized<c10::qint32>(ptr);
949
+ }
950
+
951
+ static Vectorized<c10::qint32> loadu(const void* ptr, int64_t count) {
952
+ __at_align__ value_type tmp_values[size()];
953
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
954
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
955
+ // instructions while a loop would be compiled to one instruction.
956
+ for (const auto i : c10::irange(size())) {
957
+ tmp_values[i] = 0;
958
+ }
959
+ std::memcpy(
960
+ tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
961
+ return Vectorized<c10::qint32>(tmp_values);
962
+ }
963
+
964
+ static Vectorized<c10::qint32> quantize(
965
+ const float_vec_return_type& rhs,
966
+ float scale,
967
+ int32_t zero_point,
968
+ float /*inverse_scale*/) {
969
+ std::array<value_type, size()> qvals;
970
+ std::array<float, float_num_vecs() * 8> float_vals;
971
+
972
+ for (const auto i : c10::irange(float_num_vecs())) {
973
+ rhs[i].store(&float_vals[i * 8], 8);
974
+ }
975
+
976
+ at::native::quantize_vec<c10::qint32, /*precision=*/32>(
977
+ scale,
978
+ zero_point,
979
+ float_vals.data(),
980
+ (c10::qint32*)qvals.data(),
981
+ 8 * float_num_vecs());
982
+
983
+ return Vectorized<c10::qint32>::loadu(qvals.data());
984
+ }
985
+
986
+ Vectorized<c10::qint32> maximum(Vectorized<c10::qint32> b) const {
987
+ Vectorized<c10::qint32> retval;
988
+ for (const auto i : c10::irange(size())) {
989
+ retval.vals[i] = std::max<value_type>(vals[i], b.vals[i]);
990
+ }
991
+ return retval;
992
+ }
993
+
994
+ Vectorized<c10::qint32> minimum(Vectorized<c10::qint32> b) const {
995
+ Vectorized<c10::qint32> retval;
996
+ for (const auto i : c10::irange(size())) {
997
+ retval.vals[i] = std::min<value_type>(vals[i], b.vals[i]);
998
+ }
999
+ return retval;
1000
+ }
1001
+
1002
+ Vectorized<c10::qint32> relu(Vectorized<c10::qint32> zero_point) const {
1003
+ return maximum(zero_point);
1004
+ }
1005
+
1006
+
1007
+ Vectorized<c10::qint32> relu6(
1008
+ Vectorized<c10::qint32> zero_point,
1009
+ Vectorized<c10::qint32> q_six) {
1010
+ Vectorized<c10::qint32> retval;
1011
+ for (const auto i : c10::irange(size())) {
1012
+ retval.vals[i] = std::min<value_type>(
1013
+ std::max<value_type>(vals[i], zero_point.vals[i]), q_six.vals[i]);
1014
+ }
1015
+ return retval;
1016
+ }
1017
+
1018
+ int_vec_return_type widening_subtract(Vectorized<c10::qint32> b) const {
1019
+ int_vec_return_type retval;
1020
+ for (const auto i : c10::irange(size())) {
1021
+ retval[0].vals[i] = vals[i] - b.vals[i];
1022
+ }
1023
+ return retval;
1024
+ }
1025
+
1026
+ static Vectorized<c10::qint32> requantize_from_int(
1027
+ const int_vec_return_type& inp,
1028
+ float multiplier,
1029
+ int32_t zero_point) {
1030
+ Vectorized<c10::qint32> retval;
1031
+ for (const auto i : c10::irange(size())) {
1032
+ retval.vals[i] =
1033
+ std::nearbyint(static_cast<float>(inp[0].vals[i]) * multiplier) +
1034
+ zero_point;
1035
+ }
1036
+ return retval;
1037
+ }
1038
+ };
1039
+
1040
+ template <>
1041
+ Vectorized<c10::qint32> inline maximum(const Vectorized<c10::qint32>& a, const Vectorized<c10::qint32>& b) {
1042
+ return a.maximum(b);
1043
+ }
1044
+
1045
+ template <>
1046
+ Vectorized<c10::qint32> inline operator*(
1047
+ const Vectorized<c10::qint32>& a,
1048
+ const Vectorized<c10::qint32>& b) {
1049
+ Vectorized<c10::qint32> retval;
1050
+ for (const auto i : c10::irange(std::decay_t<decltype(a)>::size())) {
1051
+ retval.vals[i] = a.vals[i] * b.vals[i];
1052
+ }
1053
+ return retval;
1054
+ }
1055
+
1056
+ template <>
1057
+ Vectorized<c10::qint32> inline operator+(
1058
+ const Vectorized<c10::qint32>& a,
1059
+ const Vectorized<c10::qint32>& b) {
1060
+ Vectorized<c10::qint32> retval;
1061
+ for (const auto i : c10::irange(std::decay_t<decltype(a)>::size())) {
1062
+ retval.vals[i] = a.vals[i] + b.vals[i];
1063
+ }
1064
+ return retval;
1065
+ }
1066
+
1067
+ template <>
1068
+ struct Vectorized<c10::qint8> : public VectorizedQuantizedConverter<
1069
+ c10::qint8,
1070
+ std::array<Vectorized<float>, 4>,
1071
+ std::array<Vectorized<c10::qint32>, 4>,
1072
+ 32> {
1073
+ Vectorized()
1074
+ : VectorizedQuantizedConverter<
1075
+ c10::qint8,
1076
+ std::array<Vectorized<float>, 4>,
1077
+ std::array<Vectorized<c10::qint32>, 4>,
1078
+ 32>() {}
1079
+ Vectorized(c10::qint8 val)
1080
+ : VectorizedQuantizedConverter<
1081
+ c10::qint8,
1082
+ std::array<Vectorized<float>, 4>,
1083
+ std::array<Vectorized<c10::qint32>, 4>,
1084
+ 32>(val) {}
1085
+ Vectorized(const void* ptr)
1086
+ : VectorizedQuantizedConverter<
1087
+ c10::qint8,
1088
+ std::array<Vectorized<float>, 4>,
1089
+ std::array<Vectorized<c10::qint32>, 4>,
1090
+ 32>(ptr) {}
1091
+
1092
+ static Vectorized<c10::qint8> loadu(const void* ptr) {
1093
+ return Vectorized<c10::qint8>(ptr);
1094
+ }
1095
+
1096
+ static Vectorized<c10::qint8> loadu(const void* ptr, int64_t count) {
1097
+ __at_align__ value_type tmp_values[size()];
1098
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
1099
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
1100
+ // instructions while a loop would be compiled to one instruction.
1101
+ for (const auto i : c10::irange(size())) {
1102
+ tmp_values[i] = 0;
1103
+ }
1104
+ std::memcpy(
1105
+ tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
1106
+ return Vectorized<c10::qint8>(tmp_values);
1107
+ }
1108
+
1109
+ static Vectorized<c10::qint8> quantize(
1110
+ const float_vec_return_type& rhs,
1111
+ float scale,
1112
+ int32_t zero_point,
1113
+ float /*inverse_scale*/) {
1114
+ std::array<value_type, size()> qvals;
1115
+ std::array<float, float_num_vecs() * 8> float_vals;
1116
+
1117
+ for (const auto i : c10::irange(float_num_vecs())) {
1118
+ rhs[i].store(&float_vals[i * 8], 8);
1119
+ }
1120
+
1121
+ at::native::quantize_vec<c10::qint8>(
1122
+ scale,
1123
+ zero_point,
1124
+ float_vals.data(),
1125
+ (c10::qint8*)qvals.data(),
1126
+ 8 * float_num_vecs());
1127
+
1128
+ return Vectorized<c10::qint8>::loadu(qvals.data());
1129
+ }
1130
+
1131
+ Vectorized<c10::qint8> maximum(Vectorized<c10::qint8> b) const {
1132
+ Vectorized<c10::qint8> retval;
1133
+ for (const auto i : c10::irange(size())) {
1134
+ retval.vals[i] = std::max<value_type>(vals[i], b.vals[i]);
1135
+ }
1136
+ return retval;
1137
+ }
1138
+
1139
+ Vectorized<c10::qint8> minimum(Vectorized<c10::qint8> b) const {
1140
+ Vectorized<c10::qint8> retval;
1141
+ for (const auto i : c10::irange(size())) {
1142
+ retval.vals[i] = std::min<value_type>(vals[i], b.vals[i]);
1143
+ }
1144
+ return retval;
1145
+ }
1146
+
1147
+ Vectorized<c10::qint8> relu(Vectorized<c10::qint8> zero_point) const {
1148
+ return maximum(zero_point);
1149
+ }
1150
+
1151
+ Vectorized<c10::qint8> relu6(
1152
+ Vectorized<c10::qint8> zero_point,
1153
+ Vectorized<c10::qint8> q_six) {
1154
+ Vectorized<c10::qint8> retval;
1155
+ for (const auto i : c10::irange(size())) {
1156
+ retval.vals[i] = std::min<value_type>(
1157
+ std::max<value_type>(vals[i], zero_point.vals[i]), q_six.vals[i]);
1158
+ }
1159
+ return retval;
1160
+ }
1161
+
1162
+ int_vec_return_type widening_subtract(Vectorized<c10::qint8> b) const {
1163
+ int_vec_return_type retval;
1164
+ constexpr int elem_per_int_vec = size() / int_num_vecs();
1165
+ for (const auto i : c10::irange(int_num_vecs())) {
1166
+ for (const auto j : c10::irange(elem_per_int_vec)) {
1167
+ retval[i].vals[j] =
1168
+ static_cast<int32_t>(vals[i * elem_per_int_vec + j]) -
1169
+ static_cast<int32_t>(b.vals[i * elem_per_int_vec + j]);
1170
+ }
1171
+ }
1172
+ return retval;
1173
+ }
1174
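+ // Requantization (scalar reference): each widened int32 is scaled by
+ // multiplier, rounded with std::nearbyint, shifted by zero_point, and clamped
+ // to the qint8 range. Example: inp = 200, multiplier = 0.1f, zero_point = 5
+ // gives nearbyint(20.0f) + 5 = 25, which lies within [-128, 127].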
+ static Vectorized<c10::qint8> requantize_from_int(
1175
+ const int_vec_return_type& inp,
1176
+ float multiplier,
1177
+ int32_t zero_point) {
1178
+ constexpr int elem_per_int_vec = size() / int_num_vecs();
1179
+ constexpr auto min_val = std::numeric_limits<value_type>::min();
1180
+ constexpr auto max_val = std::numeric_limits<value_type>::max();
1181
+ Vectorized<c10::qint8> retval;
1182
+ for (const auto i : c10::irange(int_num_vecs())) {
1183
+ for (const auto j : c10::irange(elem_per_int_vec)) {
1184
+ int32_t rounded =
1185
+ std::nearbyint(static_cast<float>(inp[i].vals[j]) * multiplier) +
1186
+ zero_point;
1187
+ retval.vals[i * elem_per_int_vec + j] =
1188
+ std::min<int32_t>(std::max<int32_t>(rounded, min_val), max_val);
1189
+ }
1190
+ }
1191
+ return retval;
1192
+ }
1193
+ };
1194
+
1195
+ template <>
1196
+ Vectorized<c10::qint8> inline maximum(const Vectorized<c10::qint8>& a, const Vectorized<c10::qint8>& b) {
1197
+ return a.maximum(b);
1198
+ }
1199
+
1200
+ template <>
1201
+ struct Vectorized<c10::quint8> : public VectorizedQuantizedConverter<
1202
+ c10::quint8,
1203
+ std::array<Vectorized<float>, 4>,
1204
+ std::array<Vectorized<c10::qint32>, 4>,
1205
+ 32> {
1206
+ Vectorized()
1207
+ : VectorizedQuantizedConverter<
1208
+ c10::quint8,
1209
+ std::array<Vectorized<float>, 4>,
1210
+ std::array<Vectorized<c10::qint32>, 4>,
1211
+ 32>() {}
1212
+ Vectorized(c10::quint8 val)
1213
+ : VectorizedQuantizedConverter<
1214
+ c10::quint8,
1215
+ std::array<Vectorized<float>, 4>,
1216
+ std::array<Vectorized<c10::qint32>, 4>,
1217
+ 32>(val) {}
1218
+ Vectorized(const void* ptr)
1219
+ : VectorizedQuantizedConverter<
1220
+ c10::quint8,
1221
+ std::array<Vectorized<float>, 4>,
1222
+ std::array<Vectorized<c10::qint32>, 4>,
1223
+ 32>(ptr) {}
1224
+
1225
+ static Vectorized<c10::quint8> loadu(const void* ptr) {
1226
+ return Vectorized<c10::quint8>(ptr);
1227
+ }
1228
+
1229
+ static Vectorized<c10::quint8> loadu(const void* ptr, int64_t count) {
1230
+ __at_align__ value_type tmp_values[size()];
1231
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
1232
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
1233
+ // instructions while a loop would be compiled to one instruction.
1234
+ for (const auto i : c10::irange(size())) {
1235
+ tmp_values[i] = 0;
1236
+ }
1237
+ std::memcpy(
1238
+ tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
1239
+ return Vectorized<c10::quint8>(tmp_values);
1240
+ }
1241
+
1242
+ static Vectorized<c10::quint8> quantize(
1243
+ const float_vec_return_type& rhs,
1244
+ float scale,
1245
+ int32_t zero_point,
1246
+ float /*inverse_scale*/) {
1247
+ std::array<value_type, size()> qvals;
1248
+ std::array<float, float_num_vecs() * 8> float_vals;
1249
+
1250
+ for (const auto i : c10::irange(float_num_vecs())) {
1251
+ rhs[i].store(&float_vals[i * 8], 8);
1252
+ }
1253
+
1254
+ at::native::quantize_vec<c10::quint8>(
1255
+ scale,
1256
+ zero_point,
1257
+ float_vals.data(),
1258
+ (c10::quint8*)qvals.data(),
1259
+ 8 * float_num_vecs());
1260
+
1261
+ return Vectorized<c10::quint8>::loadu(qvals.data());
1262
+ }
1263
+
1264
+ Vectorized<c10::quint8> maximum(Vectorized<c10::quint8> b) const {
1265
+ Vectorized<c10::quint8> retval;
1266
+ for (const auto i : c10::irange(size())) {
1267
+ retval.vals[i] = std::max<value_type>(vals[i], b.vals[i]);
1268
+ }
1269
+ return retval;
1270
+ }
1271
+
1272
+ Vectorized<c10::quint8> minimum(Vectorized<c10::quint8> b) const {
1273
+ Vectorized<c10::quint8> retval;
1274
+ for (const auto i : c10::irange(size())) {
1275
+ retval.vals[i] = std::min<value_type>(vals[i], b.vals[i]);
1276
+ }
1277
+ return retval;
1278
+ }
1279
+
1280
+ Vectorized<c10::quint8> relu(Vectorized<c10::quint8> zero_point) const {
1281
+ return maximum(zero_point);
1282
+ }
1283
+
1284
+
1285
+ Vectorized<c10::quint8> relu6(
1286
+ Vectorized<c10::quint8> zero_point,
1287
+ Vectorized<c10::quint8> q_six) {
1288
+ Vectorized<c10::quint8> retval;
1289
+ for (const auto i : c10::irange(size())) {
1290
+ retval.vals[i] = std::min<value_type>(
1291
+ std::max<value_type>(vals[i], zero_point.vals[i]), q_six.vals[i]);
1292
+ }
1293
+ return retval;
1294
+ }
1295
+
1296
+ int_vec_return_type widening_subtract(Vectorized<c10::quint8> b) const {
1297
+ int_vec_return_type retval;
1298
+ constexpr int elem_per_int_vec = size() / int_num_vecs();
1299
+ for (const auto i : c10::irange(int_num_vecs())) {
1300
+ for (const auto j : c10::irange(elem_per_int_vec)) {
1301
+ retval[i].vals[j] =
1302
+ static_cast<int32_t>(vals[i * elem_per_int_vec + j]) -
1303
+ static_cast<int32_t>(b.vals[i * elem_per_int_vec + j]);
1304
+ }
1305
+ }
1306
+ return retval;
1307
+ }
1308
+ static Vectorized<c10::quint8> requantize_from_int(
1309
+ const int_vec_return_type& inp,
1310
+ float multiplier,
1311
+ int32_t zero_point) {
1312
+ constexpr int elem_per_int_vec = size() / int_num_vecs();
1313
+ constexpr auto min_val = std::numeric_limits<value_type>::min();
1314
+ constexpr auto max_val = std::numeric_limits<value_type>::max();
1315
+ Vectorized<c10::quint8> retval;
1316
+ for (const auto i : c10::irange(int_num_vecs())) {
1317
+ for (const auto j : c10::irange(elem_per_int_vec)) {
1318
+ int32_t rounded =
1319
+ std::nearbyint(static_cast<float>(inp[i].vals[j]) * multiplier) +
1320
+ zero_point;
1321
+ retval.vals[i * elem_per_int_vec + j] =
1322
+ std::min<int32_t>(std::max<int32_t>(rounded, min_val), max_val);
1323
+ }
1324
+ }
1325
+ return retval;
1326
+ }
1327
+ };
1328
+
1329
+ template <>
1330
+ Vectorized<c10::quint8> inline maximum(const Vectorized<c10::quint8>& a, const Vectorized<c10::quint8>& b) {
1331
+ return a.maximum(b);
1332
+ }
1333
+
1334
+ #endif // if defined(CPU_CAPABILITY_AVX2) && !defined(_MSC_VER)
1335
+ }} // namespace at::vec::CPU_CAPABILITY
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_bfloat16_vsx.h ADDED
@@ -0,0 +1,73 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cpu/vec/intrinsics.h>
4
+ #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
5
+ #include <ATen/cpu/vec/vec_base.h>
6
+ #include <c10/util/irange.h>
7
+
8
+ namespace at {
9
+ namespace vec {
10
+ // See Note [CPU_CAPABILITY namespace]
11
+ inline namespace CPU_CAPABILITY {
12
+
13
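+ // Generic round trip through memory: the BFloat16 lanes are stored to a
+ // stack buffer, widened with convert(), and reloaded as two float vectors.
+ // No VSX-specific bfloat16 instructions are used in this header.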
+ inline std::tuple<Vectorized<float>, Vectorized<float>> convert_bfloat16_float(
14
+ const Vectorized<BFloat16>& a) {
15
+ constexpr int64_t K = Vectorized<BFloat16>::size();
16
+ __at_align__ float arr[K];
17
+ __at_align__ BFloat16 arr2[K];
18
+ a.store(arr2);
19
+ convert(arr2, arr, K);
20
+ return std::make_tuple(
21
+ Vectorized<float>::loadu(arr),
22
+ Vectorized<float>::loadu(arr + Vectorized<float>::size()));
23
+ }
24
+
25
+ inline Vectorized<BFloat16> convert_float_bfloat16(
26
+ const Vectorized<float>& a,
27
+ const Vectorized<float>& b) {
28
+ constexpr int64_t K = Vectorized<BFloat16>::size();
29
+ __at_align__ float arr[K];
30
+ __at_align__ BFloat16 arr2[K];
31
+ a.store(arr);
32
+ b.store(arr + Vectorized<float>::size());
33
+ convert(arr, arr2, K);
34
+ return Vectorized<BFloat16>::loadu(arr2);
35
+ }
36
+
37
+ inline void load_fp32_from_bf16(const c10::BFloat16* data, Vectorized<float>& out) {
38
+ __at_align__ float values[Vectorized<float>::size()];
39
+ for (const auto k : c10::irange(Vectorized<float>::size())) {
40
+ values[k] = data[k];
41
+ }
42
+ out = Vectorized<float>::loadu(values);
43
+ }
44
+
45
+ inline void load_fp32_from_bf16(
46
+ const c10::BFloat16* data,
47
+ Vectorized<float>& out1,
48
+ Vectorized<float>& out2) {
49
+ load_fp32_from_bf16(data, out1);
50
+ data += Vectorized<float>::size();
51
+ load_fp32_from_bf16(data, out2);
52
+ }
53
+
54
+ inline void load_fp32_from_fp16(const c10::Half* data, Vectorized<float>& out) {
55
+ __at_align__ float values[Vectorized<float>::size()];
56
+ for (const auto k : c10::irange(Vectorized<float>::size())) {
57
+ values[k] = data[k];
58
+ }
59
+ out = Vectorized<float>::loadu(values);
60
+ }
61
+
62
+ inline void load_fp32_from_fp16(
63
+ const c10::Half* data,
64
+ Vectorized<float>& out1,
65
+ Vectorized<float>& out2) {
66
+ load_fp32_from_fp16(data, out1);
67
+ data += Vectorized<float>::size();
68
+ load_fp32_from_fp16(data, out2);
69
+ }
70
+
71
+ } // namespace
72
+ } // namespace vec
73
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_common_vsx.h ADDED
@@ -0,0 +1,246 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cpu/vec/intrinsics.h>
4
+ #include <ATen/cpu/vec/vec_base.h>
5
+ #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
6
+
7
+ // Note: header order is important here
8
+ #include <ATen/cpu/vec/vec256/vsx/vec256_double_vsx.h>
9
+ #include <ATen/cpu/vec/vec256/vsx/vec256_float_vsx.h>
10
+ #include <ATen/cpu/vec/vec256/vsx/vec256_int16_vsx.h>
11
+ #include <ATen/cpu/vec/vec256/vsx/vec256_int32_vsx.h>
12
+ #include <ATen/cpu/vec/vec256/vsx/vec256_int64_vsx.h>
13
+ #include <ATen/cpu/vec/vec256/vsx/vec256_qint32_vsx.h>
14
+ #include <ATen/cpu/vec/vec256/vsx/vec256_qint8_vsx.h>
15
+ #include <ATen/cpu/vec/vec256/vsx/vec256_quint8_vsx.h>
16
+
17
+ #include <ATen/cpu/vec/vec256/vsx/vec256_complex_float_vsx.h>
18
+ #include <ATen/cpu/vec/vec256/vsx/vec256_complex_double_vsx.h>
19
+
20
+ #include <ATen/cpu/vec/vec256/vsx/vec256_bfloat16_vsx.h>
21
+
22
+ namespace at {
23
+ namespace vec {
24
+
25
+ inline namespace CPU_CAPABILITY {
26
+
27
+ DEFINE_CLAMP_FUNCS(c10::quint8)
28
+ DEFINE_CLAMP_FUNCS(c10::qint8)
29
+ DEFINE_CLAMP_FUNCS(c10::qint32)
30
+ DEFINE_CLAMP_FUNCS(int16_t)
31
+ DEFINE_CLAMP_FUNCS(int32_t)
32
+ DEFINE_CLAMP_FUNCS(int64_t)
33
+ DEFINE_CLAMP_FUNCS(float)
34
+ DEFINE_CLAMP_FUNCS(double)
35
+
36
+ template <>
37
+ Vectorized<double> C10_ALWAYS_INLINE fmadd(
38
+ const Vectorized<double>& a,
39
+ const Vectorized<double>& b,
40
+ const Vectorized<double>& c) {
41
+ return Vectorized<double>{
42
+ vec_madd(a.vec0(), b.vec0(), c.vec0()),
43
+ vec_madd(a.vec1(), b.vec1(), c.vec1())};
44
+ }
45
+
46
+ template <>
47
+ Vectorized<int64_t> C10_ALWAYS_INLINE fmadd(
48
+ const Vectorized<int64_t>& a,
49
+ const Vectorized<int64_t>& b,
50
+ const Vectorized<int64_t>& c) {
51
+ return Vectorized<int64_t>{
52
+ a.vec0() * b.vec0() + c.vec0(), a.vec1() * b.vec1() + c.vec1()};
53
+ }
54
+ template <>
55
+ Vectorized<int32_t> C10_ALWAYS_INLINE fmadd(
56
+ const Vectorized<int32_t>& a,
57
+ const Vectorized<int32_t>& b,
58
+ const Vectorized<int32_t>& c) {
59
+ return Vectorized<int32_t>{
60
+ a.vec0() * b.vec0() + c.vec0(), a.vec1() * b.vec1() + c.vec1()};
61
+ }
62
+ template <>
63
+ Vectorized<int16_t> C10_ALWAYS_INLINE fmadd(
64
+ const Vectorized<int16_t>& a,
65
+ const Vectorized<int16_t>& b,
66
+ const Vectorized<int16_t>& c) {
67
+ return Vectorized<int16_t>{
68
+ a.vec0() * b.vec0() + c.vec0(), a.vec1() * b.vec1() + c.vec1()};
69
+ }
70
+
71
+ DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(float)
72
+ DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(double)
73
+ DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(int64_t)
74
+ DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(int32_t)
75
+ DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(int16_t)
76
+
77
+ template <>
78
+ Vectorized<int64_t> C10_ALWAYS_INLINE
79
+ convert_to_int_of_same_size<double>(const Vectorized<double>& src) {
80
+ return Vectorized<int64_t>{vec_signed(src.vec0()), vec_signed(src.vec1())};
81
+ }
82
+
83
+ template <>
84
+ Vectorized<int32_t> C10_ALWAYS_INLINE
85
+ convert_to_int_of_same_size<float>(
86
+ const Vectorized<float>& src) {
87
+ return Vectorized<int32_t>{vec_signed(src.vec0()), vec_signed(src.vec1())};
88
+ }
89
+
90
+ template <>
91
+ inline void convert(const int32_t* src, float* dst, int64_t n) {
92
+ // int32_t and float have the same size
93
+ int64_t i;
94
+ for (i = 0; i <= (n - Vectorized<float>::size()); i += Vectorized<float>::size()) {
95
+ const int32_t* src_a = src + i;
96
+ float* dst_a = dst + i;
97
+ vint32 input_vec0 = vec_vsx_ld(offset0, reinterpret_cast<const vint32*>(src_a));
98
+ vint32 input_vec1 =
99
+ vec_vsx_ld(offset16, reinterpret_cast<const vint32*>(src_a));
100
+ vfloat32 c0 = vec_float(input_vec0);
101
+ vfloat32 c1 = vec_float(input_vec1);
102
+ vec_vsx_st(c0, offset0, dst_a);
103
+ vec_vsx_st(c1, offset16, dst_a);
104
+ }
105
+
106
+ for (; i < n; i++) {
107
+ dst[i] = static_cast<float>(src[i]);
108
+ }
109
+ }
110
+
111
+ template <>
112
+ inline void convert(const int64_t* src, double* dst, int64_t n) {
113
+ int64_t i;
114
+ for (i = 0; i <= (n - Vectorized<double>::size()); i += Vectorized<double>::size()) {
115
+ const int64_t* src_a = src + i;
116
+ double* dst_a = dst + i;
117
+ vint64 input_vec0 =
118
+ vec_vsx_ld(offset0, reinterpret_cast<const vint64*>(src_a));
119
+ vint64 input_vec1 =
120
+ vec_vsx_ld(offset16, reinterpret_cast<const vint64*>(src_a));
121
+ vfloat64 c0 = vec_double(input_vec0);
122
+ vfloat64 c1 = vec_double(input_vec1);
123
+ vec_vsx_st(c0, offset0, reinterpret_cast<double*>(dst_a));
124
+ vec_vsx_st(c1, offset16, reinterpret_cast<double*>(dst_a));
125
+ }
126
+ for (; i < n; i++) {
127
+ dst[i] = static_cast<double>(src[i]);
128
+ }
129
+ }
130
+ // Generic implementation to fix compiler error
131
+ // TODO: Add an optimized version for ppc64
132
+ inline std::tuple<Vectorized<float>, Vectorized<float>> convert_half_float(
133
+ const Vectorized<Half>& a) {
134
+ constexpr int64_t K = Vectorized<Half>::size();
135
+ __at_align__ float arr[K];
136
+ __at_align__ Half arr2[K];
137
+ a.store(arr2);
138
+ convert(arr2, arr, K);
139
+ return std::make_tuple(
140
+ Vectorized<float>::loadu(arr),
141
+ Vectorized<float>::loadu(arr + Vectorized<float>::size()));
142
+ }
143
+
144
+ inline Vectorized<Half> convert_float_half(
145
+ const Vectorized<float>& a, const Vectorized<float>& b) {
146
+ constexpr int64_t K = Vectorized<Half>::size();
147
+ __at_align__ float arr[K];
148
+ __at_align__ Half arr2[K];
149
+ a.store(arr);
150
+ b.store(arr + Vectorized<float>::size());
151
+ convert(arr, arr2, K);
152
+ return Vectorized<Half>::loadu(arr2);
153
+ };
154
+
155
+ template <>
156
+ std::pair<Vectorized<double>, Vectorized<double>> inline interleave2<double>(
157
+ const Vectorized<double>& a,
158
+ const Vectorized<double>& b) {
159
+ // inputs:
160
+ // a = {a0, a1, a2, a3}
161
+ // b = {b0, b1, b2, b3}
162
+
163
+ vfloat64 ab00 = vec_xxpermdi(a.vec0(), b.vec0(), 0);
164
+ vfloat64 ab11 = vec_xxpermdi(a.vec0(), b.vec0(), 3);
165
+ vfloat64 ab2_00 = vec_xxpermdi(a.vec1(), b.vec1(), 0);
166
+ vfloat64 ab2_11 = vec_xxpermdi(a.vec1(), b.vec1(), 3);
167
+ // return {a0, b0, a1, b1}
168
+ // {a2, b2, a3, b3}
169
+ return std::make_pair(
170
+ Vectorized<double>{ab00, ab11}, Vectorized<double>{ab2_00, ab2_11});
171
+ }
172
+
173
+ template <>
174
+ std::pair<Vectorized<double>, Vectorized<double>> inline deinterleave2<double>(
175
+ const Vectorized<double>& a,
176
+ const Vectorized<double>& b) {
177
+ // inputs:
178
+ // a = {a0, b0, a1, b1}
179
+ // b = {a2, b2, a3, b3}
180
+ vfloat64 aa01 = vec_xxpermdi(a.vec0(), a.vec1(), 0);
181
+ vfloat64 aa23 = vec_xxpermdi(b.vec0(), b.vec1(), 0);
182
+
183
+ vfloat64 bb_01 = vec_xxpermdi(a.vec0(), a.vec1(), 3);
184
+ vfloat64 bb_23 = vec_xxpermdi(b.vec0(), b.vec1(), 3);
185
+
186
+ // swap lanes:
187
+ // return {a0, a1, a2, a3}
188
+ // {b0, b1, b2, b3}
189
+ return std::make_pair(
190
+ Vectorized<double>{aa01, aa23}, Vectorized<double>{bb_01, bb_23});
191
+ }
192
+
193
+ template <>
194
+ std::pair<Vectorized<float>, Vectorized<float>> inline interleave2<float>(
195
+ const Vectorized<float>& a,
196
+ const Vectorized<float>& b) {
197
+ // inputs:
198
+ // a = {a0, a1, a2, a3,, a4, a5, a6, a7}
199
+ // b = {b0, b1, b2, b3,, b4, b5, b6, b7}
200
+
201
+ vfloat32 ab0011 = vec_mergeh(a.vec0(), b.vec0());
202
+ vfloat32 ab2233 = vec_mergel(a.vec0(), b.vec0());
203
+
204
+ vfloat32 ab2_0011 = vec_mergeh(a.vec1(), b.vec1());
205
+ vfloat32 ab2_2233 = vec_mergel(a.vec1(), b.vec1());
206
+ // group cols crossing lanes:
207
+ // return {a0, b0, a1, b1,, a2, b2, a3, b3}
208
+ // {a4, b4, a5, b5,, a6, b6, a7, b7}
209
+
210
+ return std::make_pair(
211
+ Vectorized<float>{ab0011, ab2233}, Vectorized<float>{ab2_0011, ab2_2233});
212
+ }
213
+
214
+ template <>
215
+ std::pair<Vectorized<float>, Vectorized<float>> inline deinterleave2<float>(
216
+ const Vectorized<float>& a,
217
+ const Vectorized<float>& b) {
218
+ // inputs:
219
+ // a = {a0, b0, a1, b1,, a2, b2, a3, b3}
220
+ // b = {a4, b4, a5, b5,, a6, b6, a7, b7}
221
+
222
+ // {a0,a2,b0,b2} {a1,a3,b1,b3}
223
+ vfloat32 a0a2b0b2 = vec_mergeh(a.vec0(), a.vec1());
224
+ vfloat32 a1a3b1b3 = vec_mergel(a.vec0(), a.vec1());
225
+
226
+ vfloat32 aa0123 = vec_mergeh(a0a2b0b2, a1a3b1b3);
227
+ vfloat32 bb0123 = vec_mergel(a0a2b0b2, a1a3b1b3);
228
+
229
+ vfloat32 a0a2b0b2_2 = vec_mergeh(b.vec0(), b.vec1());
230
+ vfloat32 a1a3b1b3_2 = vec_mergel(b.vec0(), b.vec1());
231
+
232
+ vfloat32 aa0123_2 = vec_mergeh(a0a2b0b2_2, a1a3b1b3_2);
233
+ vfloat32 bb0123_2 = vec_mergel(a0a2b0b2_2, a1a3b1b3_2);
234
+
235
+ // it could be done with vec_perm ,too
236
+ // swap lanes:
237
+ // return {a0, a1, a2, a3,, a4, a5, a6, a7}
238
+ // {b0, b1, b2, b3,, b4, b5, b6, b7}
239
+
240
+ return std::make_pair(
241
+ Vectorized<float>{aa0123, aa0123_2}, Vectorized<float>{bb0123, bb0123_2});
242
+ }
243
+
244
+ } // namespace
245
+ } // namespace vec
246
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_complex_double_vsx.h ADDED
@@ -0,0 +1,560 @@
1
+ #pragma once
2
+ #include <ATen/cpu/vec/intrinsics.h>
3
+ #include <ATen/cpu/vec/vec_base.h>
4
+ #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
5
+ #include <c10/util/complex.h>
6
+ #include <c10/util/irange.h>
7
+
8
+ namespace at {
9
+ namespace vec {
10
+ // See Note [CPU_CAPABILITY namespace]
11
+ inline namespace CPU_CAPABILITY {
12
+ using ComplexDbl = c10::complex<double>;
13
+
14
+ template <>
15
+ class Vectorized<ComplexDbl> {
16
+ union {
17
+ struct {
18
+ vfloat64 _vec0;
19
+ vfloat64 _vec1;
20
+ };
21
+ struct {
22
+ vbool64 _vecb0;
23
+ vbool64 _vecb1;
24
+ };
25
+
26
+ } __attribute__((__may_alias__));
27
+
28
+ public:
29
+ using value_type = ComplexDbl;
30
+ using vec_internal_type = vfloat64;
31
+ using vec_internal_mask_type = vbool64;
32
+ using size_type = int;
33
+ static constexpr size_type size() {
34
+ return 2;
35
+ }
36
+ Vectorized() {}
37
+ C10_ALWAYS_INLINE Vectorized(vfloat64 v) : _vec0{v}, _vec1{v} {}
38
+ C10_ALWAYS_INLINE Vectorized(vbool64 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
39
+ C10_ALWAYS_INLINE Vectorized(vfloat64 v1, vfloat64 v2) : _vec0{v1}, _vec1{v2} {}
40
+ C10_ALWAYS_INLINE Vectorized(vbool64 v1, vbool64 v2) : _vecb0{v1}, _vecb1{v2} {}
41
+
42
+ Vectorized(ComplexDbl val) {
43
+ double real_value = val.real();
44
+ double imag_value = val.imag();
45
+ _vec0 = vfloat64{real_value, imag_value};
46
+ _vec1 = vfloat64{real_value, imag_value};
47
+ }
48
+ Vectorized(ComplexDbl val1, ComplexDbl val2) {
49
+ _vec0 = vfloat64{val1.real(), val1.imag()};
50
+ _vec1 = vfloat64{val2.real(), val2.imag()};
51
+ }
52
+
53
+ C10_ALWAYS_INLINE const vec_internal_type& vec0() const {
54
+ return _vec0;
55
+ }
56
+ C10_ALWAYS_INLINE const vec_internal_type& vec1() const {
57
+ return _vec1;
58
+ }
59
+
60
+ template <int64_t mask>
61
+ static std::enable_if_t<blendChoiceComplexDbl(mask) == 0, Vectorized<ComplexDbl>>
62
+ C10_ALWAYS_INLINE
63
+ blend(const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b) {
64
+ return a;
65
+ }
66
+
67
+ template <int64_t mask>
68
+ static std::enable_if_t<blendChoiceComplexDbl(mask) == 1, Vectorized<ComplexDbl>>
69
+ C10_ALWAYS_INLINE
70
+ blend(const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b) {
71
+ return b;
72
+ }
73
+
74
+ template <int64_t mask>
75
+ static std::enable_if_t<blendChoiceComplexDbl(mask) == 2, Vectorized<ComplexDbl>>
76
+ C10_ALWAYS_INLINE
77
+ blend(const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b) {
78
+ return {b._vec0, a._vec1};
79
+ }
80
+
81
+ template <int64_t mask>
82
+ static std::enable_if_t<blendChoiceComplexDbl(mask) == 3, Vectorized<ComplexDbl>>
83
+ C10_ALWAYS_INLINE
84
+ blend(const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b) {
85
+ return {a._vec0, b._vec1};
86
+ }
87
+
88
+ template <int64_t mask>
89
+ static Vectorized<ComplexDbl> C10_ALWAYS_INLINE
90
+ el_blend(const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b) {
91
+ const vbool64 mask_1st = VsxDblMask1(mask);
92
+ const vbool64 mask_2nd = VsxDblMask2(mask);
93
+ return {
94
+ (vfloat64)vec_sel(a._vec0, b._vec0, mask_1st),
95
+ (vfloat64)vec_sel(a._vec1, b._vec1, mask_2nd)};
96
+ }
97
+
98
+ static Vectorized<ComplexDbl> blendv(
99
+ const Vectorized<ComplexDbl>& a,
100
+ const Vectorized<ComplexDbl>& b,
101
+ const Vectorized<ComplexDbl>& mask) {
102
+ // convert std::complex<V> index mask to V index mask: xy -> xxyy
103
+ auto mask_complex =
104
+ Vectorized<ComplexDbl>(vec_splat(mask._vec0, 0), vec_splat(mask._vec1, 0));
105
+ return {
106
+ vec_sel(a._vec0, b._vec0, mask_complex._vecb0),
107
+ vec_sel(a._vec1, b._vec1, mask_complex._vecb1)};
108
+ }
109
+
110
+ static Vectorized<ComplexDbl> C10_ALWAYS_INLINE elwise_blendv(
111
+ const Vectorized<ComplexDbl>& a,
112
+ const Vectorized<ComplexDbl>& b,
113
+ const Vectorized<ComplexDbl>& mask) {
114
+ return {
115
+ vec_sel(a._vec0, b._vec0, mask._vecb0),
116
+ vec_sel(a._vec1, b._vec1, mask._vecb1)};
117
+ }
118
+ template <typename step_t>
119
+ static Vectorized<ComplexDbl> arange(
120
+ ComplexDbl base = 0.,
121
+ step_t step = static_cast<step_t>(1)) {
122
+ return Vectorized<ComplexDbl>(base, base + step);
123
+ }
124
+ static Vectorized<ComplexDbl> set(
125
+ const Vectorized<ComplexDbl>& a,
126
+ const Vectorized<ComplexDbl>& b,
127
+ int64_t count = size()) {
128
+ switch (count) {
129
+ case 0:
130
+ return a;
131
+ case 1:
132
+ return blend<1>(a, b);
133
+ }
134
+ return b;
135
+ }
136
+
137
+ static Vectorized<value_type> C10_ALWAYS_INLINE
138
+ loadu(const void* ptr, int count = size()) {
139
+ if (count == size()) {
140
+ return {
141
+ vec_vsx_ld(offset0, reinterpret_cast<const double*>(ptr)),
142
+ vec_vsx_ld(offset16, reinterpret_cast<const double*>(ptr))};
143
+ }
144
+
145
+ __at_align__ value_type tmp_values[size()] = {};
146
+ std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));
147
+
148
+ return {
149
+ vec_vsx_ld(offset0, reinterpret_cast<const double*>(tmp_values)),
150
+ vec_vsx_ld(offset16, reinterpret_cast<const double*>(tmp_values))};
151
+ }
152
+ void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
153
+ if (count == size()) {
154
+ vec_vsx_st(_vec0, offset0, reinterpret_cast<double*>(ptr));
155
+ vec_vsx_st(_vec1, offset16, reinterpret_cast<double*>(ptr));
156
+ } else if (count > 0) {
157
+ __at_align__ value_type tmp_values[size()];
158
+ vec_vsx_st(_vec0, offset0, reinterpret_cast<double*>(tmp_values));
159
+ vec_vsx_st(_vec1, offset16, reinterpret_cast<double*>(tmp_values));
160
+ std::memcpy(
161
+ ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
162
+ }
163
+ }
164
+
165
+ const ComplexDbl& operator[](int idx) const = delete;
166
+ ComplexDbl& operator[](int idx) = delete;
167
+
168
+ Vectorized<ComplexDbl> map(ComplexDbl (*const f)(ComplexDbl)) const {
169
+ __at_align__ ComplexDbl tmp[size()];
170
+ store(tmp);
171
+ for (const auto i : c10::irange(size())) {
172
+ tmp[i] = f(tmp[i]);
173
+ }
174
+ return loadu(tmp);
175
+ }
176
+
177
+ Vectorized<ComplexDbl> map(ComplexDbl (*const f)(const ComplexDbl&)) const {
178
+ __at_align__ ComplexDbl tmp[size()];
179
+ store(tmp);
180
+ for (const auto i : c10::irange(size())) {
181
+ tmp[i] = f(tmp[i]);
182
+ }
183
+ return loadu(tmp);
184
+ }
185
+
186
+ Vectorized<ComplexDbl> el_swapped() const {
187
+ vfloat64 v0 = vec_xxpermdi(_vec0, _vec0, 2);
188
+ vfloat64 v1 = vec_xxpermdi(_vec1, _vec1, 2);
189
+ return {v0, v1};
190
+ }
191
+
192
+ Vectorized<ComplexDbl> el_madd(
193
+ const Vectorized<ComplexDbl>& multiplier,
194
+ const Vectorized<ComplexDbl>& val) const {
195
+ return {
196
+ vec_madd(_vec0, multiplier._vec0, val._vec0),
197
+ vec_madd(_vec1, multiplier._vec1, val._vec1)};
198
+ }
199
+
200
+ Vectorized<ComplexDbl> el_mergeo() const {
201
+ vfloat64 v0 = vec_splat(_vec0, 1);
202
+ vfloat64 v1 = vec_splat(_vec1, 1);
203
+ return {v0, v1};
204
+ }
205
+
206
+ Vectorized<ComplexDbl> el_mergee() const {
207
+ vfloat64 v0 = vec_splat(_vec0, 0);
208
+ vfloat64 v1 = vec_splat(_vec1, 0);
209
+ return {v0, v1};
210
+ }
211
+
212
+ static Vectorized<ComplexDbl> el_mergee(
213
+ Vectorized<ComplexDbl>& first,
214
+ Vectorized<ComplexDbl>& second) {
215
+ return {
216
+ vec_mergeh(first._vec0, second._vec0),
217
+ vec_mergeh(first._vec1, second._vec1)};
218
+ }
219
+
220
+ static Vectorized<ComplexDbl> el_mergeo(
221
+ Vectorized<ComplexDbl>& first,
222
+ Vectorized<ComplexDbl>& second) {
223
+ return {
224
+ vec_mergel(first._vec0, second._vec0),
225
+ vec_mergel(first._vec1, second._vec1)};
226
+ }
227
+
228
+ Vectorized<ComplexDbl> abs_2_() const {
229
+ auto a = (*this).elwise_mult(*this);
230
+ auto permuted = a.el_swapped();
231
+ a = a + permuted;
232
+ return a;
233
+ }
234
+
235
+ Vectorized<ComplexDbl> abs_() const {
236
+ auto vi = el_mergeo();
237
+ auto vr = el_mergee();
238
+ return {Sleef_hypotd2_u05vsx(vr._vec0, vi._vec0), Sleef_hypotd2_u05vsx(vr._vec1, vi._vec1)};
239
+ }
240
+
241
+ Vectorized<ComplexDbl> abs() const {
242
+ return abs_() & vd_real_mask;
243
+ }
244
+
245
+ Vectorized<ComplexDbl> angle_() const {
246
+ // angle = atan2(b/a)
247
+ // auto b_a = _mm256_permute_pd(values, 0x05); // b a
248
+ // return Sleef_atan2d4_u10(values, b_a); // 90-angle angle
249
+ Vectorized<ComplexDbl> ret;
250
+ ret._vec0[0] = std::atan2(_vec0[1], _vec0[0]);
251
+ ret._vec1[0] = std::atan2(_vec1[1], _vec1[0]);
252
+ return ret;
253
+ }
254
+
255
+ Vectorized<ComplexDbl> angle() const {
256
+ return angle_() & vd_real_mask;
257
+ }
258
+
259
+ Vectorized<ComplexDbl> real_() const {
260
+ return *this & vd_real_mask;
261
+ }
262
+ Vectorized<ComplexDbl> real() const {
263
+ return *this & vd_real_mask;
264
+ }
265
+ Vectorized<ComplexDbl> imag_() const {
266
+ return *this & vd_imag_mask;
267
+ }
268
+ Vectorized<ComplexDbl> imag() const {
269
+ return imag_().el_swapped();
270
+ }
271
+
272
+ Vectorized<ComplexDbl> conj_() const {
273
+ return *this ^ vd_isign_mask;
274
+ }
275
+ Vectorized<ComplexDbl> conj() const {
276
+ return *this ^ vd_isign_mask;
277
+ }
278
+
279
+ Vectorized<ComplexDbl> log() const {
280
+ // Most trigonometric ops use the log() op to improve complex number
281
+ // performance.
282
+ return map(std::log);
283
+ }
284
+
285
+ Vectorized<ComplexDbl> log2() const {
286
+ // log2eB_inv
287
+ auto ret = log();
288
+ return ret.elwise_mult(vd_log2e_inv);
289
+ }
290
+ Vectorized<ComplexDbl> log10() const {
291
+ auto ret = log();
292
+ return ret.elwise_mult(vd_log10e_inv);
293
+ }
294
+
295
+ Vectorized<ComplexDbl> log1p() const {
296
+ return map(std::log1p);
297
+ }
298
+
299
+ Vectorized<ComplexDbl> asin() const {
300
+ // asin(x)
301
+ // = -i*ln(iz + sqrt(1 - z^2))
302
+ // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi)))
303
+ // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi))
304
+ auto conj = conj_();
305
+ auto b_a = conj.el_swapped();
306
+ auto ab = conj.elwise_mult(b_a);
307
+ auto im = ab + ab;
308
+ auto val_2 = (*this).elwise_mult(*this);
309
+ auto val_2_swapped = val_2.el_swapped();
310
+ auto re = horizontal_sub(val_2, val_2_swapped);
311
+ re = Vectorized<ComplexDbl>(vd_one) - re;
312
+ auto root = el_blend<0x0A>(re, im).sqrt();
313
+ auto ln = (b_a + root).log();
314
+ return ln.el_swapped().conj();
315
+ }
316
+
317
+ Vectorized<ComplexDbl> acos() const {
318
+ // acos(x) = pi/2 - asin(x)
319
+ return Vectorized(vd_pi_2) - asin();
320
+ }
321
+
322
+ Vectorized<ComplexDbl> atan() const {
323
+ // atan(x) = i/2 * ln((i + z)/(i - z))
324
+ auto ione = Vectorized(vd_imag_one);
325
+ auto sum = ione + *this;
326
+ auto sub = ione - *this;
327
+ auto ln = (sum / sub).log(); // ln((i + z)/(i - z))
328
+ return ln * vd_imag_half; // i/2*ln()
329
+ }
330
+ Vectorized<ComplexDbl> atanh() const {
331
+ return map(std::atanh);
332
+ }
333
+
334
+ Vectorized<ComplexDbl> sin() const {
335
+ return map(std::sin);
336
+ }
337
+ Vectorized<ComplexDbl> sinh() const {
338
+ return map(std::sinh);
339
+ }
340
+ Vectorized<ComplexDbl> cos() const {
341
+ return map(std::cos);
342
+ }
343
+ Vectorized<ComplexDbl> cosh() const {
344
+ return map(std::cosh);
345
+ }
346
+
347
+ Vectorized<ComplexDbl> tan() const {
348
+ return map(std::tan);
349
+ }
350
+ Vectorized<ComplexDbl> tanh() const {
351
+ return map(std::tanh);
352
+ }
353
+ Vectorized<ComplexDbl> ceil() const {
354
+ return {vec_ceil(_vec0), vec_ceil(_vec1)};
355
+ }
356
+ Vectorized<ComplexDbl> floor() const {
357
+ return {vec_floor(_vec0), vec_floor(_vec1)};
358
+ }
359
+ Vectorized<ComplexDbl> neg() const {
360
+ auto z = Vectorized<ComplexDbl>(vd_zero);
361
+ return z - *this;
362
+ }
363
+ Vectorized<ComplexDbl> round() const {
364
+ return {vec_rint(_vec0), vec_rint(_vec1)};
365
+ }
366
+
367
+ Vectorized<ComplexDbl> trunc() const {
368
+ return {vec_trunc(_vec0), vec_trunc(_vec1)};
369
+ }
370
+
371
+ Vectorized<ComplexDbl> elwise_sqrt() const {
372
+ return {vec_sqrt(_vec0), vec_sqrt(_vec1)};
373
+ }
374
+
375
+ Vectorized<ComplexDbl> sqrt() const {
376
+ return map(std::sqrt);
377
+ }
378
+
379
+ Vectorized<ComplexDbl> reciprocal() const {
380
+ // re + im*i = (a + bi) / (c + di)
381
+ // re = (ac + bd)/abs_2() = c/abs_2()
382
+ // im = (bc - ad)/abs_2() = -d/abs_2()
383
+ auto c_d = *this ^ vd_isign_mask; // c -d
384
+ auto abs = abs_2_();
385
+ return c_d.elwise_div(abs);
386
+ }
387
+
388
+ Vectorized<ComplexDbl> rsqrt() const {
389
+ return sqrt().reciprocal();
390
+ }
391
+
392
+ static Vectorized<ComplexDbl> horizontal_add(
393
+ Vectorized<ComplexDbl>& first,
394
+ Vectorized<ComplexDbl>& second) {
395
+ // Operates on individual floats, see _mm_hadd_ps
396
+ // {f0+f1, s0+s1, f2+f3, s2+s3, ...}
397
+ // i.e. it sums the re and im of each value and interleaves first and second:
398
+ // {f_re0 + f_im0, s_re0 + s_im0, f_re1 + f_im1, s_re1 + s_im1, ...}
399
+ return el_mergee(first, second) + el_mergeo(first, second);
400
+ }
401
+
402
+ static Vectorized<ComplexDbl> horizontal_sub(
403
+ Vectorized<ComplexDbl>& first,
404
+ Vectorized<ComplexDbl>& second) {
405
+ // we will simulate it differently with 6 instructions total
406
+ // let's permute both inputs so that an element-wise subtract yields the horizontal differences
407
+ auto first_perm = first.el_swapped(); // 2perm
408
+ auto second_perm = second.el_swapped(); // 2perm
409
+ // subtract
410
+ auto first_ret = first - first_perm; // 2sub
411
+ auto second_ret = second - second_perm; // 2 sub
412
+ // now let's choose the even elements
413
+ return el_mergee(first_ret, second_ret); // 2 mergee's
414
+ }
415
+
416
+ Vectorized<ComplexDbl> inline operator*(const Vectorized<ComplexDbl>& b) const {
417
+ //(a + bi) * (c + di) = (ac - bd) + (ad + bc)i
418
+ #if 1
419
+ // this is more vsx friendly than simulating horizontal from x86
420
+ auto vi = b.el_mergeo();
421
+ auto vr = b.el_mergee();
422
+ vi = vi ^ vd_rsign_mask;
423
+ auto ret = elwise_mult(vr);
424
+ auto vx_swapped = el_swapped();
425
+ ret = vx_swapped.el_madd(vi, ret);
426
+ #else
427
+ auto ac_bd = elwise_mult(b);
428
+ auto d_c = b.el_swapped();
429
+ d_c = d_c ^ vd_isign_mask;
430
+ auto ad_bc = elwise_mult(d_c);
431
+ auto ret = horizontal_sub(ac_bd, ad_bc);
432
+ #endif
433
+ return ret;
434
+ }
435
+
436
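+ // The division below first scales both operands by max(|c|, |d|), a variant
+ // of Smith's algorithm, so that forming (ac + bd) and (bc - ad) is far less
+ // likely to overflow or underflow when the divisor's components are very
+ // large or very small.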
+ Vectorized<ComplexDbl> inline operator/(const Vectorized<ComplexDbl>& b) const {
437
+ // re + im*i = (a + bi) / (c + di)
438
+ // re = (ac + bd)/abs_2()
439
+ // im = (bc - ad)/abs_2()
440
+ auto fabs_cd = Vectorized{
441
+ vec_andc(b._vec0, vd_sign_mask),
442
+ vec_andc(b._vec1, vd_sign_mask)}; // |c| |d|
443
+ auto fabs_dc = fabs_cd.el_swapped(); // |d| |c|
444
+ auto scale = fabs_cd.elwise_max(fabs_dc); // sc = max(|c|, |d|)
445
+ auto a2 = elwise_div(scale); // a/sc b/sc
446
+ auto b2 = b.elwise_div(scale); // c/sc d/sc
447
+ auto acbd2 = a2.elwise_mult(b2); // ac/sc^2 bd/sc^2
448
+ auto dc2 = b2.el_swapped(); // d/sc c/sc
449
+ dc2 = dc2 ^ vd_rsign_mask; // -d/sc c/sc
450
+ auto adbc2 = a2.elwise_mult(dc2); // -ad/sc^2 bc/sc^2
451
+ auto ret = horizontal_add(acbd2, adbc2); // (ac+bd)/sc^2 (bc-ad)/sc^2
452
+ auto denom2 = b2.abs_2_(); // (c^2+d^2)/sc^2 (c^2+d^2)/sc^2
453
+ ret = ret.elwise_div(denom2);
454
+ return ret;
455
+ }
456
+
457
+ Vectorized<ComplexDbl> exp() const {
458
+ return map(std::exp);
459
+ }
460
+ Vectorized<ComplexDbl> exp2() const {
461
+ return map(exp2_impl);
462
+ }
463
+ Vectorized<ComplexDbl> expm1() const {
464
+ return map(std::expm1);
465
+ }
466
+
467
+ Vectorized<ComplexDbl> pow(const Vectorized<ComplexDbl>& exp) const {
468
+ __at_align__ ComplexDbl x_tmp[size()];
469
+ __at_align__ ComplexDbl y_tmp[size()];
470
+ store(x_tmp);
471
+ exp.store(y_tmp);
472
+ for (const auto i : c10::irange(size())) {
473
+ x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]);
474
+ }
475
+ return loadu(x_tmp);
476
+ }
477
+
478
+ Vectorized<ComplexDbl> sgn() const {
479
+ return map(at::native::sgn_impl);
480
+ }
481
+
482
+ Vectorized<ComplexDbl> operator<(const Vectorized<ComplexDbl>& other) const {
483
+ TORCH_CHECK(false, "not supported for complex numbers");
484
+ }
485
+ Vectorized<ComplexDbl> operator<=(const Vectorized<ComplexDbl>& other) const {
486
+ TORCH_CHECK(false, "not supported for complex numbers");
487
+ }
488
+ Vectorized<ComplexDbl> operator>(const Vectorized<ComplexDbl>& other) const {
489
+ TORCH_CHECK(false, "not supported for complex numbers");
490
+ }
491
+ Vectorized<ComplexDbl> operator>=(const Vectorized<ComplexDbl>& other) const {
492
+ TORCH_CHECK(false, "not supported for complex numbers");
493
+ }
494
+
495
+ Vectorized<ComplexDbl> eq(const Vectorized<ComplexDbl>& other) const {
496
+ auto eq = (*this == other); // compares real and imag individually
497
+ // If both real numbers and imag numbers are equal, then the complex numbers are equal
498
+ return (eq.real() & eq.imag()) & vd_one;
499
+ }
500
+ Vectorized<ComplexDbl> ne(const Vectorized<ComplexDbl>& other) const {
501
+ auto ne = (*this != other); // compares real and imag individually
502
+ // If either real numbers or imag numbers are not equal, then the complex numbers are not equal
503
+ return (ne.real() | ne.imag()) & vd_one;
504
+ }
505
+
506
+ DEFINE_MEMBER_OP(operator==, ComplexDbl, vec_cmpeq)
507
+ DEFINE_MEMBER_OP(operator!=, ComplexDbl, vec_cmpne)
508
+
509
+ DEFINE_MEMBER_OP(operator+, ComplexDbl, vec_add)
510
+ DEFINE_MEMBER_OP(operator-, ComplexDbl, vec_sub)
511
+ DEFINE_MEMBER_OP(operator&, ComplexDbl, vec_and)
512
+ DEFINE_MEMBER_OP(operator|, ComplexDbl, vec_or)
513
+ DEFINE_MEMBER_OP(operator^, ComplexDbl, vec_xor)
514
+ // elementwise helpers
515
+ DEFINE_MEMBER_OP(elwise_mult, ComplexDbl, vec_mul)
516
+ DEFINE_MEMBER_OP(elwise_div, ComplexDbl, vec_div)
517
+ DEFINE_MEMBER_OP(elwise_gt, ComplexDbl, vec_cmpgt)
518
+ DEFINE_MEMBER_OP(elwise_ge, ComplexDbl, vec_cmpge)
519
+ DEFINE_MEMBER_OP(elwise_lt, ComplexDbl, vec_cmplt)
520
+ DEFINE_MEMBER_OP(elwise_le, ComplexDbl, vec_cmple)
521
+ DEFINE_MEMBER_OP(elwise_max, ComplexDbl, vec_max)
522
+ };
523
+
524
+ template <>
525
+ Vectorized<ComplexDbl> inline maximum(
526
+ const Vectorized<ComplexDbl>& a,
527
+ const Vectorized<ComplexDbl>& b) {
528
+ auto abs_a = a.abs_2_();
529
+ auto abs_b = b.abs_2_();
530
+ // auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_LT_OQ);
531
+ // auto max = _mm256_blendv_ps(a, b, mask);
532
+ auto mask = abs_a.elwise_lt(abs_b);
533
+ auto max = Vectorized<ComplexDbl>::elwise_blendv(a, b, mask);
534
+
535
+ return max;
536
+ // Exploit the fact that all-ones is a NaN.
537
+ // auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q);
538
+ // return _mm256_or_ps(max, isnan);
539
+ }
540
+
541
+ template <>
542
+ Vectorized<ComplexDbl> inline minimum(
543
+ const Vectorized<ComplexDbl>& a,
544
+ const Vectorized<ComplexDbl>& b) {
545
+ auto abs_a = a.abs_2_();
546
+ auto abs_b = b.abs_2_();
547
+ // auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_GT_OQ);
548
+ // auto min = _mm256_blendv_ps(a, b, mask);
549
+ auto mask = abs_a.elwise_gt(abs_b);
550
+ auto min = Vectorized<ComplexDbl>::elwise_blendv(a, b, mask);
551
+ return min;
552
+ // Exploit the fact that all-ones is a NaN.
553
+ // auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q);
554
+ // return _mm256_or_ps(min, isnan);
555
+ }
556
+
557
+
558
+ } // namespace
559
+ } // namespace vec
560
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_complex_float_vsx.h ADDED
@@ -0,0 +1,628 @@
1
+
2
+ #pragma once
3
+ #include <ATen/cpu/vec/intrinsics.h>
4
+ #include <ATen/cpu/vec/vec_base.h>
5
+ #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
6
+ #include <c10/util/complex.h>
7
+ #include <c10/util/irange.h>
8
+
9
+ namespace at {
10
+ namespace vec {
11
+ // See Note [CPU_CAPABILITY namespace]
12
+ inline namespace CPU_CAPABILITY {
13
+ using ComplexFlt = c10::complex<float>;
14
+
15
+ template <>
16
+ class Vectorized<ComplexFlt> {
17
+ private:
18
+ union {
19
+ struct {
20
+ vfloat32 _vec0;
21
+ vfloat32 _vec1;
22
+ };
23
+ struct {
24
+ vbool32 _vecb0;
25
+ vbool32 _vecb1;
26
+ };
27
+
28
+ } __attribute__((__may_alias__));
29
+
30
+ public:
31
+ using value_type = ComplexFlt;
32
+ using vec_internal_type = vfloat32;
33
+ using vec_internal_mask_type = vbool32;
34
+ using size_type = int;
35
+
36
+ static constexpr size_type size() {
37
+ return 4;
38
+ }
39
+ Vectorized() {}
40
+
41
+ C10_ALWAYS_INLINE Vectorized(vfloat32 v) : _vec0{v}, _vec1{v} {}
42
+ C10_ALWAYS_INLINE Vectorized(vbool32 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
43
+ C10_ALWAYS_INLINE Vectorized(vfloat32 v1, vfloat32 v2) : _vec0{v1}, _vec1{v2} {}
44
+ C10_ALWAYS_INLINE Vectorized(vbool32 v1, vbool32 v2) : _vecb0{v1}, _vecb1{v2} {}
45
+
46
+ Vectorized(ComplexFlt val) {
47
+ float real_value = val.real();
48
+ float imag_value = val.imag();
49
+ _vec0 = vfloat32{real_value, imag_value, real_value, imag_value};
50
+ _vec1 = vfloat32{real_value, imag_value, real_value, imag_value};
51
+ }
52
+
53
+ Vectorized(ComplexFlt val1, ComplexFlt val2, ComplexFlt val3, ComplexFlt val4) {
54
+ _vec0 = vfloat32{val1.real(), val1.imag(), val2.real(), val2.imag()};
55
+ _vec1 = vfloat32{val3.real(), val3.imag(), val4.real(), val4.imag()};
56
+ }
57
+
58
+ template <uint64_t mask>
59
+ static std::enable_if_t<blendChoiceComplex(mask) == 0, Vectorized<ComplexFlt>>
60
+ C10_ALWAYS_INLINE
61
+ blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) {
62
+ return a;
63
+ }
64
+
65
+ template <uint64_t mask>
66
+ static std::enable_if_t<blendChoiceComplex(mask) == 1, Vectorized<ComplexFlt>>
67
+ C10_ALWAYS_INLINE
68
+ blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) {
69
+ return b;
70
+ }
71
+
72
+ template <uint64_t mask>
73
+ static std::enable_if_t<blendChoiceComplex(mask) == 2, Vectorized<ComplexFlt>>
74
+ C10_ALWAYS_INLINE
75
+ blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) {
76
+ return {b._vec0, a._vec1};
77
+ }
78
+
79
+ template <uint64_t mask>
80
+ static std::enable_if_t<blendChoiceComplex(mask) == 3, Vectorized<ComplexFlt>>
81
+ C10_ALWAYS_INLINE
82
+ blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) {
83
+ return {a._vec0, b._vec1};
84
+ }
85
+
86
+ template <uint64_t mask>
87
+ static std::enable_if_t<blendChoiceComplex(mask) == 4, Vectorized<ComplexFlt>>
88
+ C10_ALWAYS_INLINE
89
+ blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) {
90
+ const vbool32 mask_1st = VsxComplexMask1(mask);
91
+ return {(vfloat32)vec_sel(a._vec0, b._vec0, mask_1st), a._vec1};
92
+ }
93
+
94
+ template <uint64_t mask>
95
+ static std::enable_if_t<blendChoiceComplex(mask) == 5, Vectorized<ComplexFlt>>
96
+ C10_ALWAYS_INLINE
97
+ blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) {
98
+ const vbool32 mask_1st = VsxComplexMask1(mask);
99
+ return {(vfloat32)vec_sel(a._vec0, b._vec0, mask_1st), b._vec1};
100
+ }
101
+
102
+ template <uint64_t mask>
103
+ static std::enable_if_t<blendChoiceComplex(mask) == 6, Vectorized<ComplexFlt>>
104
+ C10_ALWAYS_INLINE
105
+ blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) {
106
+ const vbool32 mask_2nd = VsxComplexMask2(mask);
107
+ // generated masks
108
+ return {a._vec0, (vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)};
109
+ }
110
+
111
+ template <uint64_t mask>
112
+ static std::enable_if_t<blendChoiceComplex(mask) == 7, Vectorized<ComplexFlt>>
113
+ C10_ALWAYS_INLINE
114
+ blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) {
115
+ const vbool32 mask_2nd = VsxComplexMask2(mask);
116
+ // generated masks
117
+ return {b._vec0, (vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)};
118
+ }
119
+
120
+ template <uint64_t mask>
121
+ static std::enable_if_t<blendChoiceComplex(mask) == 8, Vectorized<ComplexFlt>>
122
+ C10_ALWAYS_INLINE
123
+ blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) {
124
+ const vbool32 mask_1st = VsxComplexMask1(mask);
125
+ const vbool32 mask_2nd = VsxComplexMask2(mask);
126
+ return {
127
+ (vfloat32)vec_sel(a._vec0, b._vec0, mask_1st),
128
+ (vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)};
129
+ }
130
+
131
+ template <int64_t mask>
132
+ static Vectorized<ComplexFlt> C10_ALWAYS_INLINE
133
+ el_blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) {
134
+ const vbool32 mask_1st = VsxMask1(mask);
135
+ const vbool32 mask_2nd = VsxMask2(mask);
136
+ return {
137
+ (vfloat32)vec_sel(a._vec0, b._vec0, mask_1st),
138
+ (vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)};
139
+ }
140
+
141
+ static Vectorized<ComplexFlt> blendv(
142
+ const Vectorized<ComplexFlt>& a,
143
+ const Vectorized<ComplexFlt>& b,
144
+ const Vectorized<ComplexFlt>& mask) {
145
+ // convert std::complex<V> index mask to V index mask: xy -> xxyy
146
+ auto mask_complex = Vectorized<ComplexFlt>(
147
+ vec_mergeh(mask._vec0, mask._vec0), vec_mergeh(mask._vec1, mask._vec1));
148
+ return {
149
+ vec_sel(a._vec0, b._vec0, reinterpret_cast<vbool32>(mask_complex._vec0)),
150
+ vec_sel(a._vec1, b._vec1, reinterpret_cast<vbool32>(mask_complex._vec1)),
151
+ };
152
+ }
153
+
154
+ static Vectorized<ComplexFlt> elwise_blendv(
155
+ const Vectorized<ComplexFlt>& a,
156
+ const Vectorized<ComplexFlt>& b,
157
+ const Vectorized<ComplexFlt>& mask) {
158
+ return {
159
+ vec_sel(a._vec0, b._vec0, reinterpret_cast<vbool32>(mask._vec0)),
160
+ vec_sel(a._vec1, b._vec1, reinterpret_cast<vbool32>(mask._vec1)),
161
+ };
162
+ }
163
+
164
+ template <typename step_t>
165
+ static Vectorized<ComplexFlt> arange(
166
+ ComplexFlt base = 0.,
167
+ step_t step = static_cast<step_t>(1)) {
168
+ return Vectorized<ComplexFlt>(
169
+ base,
170
+ base + step,
171
+ base + ComplexFlt(2) * step,
172
+ base + ComplexFlt(3) * step);
173
+ }
174
+ static Vectorized<ComplexFlt> set(
175
+ const Vectorized<ComplexFlt>& a,
176
+ const Vectorized<ComplexFlt>& b,
177
+ int64_t count = size()) {
178
+ switch (count) {
179
+ case 0:
180
+ return a;
181
+ case 1:
182
+ return blend<1>(a, b);
183
+ case 2:
184
+ return blend<3>(a, b);
185
+ case 3:
186
+ return blend<7>(a, b);
187
+ }
188
+ return b;
189
+ }
190
+
191
+ static Vectorized<value_type> C10_ALWAYS_INLINE
192
+ loadu(const void* ptr, int count = size()) {
193
+ if (count == size()) {
194
+ return {
195
+ vec_vsx_ld(offset0, reinterpret_cast<const float*>(ptr)),
196
+ vec_vsx_ld(offset16, reinterpret_cast<const float*>(ptr))};
197
+ }
198
+
199
+ __at_align__ value_type tmp_values[size()] = {};
200
+ std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));
201
+
202
+ return {
203
+ vec_vsx_ld(offset0, reinterpret_cast<const float*>(tmp_values)),
204
+ vec_vsx_ld(offset16, reinterpret_cast<const float*>(tmp_values))};
205
+ }
206
+
207
+ void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
208
+ if (count == size()) {
209
+ vec_vsx_st(_vec0, offset0, reinterpret_cast<float*>(ptr));
210
+ vec_vsx_st(_vec1, offset16, reinterpret_cast<float*>(ptr));
211
+ } else if (count > 0) {
212
+ __at_align__ value_type tmp_values[size()];
213
+ vec_vsx_st(_vec0, offset0, reinterpret_cast<float*>(tmp_values));
214
+ vec_vsx_st(_vec1, offset16, reinterpret_cast<float*>(tmp_values));
215
+ std::memcpy(
216
+ ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
217
+ }
218
+ }
219
+
220
+ const ComplexFlt& operator[](int idx) const = delete;
221
+ ComplexFlt& operator[](int idx) = delete;
222
+
223
+ Vectorized<ComplexFlt> map(ComplexFlt (*const f)(ComplexFlt)) const {
224
+ __at_align__ ComplexFlt tmp[size()];
225
+ store(tmp);
226
+ for (const auto i : c10::irange(size())) {
227
+ tmp[i] = f(tmp[i]);
228
+ }
229
+ return loadu(tmp);
230
+ }
231
+
232
+ Vectorized<ComplexFlt> map(ComplexFlt (*const f)(const ComplexFlt&)) const {
233
+ __at_align__ ComplexFlt tmp[size()];
234
+ store(tmp);
235
+ for (const auto i : c10::irange(size())) {
236
+ tmp[i] = f(tmp[i]);
237
+ }
238
+ return loadu(tmp);
239
+ }
240
+
241
+ static Vectorized<ComplexFlt> horizontal_add(
242
+ Vectorized<ComplexFlt>& first,
243
+ Vectorized<ComplexFlt>& second) {
244
+ // Operates on individual floats, see _mm_hadd_ps
245
+ // {f0+f1, s0+s1, f2+f3, s2+s3, ...}
246
+ // i.e. it sums the re and im of each value and interleaves first and second:
247
+ // {f_re0 + f_im0, s_re0 + s_im0, f_re1 + f_im1, s_re1 + s_im1, ...}
248
+ return el_mergee(first, second) + el_mergeo(first, second);
249
+ }
250
+
251
+ static Vectorized<ComplexFlt> horizontal_sub_permD8(
252
+ Vectorized<ComplexFlt>& first,
253
+ Vectorized<ComplexFlt>& second) {
254
+ // we will simulate it differently with 6 instructions total
255
+ // let's permute each input so that we can subtract, getting horizontal differences
256
+ auto first_perm = first.el_swapped(); // 2perm
257
+ auto second_perm = second.el_swapped(); // 2perm
258
+ // subtract
259
+ auto first_ret = first - first_perm; // 2sub
260
+ auto second_ret = second - second_perm; // 2 sub
261
+ // now let's choose the even elements
262
+ return el_mergee(first_ret, second_ret); // 2 mergee's
263
+ }
264
+
265
+ Vectorized<ComplexFlt> abs_2_() const {
266
+ auto a = (*this).elwise_mult(*this);
267
+ auto permuted = a.el_swapped();
268
+ a = a + permuted;
269
+ return a.el_mergee();
270
+ }
271
+
272
+ Vectorized<ComplexFlt> abs_() const {
273
+ auto vi = el_mergeo();
274
+ auto vr = el_mergee();
275
+ return {Sleef_hypotf4_u05vsx(vr._vec0, vi._vec0), Sleef_hypotf4_u05vsx(vr._vec1, vi._vec1)};
276
+ }
277
+
278
+ Vectorized<ComplexFlt> abs() const {
279
+ return abs_() & real_mask;
280
+ }
281
+
282
+ Vectorized<ComplexFlt> real_() const {
283
+ return *this & real_mask;
284
+ }
285
+ Vectorized<ComplexFlt> real() const {
286
+ return *this & real_mask;
287
+ }
288
+ Vectorized<ComplexFlt> imag_() const {
289
+ return *this & imag_mask;
290
+ }
291
+ Vectorized<ComplexFlt> imag() const {
292
+ // we can use swap_mask or sldwi
293
+ auto ret = imag_();
294
+ return {
295
+ vec_sldw(ret._vec0, ret._vec0, 3), vec_sldw(ret._vec1, ret._vec1, 3)};
296
+ }
297
+
298
+ Vectorized<ComplexFlt> conj_() const {
299
+ return *this ^ isign_mask;
300
+ }
301
+ Vectorized<ComplexFlt> conj() const {
302
+ return *this ^ isign_mask;
303
+ }
304
+
305
+ Vectorized<ComplexFlt> log() const {
306
+ // Most trigonometric ops use the log() op to improve complex number
307
+ // performance.
308
+ return map(std::log);
309
+ }
310
+
311
+ Vectorized<ComplexFlt> log2() const {
312
+ // log2eB_inv
313
+ auto ret = log();
314
+ return ret.elwise_mult(log2e_inv);
315
+ }
316
+ Vectorized<ComplexFlt> log10() const {
317
+ auto ret = log();
318
+ return ret.elwise_mult(log10e_inv);
319
+ }
320
+
321
+ Vectorized<ComplexFlt> log1p() const {
322
+ return map(std::log1p);
323
+ }
324
+
325
+ Vectorized<ComplexFlt> el_swapped() const {
326
+ vfloat32 v0 = vec_perm(_vec0, _vec0, swap_mask);
327
+ vfloat32 v1 = vec_perm(_vec1, _vec1, swap_mask);
328
+ return {v0, v1};
329
+ }
330
+
331
+ Vectorized<ComplexFlt> el_mergee() const {
332
+ // as vec_mergee is phased in, we can use vec_perm with a mask
333
+ return {vec_mergee(_vecb0, _vecb0), vec_mergee(_vecb1, _vecb1)};
334
+ }
335
+
336
+ Vectorized<ComplexFlt> el_mergeo() const {
337
+ // as vec_mergeo is phased in, we can use vec_perm with a mask
338
+ return {vec_mergeo(_vecb0, _vecb0), vec_mergeo(_vecb1, _vecb1)};
339
+ }
340
+
341
+ Vectorized<ComplexFlt> el_madd(
342
+ const Vectorized<ComplexFlt>& multiplier,
343
+ const Vectorized<ComplexFlt>& val) const {
344
+ return {
345
+ vec_madd(_vec0, multiplier._vec0, val._vec0),
346
+ vec_madd(_vec1, multiplier._vec1, val._vec1)};
347
+ }
348
+
349
+ static Vectorized<ComplexFlt> el_mergee(
350
+ Vectorized<ComplexFlt>& first,
351
+ Vectorized<ComplexFlt>& second) {
352
+ return {
353
+ vec_mergee(first._vecb0, second._vecb0),
354
+ vec_mergee(first._vecb1, second._vecb1)};
355
+ }
356
+
357
+ static Vectorized<ComplexFlt> el_mergeo(
358
+ Vectorized<ComplexFlt>& first,
359
+ Vectorized<ComplexFlt>& second) {
360
+ return {
361
+ vec_mergeo(first._vecb0, second._vecb0),
362
+ vec_mergeo(first._vecb1, second._vecb1)};
363
+ }
364
+
365
+ Vectorized<ComplexFlt> angle_() const {
366
+ // angle = atan2(b, a)
367
+ // auto b_a = _mm256_permute_ps(values, 0xB1); // b a
368
+ // return Sleef_atan2f8_u10(values, b_a); // 90-angle angle
369
+ Vectorized<ComplexFlt> ret;
370
+ for (int i = 0; i < 4; i += 2) {
371
+ ret._vec0[i] = std::atan2(_vec0[i + 1], _vec0[i]);
372
+ ret._vec1[i] = std::atan2(_vec1[i + 1], _vec1[i]);
373
+ }
374
+ return ret;
375
+ }
376
+
377
+ Vectorized<ComplexFlt> angle() const {
378
+ return angle_() & real_mask;
379
+ }
380
+
381
+ Vectorized<ComplexFlt> sin() const {
382
+ return map(std::sin);
383
+ }
384
+ Vectorized<ComplexFlt> sinh() const {
385
+ return map(std::sinh);
386
+ }
387
+ Vectorized<ComplexFlt> cos() const {
388
+ return map(std::cos);
389
+ }
390
+ Vectorized<ComplexFlt> cosh() const {
391
+ return map(std::cosh);
392
+ }
393
+ Vectorized<ComplexFlt> ceil() const {
394
+ return {vec_ceil(_vec0), vec_ceil(_vec1)};
395
+ }
396
+ Vectorized<ComplexFlt> floor() const {
397
+ return {vec_floor(_vec0), vec_floor(_vec1)};
398
+ }
399
+ Vectorized<ComplexFlt> neg() const {
400
+ auto z = Vectorized<ComplexFlt>(zero);
401
+ return z - *this;
402
+ }
403
+ Vectorized<ComplexFlt> round() const {
404
+ return {vec_round(_vec0), vec_round(_vec1)};
405
+ }
406
+ Vectorized<ComplexFlt> tan() const {
407
+ return map(std::tan);
408
+ }
409
+ Vectorized<ComplexFlt> tanh() const {
410
+ return map(std::tanh);
411
+ }
412
+ Vectorized<ComplexFlt> trunc() const {
413
+ return {vec_trunc(_vec0), vec_trunc(_vec1)};
414
+ }
415
+
416
+ Vectorized<ComplexFlt> elwise_sqrt() const {
417
+ return {vec_sqrt(_vec0), vec_sqrt(_vec1)};
418
+ }
419
+
420
+ Vectorized<ComplexFlt> sqrt() const {
421
+ return map(std::sqrt);
422
+ }
423
+
424
+ Vectorized<ComplexFlt> reciprocal() const {
425
+ // re + im*i = (a + bi) / (c + di)
426
+ // re = (ac + bd)/abs_2() = c/abs_2()
427
+ // im = (bc - ad)/abs_2() = -d/abs_2()
428
+ auto c_d = *this ^ isign_mask; // c -d
429
+ auto abs = abs_2_();
430
+ return c_d.elwise_div(abs);
431
+ }
432
+
433
+ Vectorized<ComplexFlt> rsqrt() const {
434
+ return sqrt().reciprocal();
435
+ }
436
+
437
+ Vectorized<ComplexFlt> pow(const Vectorized<ComplexFlt>& exp) const {
438
+ __at_align__ ComplexFlt x_tmp[size()];
439
+ __at_align__ ComplexFlt y_tmp[size()];
440
+ store(x_tmp);
441
+ exp.store(y_tmp);
442
+ for (const auto i : c10::irange(size())) {
443
+ x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]);
444
+ }
445
+ return loadu(x_tmp);
446
+ }
447
+
448
+ Vectorized<ComplexFlt> atan() const {
449
+ // atan(x) = i/2 * ln((i + z)/(i - z))
450
+ auto ione = Vectorized(imag_one);
451
+ auto sum = ione + *this;
452
+ auto sub = ione - *this;
453
+ auto ln = (sum / sub).log(); // ln((i + z)/(i - z))
454
+ return ln * imag_half; // i/2*ln()
455
+ }
456
+ Vectorized<ComplexFlt> atanh() const {
457
+ return map(std::atanh);
458
+ }
459
+
460
+ Vectorized<ComplexFlt> acos() const {
461
+ // acos(x) = pi/2 - asin(x)
462
+ return Vectorized(pi_2) - asin();
463
+ }
464
+
465
+ Vectorized<ComplexFlt> inline operator*(const Vectorized<ComplexFlt>& b) const {
466
+ //(a + bi) * (c + di) = (ac - bd) + (ad + bc)i
467
+
468
+ #if 1
469
+ // this is more VSX-friendly than simulating the x86 horizontal ops
470
+
471
+ auto vi = b.el_mergeo();
472
+ auto vr = b.el_mergee();
473
+ vi = vi ^ rsign_mask;
474
+ auto ret = elwise_mult(vr);
475
+ auto vx_swapped = el_swapped();
476
+ ret = vx_swapped.el_madd(vi, ret);
477
+ return ret;
478
+
479
+ #else
480
+
481
+ auto ac_bd = elwise_mult(b);
482
+ auto d_c = b.el_swapped();
483
+ d_c = d_c ^ isign_mask;
484
+ auto ad_bc = elwise_mult(d_c);
485
+ auto ret = horizontal_sub_permD8(ac_bd, ad_bc);
486
+ return ret;
487
+ #endif
488
+ }
489
+
490
+ Vectorized<ComplexFlt> inline operator/(const Vectorized<ComplexFlt>& b) const {
491
+ // re + im*i = (a + bi) / (c + di)
492
+ // re = (ac + bd)/abs_2()
493
+ // im = (bc - ad)/abs_2()
494
+ auto fabs_cd = Vectorized{
495
+ vec_andc(b._vec0, sign_mask),
496
+ vec_andc(b._vec1, sign_mask)}; // |c| |d|
497
+ auto fabs_dc = fabs_cd.el_swapped(); // |d| |c|
498
+ auto scale = fabs_cd.elwise_max(fabs_dc); // sc = max(|c|, |d|)
499
+ auto a2 = elwise_div(scale); // a/sc b/sc
500
+ auto b2 = b.elwise_div(scale); // c/sc d/sc
501
+ auto acbd2 = a2.elwise_mult(b2); // ac/sc^2 bd/sc^2
502
+ auto dc2 = b2.el_swapped(); // d/sc c/sc
503
+ dc2 = dc2 ^ rsign_mask; // -d/sc c/sc
504
+ auto adbc2 = a2.elwise_mult(dc2); // -ad/sc^2 bc/sc^2
505
+ auto ret = horizontal_add(acbd2, adbc2); // (ac+bd)/sc^2 (bc-ad)/sc^2
506
+ auto denom2 = b2.abs_2_(); // (c^2+d^2)/sc^2 (c^2+d^2)/sc^2
507
+ ret = ret.elwise_div(denom2);
508
+ return ret;
509
+ }
510
+
511
+ Vectorized<ComplexFlt> asin() const {
512
+ // asin(x)
513
+ // = -i*ln(iz + sqrt(1 -z^2))
514
+ // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi)))
515
+ // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi))
516
+
517
+ #if 1
518
+ auto conj = conj_();
519
+ auto b_a = conj.el_swapped();
520
+ auto ab = conj.elwise_mult(b_a);
521
+ auto im = ab + ab;
522
+ auto val_2 = (*this).elwise_mult(*this);
523
+ auto val_2_swapped = val_2.el_swapped();
524
+ auto re = horizontal_sub_permD8(val_2, val_2_swapped);
525
+ re = Vectorized<ComplexFlt>(one) - re;
526
+ auto root = el_blend<0xAA>(re, im).sqrt();
527
+ auto ln = (b_a + root).log();
528
+ return ln.el_swapped().conj();
529
+ #else
530
+ return map(std::asin);
531
+ #endif
532
+ }
533
+
534
+ Vectorized<ComplexFlt> exp() const {
535
+ return map(std::exp);
536
+ }
537
+ Vectorized<ComplexFlt> exp2() const {
538
+ return map(exp2_impl);
539
+ }
540
+ Vectorized<ComplexFlt> expm1() const {
541
+ return map(std::expm1);
542
+ }
543
+
544
+ Vectorized<ComplexFlt> eq(const Vectorized<ComplexFlt>& other) const {
545
+ auto eq = (*this == other); // compares real and imag individually
546
+ // If both real numbers and imag numbers are equal, then the complex numbers are equal
547
+ return (eq.real() & eq.imag()) & one;
548
+ }
549
+ Vectorized<ComplexFlt> ne(const Vectorized<ComplexFlt>& other) const {
550
+ auto ne = (*this != other); // compares real and imag individually
551
+ // If either real numbers or imag numbers are not equal, then the complex numbers are not equal
552
+ return (ne.real() | ne.imag()) & one;
553
+ }
554
+
555
+ Vectorized<ComplexFlt> sgn() const {
556
+ return map(at::native::sgn_impl);
557
+ }
558
+
559
+ Vectorized<ComplexFlt> operator<(const Vectorized<ComplexFlt>& other) const {
560
+ TORCH_CHECK(false, "not supported for complex numbers");
561
+ }
562
+
563
+ Vectorized<ComplexFlt> operator<=(const Vectorized<ComplexFlt>& other) const {
564
+ TORCH_CHECK(false, "not supported for complex numbers");
565
+ }
566
+
567
+ Vectorized<ComplexFlt> operator>(const Vectorized<ComplexFlt>& other) const {
568
+ TORCH_CHECK(false, "not supported for complex numbers");
569
+ }
570
+
571
+ Vectorized<ComplexFlt> operator>=(const Vectorized<ComplexFlt>& other) const {
572
+ TORCH_CHECK(false, "not supported for complex numbers");
573
+ }
574
+
575
+ DEFINE_MEMBER_OP(operator==, ComplexFlt, vec_cmpeq)
576
+ DEFINE_MEMBER_OP(operator!=, ComplexFlt, vec_cmpne)
577
+
578
+ DEFINE_MEMBER_OP(operator+, ComplexFlt, vec_add)
579
+ DEFINE_MEMBER_OP(operator-, ComplexFlt, vec_sub)
580
+ DEFINE_MEMBER_OP(operator&, ComplexFlt, vec_and)
581
+ DEFINE_MEMBER_OP(operator|, ComplexFlt, vec_or)
582
+ DEFINE_MEMBER_OP(operator^, ComplexFlt, vec_xor)
583
+ // elementwise helpers
584
+ DEFINE_MEMBER_OP(elwise_mult, ComplexFlt, vec_mul)
585
+ DEFINE_MEMBER_OP(elwise_div, ComplexFlt, vec_div)
586
+ DEFINE_MEMBER_OP(elwise_gt, ComplexFlt, vec_cmpgt)
587
+ DEFINE_MEMBER_OP(elwise_ge, ComplexFlt, vec_cmpge)
588
+ DEFINE_MEMBER_OP(elwise_lt, ComplexFlt, vec_cmplt)
589
+ DEFINE_MEMBER_OP(elwise_le, ComplexFlt, vec_cmple)
590
+ DEFINE_MEMBER_OP(elwise_max, ComplexFlt, vec_max)
591
+ };
592
+
593
+ template <>
594
+ Vectorized<ComplexFlt> inline maximum(
595
+ const Vectorized<ComplexFlt>& a,
596
+ const Vectorized<ComplexFlt>& b) {
597
+ auto abs_a = a.abs_2_();
598
+ auto abs_b = b.abs_2_();
599
+ // auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_LT_OQ);
600
+ // auto max = _mm256_blendv_ps(a, b, mask);
601
+ auto mask = abs_a.elwise_lt(abs_b);
602
+ auto max = Vectorized<ComplexFlt>::elwise_blendv(a, b, mask);
603
+
604
+ return max;
605
+ // Exploit the fact that all-ones is a NaN.
606
+ // auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q);
607
+ // return _mm256_or_ps(max, isnan);
608
+ }
609
+
610
+ template <>
611
+ Vectorized<ComplexFlt> inline minimum(
612
+ const Vectorized<ComplexFlt>& a,
613
+ const Vectorized<ComplexFlt>& b) {
614
+ auto abs_a = a.abs_2_();
615
+ auto abs_b = b.abs_2_();
616
+ // auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_GT_OQ);
617
+ // auto min = _mm256_blendv_ps(a, b, mask);
618
+ auto mask = abs_a.elwise_gt(abs_b);
619
+ auto min = Vectorized<ComplexFlt>::elwise_blendv(a, b, mask);
620
+ return min;
621
+ // Exploit the fact that all-ones is a NaN.
622
+ // auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q);
623
+ // return _mm256_or_ps(min, isnan);
624
+ }
625
+
626
+ } // namespace
627
+ } // namespace vec
628
+ } // namespace at
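The ComplexFlt operator* and operator/ above implement the usual (ac - bd) + (ad + bc)i product and a max(|c|, |d|)-scaled division. A short sketch of driving them through the public Vectorized API, assuming this VSX backend is the one selected at build time; the buffer and function names are illustrative:

#include <ATen/cpu/vec/vec.h>
#include <c10/util/complex.h>

using at::vec::Vectorized;
using ComplexFlt = c10::complex<float>;

void demo_complex_float_arith(const ComplexFlt* x, const ComplexFlt* y, ComplexFlt* out) {
  auto vx = Vectorized<ComplexFlt>::loadu(x);  // 4 complex lanes (8 floats) per vector
  auto vy = Vectorized<ComplexFlt>::loadu(y);
  auto prod = vx * vy;   // (ac - bd) + (ad + bc)i per lane
  auto quot = vx / vy;   // division scaled by max(|c|, |d|) to avoid overflow in c^2 + d^2
  (prod + quot).store(out);
}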
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_double_vsx.h ADDED
@@ -0,0 +1,438 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cpu/vec/intrinsics.h>
4
+ #include <ATen/cpu/vec/vec_base.h>
5
+ #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
6
+ #include <c10/util/irange.h>
7
+
8
+ #include <sleef.h>
9
+
10
+ namespace at {
11
+ namespace vec {
12
+
13
+ inline namespace CPU_CAPABILITY {
14
+
15
+
16
+ template <>
17
+ class Vectorized<double> {
18
+ private:
19
+ union {
20
+ struct {
21
+ vfloat64 _vec0;
22
+ vfloat64 _vec1;
23
+ };
24
+ struct {
25
+ vbool64 _vecb0;
26
+ vbool64 _vecb1;
27
+ };
28
+
29
+ } __attribute__((__may_alias__));
30
+
31
+ public:
32
+ using value_type = double;
33
+ using vec_internal_type = vfloat64;
34
+ using vec_internal_mask_type = vbool64;
35
+ using size_type = int;
36
+ static constexpr size_type size() {
37
+ return 4;
38
+ }
39
+ Vectorized() {}
40
+ C10_ALWAYS_INLINE Vectorized(vfloat64 v) : _vec0{v}, _vec1{v} {}
41
+ C10_ALWAYS_INLINE Vectorized(vbool64 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
42
+ C10_ALWAYS_INLINE Vectorized(vfloat64 v1, vfloat64 v2) : _vec0{v1}, _vec1{v2} {}
43
+ C10_ALWAYS_INLINE Vectorized(vbool64 v1, vbool64 v2) : _vecb0{v1}, _vecb1{v2} {}
44
+ C10_ALWAYS_INLINE Vectorized(double scalar)
45
+ : _vec0{vec_splats(scalar)}, _vec1{vec_splats(scalar)} {}
46
+ C10_ALWAYS_INLINE Vectorized(
47
+ double scalar1,
48
+ double scalar2,
49
+ double scalar3,
50
+ double scalar4)
51
+ : _vec0{vfloat64{scalar1, scalar2}}, _vec1{vfloat64{scalar3, scalar4}} {}
52
+ C10_ALWAYS_INLINE const vec_internal_type& vec0() const {
53
+ return _vec0;
54
+ }
55
+ C10_ALWAYS_INLINE const vec_internal_type& vec1() const {
56
+ return _vec1;
57
+ }
58
+
59
+ int zero_mask() const {
60
+ auto cmp = (*this == vd_zero);
61
+ return (cmp._vecb0[0] & 1) | (cmp._vecb0[1] & 2) | (cmp._vecb1[0] & 4) |
62
+ (cmp._vecb1[1] & 8);
63
+ }
64
+
65
+ template <int64_t mask>
66
+ static std::enable_if_t<blendChoiceDbl(mask) == 0, Vectorized<double>> C10_ALWAYS_INLINE
67
+ blend(const Vectorized<double>& a, const Vectorized<double>& b) {
68
+ return a;
69
+ }
70
+
71
+ template <int64_t mask>
72
+ static std::enable_if_t<blendChoiceDbl(mask) == 1, Vectorized<double>> C10_ALWAYS_INLINE
73
+ blend(const Vectorized<double>& a, const Vectorized<double>& b) {
74
+ return b;
75
+ }
76
+
77
+ template <int64_t mask>
78
+ static std::enable_if_t<blendChoiceDbl(mask) == 2, Vectorized<double>> C10_ALWAYS_INLINE
79
+ blend(const Vectorized<double>& a, const Vectorized<double>& b) {
80
+ return { b._vec0, a._vec1 };
81
+ }
82
+
83
+ template <int64_t mask>
84
+ static std::enable_if_t<blendChoiceDbl(mask) == 3, Vectorized<double>> C10_ALWAYS_INLINE
85
+ blend(const Vectorized<double>& a, const Vectorized<double>& b) {
86
+ return { a._vec0, b._vec1 };
87
+ }
88
+
89
+
90
+ template <int64_t mask>
91
+ static std::enable_if_t<blendChoiceDbl(mask) == 4, Vectorized<double>> C10_ALWAYS_INLINE
92
+ blend(const Vectorized<double>& a, const Vectorized<double>& b) {
93
+ const vbool64 mask_1st = VsxDblMask1(mask);
94
+ return { (vfloat64)vec_sel(a._vec0, b._vec0, mask_1st), a._vec1 };
95
+ }
96
+
97
+ template <int64_t mask>
98
+ static std::enable_if_t<blendChoiceDbl(mask) == 5, Vectorized<double>> C10_ALWAYS_INLINE
99
+ blend(const Vectorized<double>& a, const Vectorized<double>& b) {
100
+ const vbool64 mask_1st = VsxDblMask1(mask);
101
+ return { (vfloat64)vec_sel(a._vec0, b._vec0, mask_1st), b._vec1 };
102
+ }
103
+
104
+
105
+ template <int64_t mask>
106
+ static std::enable_if_t<blendChoiceDbl(mask) == 6,
107
+ Vectorized<double>>
108
+ C10_ALWAYS_INLINE blend(const Vectorized<double>& a, const Vectorized<double>& b) {
109
+ const vbool64 mask_2nd = VsxDblMask2(mask);
110
+ // generated masks
111
+ return { a._vec0,
112
+ (vfloat64)vec_sel(a._vec1, b._vec1, mask_2nd) };
113
+ }
114
+
115
+ template <int64_t mask>
116
+ static std::enable_if_t<blendChoiceDbl(mask) == 7,
117
+ Vectorized<double>>
118
+ C10_ALWAYS_INLINE blend(const Vectorized<double>& a, const Vectorized<double>& b) {
119
+ const vbool64 mask_2nd = VsxDblMask2(mask);
120
+ // generated masks
121
+ return { b._vec0,
122
+ (vfloat64)vec_sel(a._vec1, b._vec1, mask_2nd) };
123
+ }
124
+
125
+ template <int64_t mask>
126
+ static std::enable_if_t<blendChoiceDbl(mask) == 8, Vectorized<double>>
127
+ C10_ALWAYS_INLINE blend(const Vectorized<double>& a, const Vectorized<double>& b) {
128
+ const vbool64 mask_1st = VsxDblMask1(mask);
129
+ const vbool64 mask_2nd = VsxDblMask2(mask);
130
+ return {
131
+ (vfloat64)vec_sel(a._vec0, b._vec0, mask_1st),
132
+ (vfloat64)vec_sel(a._vec1, b._vec1, mask_2nd) };
133
+ }
134
+
135
+
136
+ static Vectorized<double> C10_ALWAYS_INLINE blendv(
137
+ const Vectorized<double>& a,
138
+ const Vectorized<double>& b,
139
+ const Vectorized<double>& mask) {
140
+ // the mask used here is returned by a comparison of vec256
141
+
142
+ return {
143
+ vec_sel(a._vec0, b._vec0, mask._vecb0),
144
+ vec_sel(a._vec1, b._vec1, mask._vecb1)};
145
+ }
146
+ template <typename step_t>
147
+ static Vectorized<double> arange(double base = 0., step_t step = static_cast<step_t>(1)) {
148
+ return Vectorized<double>(base, base + step, base + 2 * step, base + 3 * step);
149
+ }
150
+
151
+ static Vectorized<double> C10_ALWAYS_INLINE
152
+ set(const Vectorized<double>& a, const Vectorized<double>& b, size_t count = size()) {
153
+ switch (count) {
154
+ case 0:
155
+ return a;
156
+ case 1:
157
+ return blend<1>(a, b);
158
+ case 2:
159
+ return blend<3>(a, b);
160
+ case 3:
161
+ return blend<7>(a, b);
162
+ }
163
+
164
+ return b;
165
+ }
166
+ static Vectorized<value_type> C10_ALWAYS_INLINE
167
+ loadu(const void* ptr, int count = size()) {
168
+ if (count == size()) {
169
+ return {
170
+ vec_vsx_ld(offset0, reinterpret_cast<const value_type*>(ptr)),
171
+ vec_vsx_ld(offset16, reinterpret_cast<const value_type*>(ptr))};
172
+ }
173
+
174
+ __at_align__ value_type tmp_values[size()] = {};
175
+ std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));
176
+
177
+ return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)};
178
+ }
179
+ void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
180
+ if (count == size()) {
181
+ vec_vsx_st(_vec0, offset0, reinterpret_cast<value_type*>(ptr));
182
+ vec_vsx_st(_vec1, offset16, reinterpret_cast<value_type*>(ptr));
183
+ } else if (count > 0) {
184
+ __at_align__ value_type tmp_values[size()];
185
+ vec_vsx_st(_vec0, offset0, tmp_values);
186
+ vec_vsx_st(_vec1, offset16, tmp_values);
187
+ std::memcpy(
188
+ ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
189
+ }
190
+ }
191
+ const double& operator[](int idx) const = delete;
192
+ double& operator[](int idx) = delete;
193
+ Vectorized<double> map(double (*const f)(double)) const {
194
+ Vectorized<double> ret;
195
+ for (const auto i : c10::irange(size()/2)) {
196
+ ret._vec0[i] = f(_vec0[i]);
197
+ }
198
+ for (const auto i : c10::irange(size()/2)) {
199
+ ret._vec1[i] = f(_vec1[i]);
200
+ }
201
+ return ret;
202
+ }
203
+
204
+ Vectorized<double> mapbi(double (*const f)(double, double), const Vectorized<double>& other)
205
+ const {
206
+ Vectorized<double> ret;
207
+ for (const auto i : c10::irange(size()/2)) {
208
+ ret._vec0[i] = f(_vec0[i], other._vec0[i]);
209
+ }
210
+ for (const auto i : c10::irange(size()/2)) {
211
+ ret._vec1[i] = f(_vec1[i], other._vec1[i]);
212
+ }
213
+ return ret;
214
+ }
215
+ Vectorized<double> C10_ALWAYS_INLINE abs() const {
216
+ return {vec_abs(_vec0), vec_abs(_vec1)};
217
+ }
218
+
219
+ Vectorized<double> C10_ALWAYS_INLINE acos() const {
220
+ return {Sleef_acosd2_u10(_vec0), Sleef_acosd2_u10(_vec1)};
221
+ }
222
+ Vectorized<double> C10_ALWAYS_INLINE asin() const {
223
+ return {Sleef_asind2_u10(_vec0), Sleef_asind2_u10(_vec1)};
224
+ }
225
+ Vectorized<double> atan() const {
226
+ return {Sleef_atand2_u10(_vec0), Sleef_atand2_u10(_vec1)};
227
+ }
228
+ Vectorized<double> atanh() const {
229
+ return {Sleef_atanhd2_u10(_vec0), Sleef_atanhd2_u10(_vec1)};
230
+ }
231
+ Vectorized<double> atan2(const Vectorized<double>& b) const {
232
+ return {Sleef_atan2d2_u10(_vec0, b._vec0), Sleef_atan2d2_u10(_vec1, b._vec1)};
233
+ }
234
+ Vectorized<double> copysign(const Vectorized<double> &sign) const {
235
+ return {Sleef_copysignd2(_vec0, sign._vec0), Sleef_copysignd2(_vec1, sign._vec1)};
236
+ }
237
+ Vectorized<double> erf() const {
238
+ return {Sleef_erfd2_u10(_vec0), Sleef_erfd2_u10(_vec1)};
239
+ }
240
+ Vectorized<double> erfc() const {
241
+ return {Sleef_erfcd2_u15(_vec0), Sleef_erfcd2_u15(_vec1)};
242
+ }
243
+ Vectorized<double> C10_ALWAYS_INLINE exp() const {
244
+ return {Sleef_expd2_u10(_vec0), Sleef_expd2_u10(_vec1)};
245
+ }
246
+ Vectorized<double> C10_ALWAYS_INLINE exp2() const {
247
+ return {Sleef_exp2d2_u10(_vec0), Sleef_exp2d2_u10(_vec1)};
248
+ }
249
+ Vectorized<double> expm1() const {
250
+ return {Sleef_expm1d2_u10(_vec0), Sleef_expm1d2_u10(_vec1)};
251
+ }
252
+ Vectorized<double> C10_ALWAYS_INLINE exp_u20() const {
253
+ return exp();
254
+ }
255
+
256
+ Vectorized<double> lgamma() const __ubsan_ignore_undefined__ {
257
+ return {Sleef_lgammad2_u10(_vec0), Sleef_lgammad2_u10(_vec1)};
258
+ }
259
+
260
+ Vectorized<double> erfinv() const {
261
+ return map(calc_erfinv);
262
+ }
263
+
264
+ Vectorized<double> angle() const {
265
+ auto tmp = blendv(
266
+ Vectorized<double>(0), Vectorized<double>(c10::pi<double>), *this < Vectorized<double>(0));
267
+ return blendv(tmp, *this, isnan());
268
+ }
269
+ Vectorized<double> real() const {
270
+ return *this;
271
+ }
272
+ Vectorized<double> imag() const {
273
+ return Vectorized<double>{0};
274
+ }
275
+ Vectorized<double> conj() const {
276
+ return *this;
277
+ }
278
+
279
+ Vectorized<double> C10_ALWAYS_INLINE log() const {
280
+ return {Sleef_logd2_u10(_vec0), Sleef_logd2_u10(_vec1)};
281
+ }
282
+ Vectorized<double> C10_ALWAYS_INLINE log10() const {
283
+ return {Sleef_log10d2_u10(_vec0), Sleef_log10d2_u10(_vec1)};
284
+ }
285
+ Vectorized<double> C10_ALWAYS_INLINE log1p() const {
286
+ return {Sleef_log1pd2_u10(_vec0), Sleef_log1pd2_u10(_vec1)};
287
+ }
288
+ Vectorized<double> C10_ALWAYS_INLINE log2() const {
289
+ return {Sleef_log2d2_u10(_vec0), Sleef_log2d2_u10(_vec1)};
290
+ }
291
+ Vectorized<double> C10_ALWAYS_INLINE ceil() const {
292
+ return {vec_ceil(_vec0), vec_ceil(_vec1)};
293
+ }
294
+ Vectorized<double> C10_ALWAYS_INLINE cos() const {
295
+ return {Sleef_cosd2_u10(_vec0), Sleef_cosd2_u10(_vec1)};
296
+ }
297
+ Vectorized<double> C10_ALWAYS_INLINE cosh() const {
298
+ return {Sleef_coshd2_u10(_vec0), Sleef_coshd2_u10(_vec1)};
299
+ }
300
+ Vectorized<double> C10_ALWAYS_INLINE floor() const {
301
+ return {vec_floor(_vec0), vec_floor(_vec1)};
302
+ }
303
+ Vectorized<double> C10_ALWAYS_INLINE neg() const {
304
+ return {vec_neg(_vec0), vec_neg(_vec1)};
305
+ }
306
+ Vectorized<double> C10_ALWAYS_INLINE round() const {
307
+ return {vec_rint(_vec0), vec_rint(_vec1)};
308
+ }
309
+ Vectorized<double> C10_ALWAYS_INLINE sin() const {
310
+ return {Sleef_sind2_u10(_vec0), Sleef_sind2_u10(_vec1)};
311
+ }
312
+ Vectorized<double> C10_ALWAYS_INLINE sinh() const {
313
+ return {Sleef_sinhd2_u10(_vec0), Sleef_sinhd2_u10(_vec1)};
314
+ }
315
+ Vectorized<double> C10_ALWAYS_INLINE tan() const {
316
+ return {Sleef_tand2_u10(_vec0), Sleef_tand2_u10(_vec1)};
317
+ }
318
+ Vectorized<double> C10_ALWAYS_INLINE tanh() const {
319
+ return {Sleef_tanhd2_u10(_vec0), Sleef_tanhd2_u10(_vec1)};
320
+ }
321
+ Vectorized<double> C10_ALWAYS_INLINE trunc() const {
322
+ return {vec_trunc(_vec0), vec_trunc(_vec1)};
323
+ }
324
+
325
+ Vectorized<double> C10_ALWAYS_INLINE frac() const {
326
+ return *this - trunc();
327
+ }
328
+
329
+ Vectorized<double> C10_ALWAYS_INLINE sqrt() const {
330
+ return {vec_sqrt(_vec0), vec_sqrt(_vec1)};
331
+ }
332
+ Vectorized<double> C10_ALWAYS_INLINE reciprocal() const {
333
+ return {
334
+ vec_div(vd_one, _vec0), // vec_re(_vec0) only gives an estimate, so use exact division.
335
+ vec_div(vd_one, _vec1)};
336
+ }
337
+ Vectorized<double> C10_ALWAYS_INLINE rsqrt() const {
338
+ return sqrt().reciprocal();
339
+ }
340
+
341
+ Vectorized<double> C10_ALWAYS_INLINE pow(const Vectorized<double>& b) const {
342
+ return {Sleef_powd2_u10(_vec0, b._vec0), Sleef_powd2_u10(_vec1, b._vec1)};
343
+ }
344
+ Vectorized<double> C10_ALWAYS_INLINE fmod(const Vectorized<double>& b) const {
345
+ return {Sleef_fmodd2(_vec0, b._vec0),Sleef_fmodd2(_vec1, b._vec1)};
346
+ }
347
+
348
+ Vectorized<double> hypot(const Vectorized<double>& b) const {
349
+ return {Sleef_hypotd2_u05(_vec0, b._vec0), Sleef_hypotd2_u05(_vec1, b._vec1)};
350
+ }
351
+
352
+ Vectorized<double> nextafter(const Vectorized<double>& b) const {
353
+ return {Sleef_nextafterd2(_vec0, b._vec0), Sleef_nextafterd2(_vec1, b._vec1)};
354
+ }
355
+
356
+ Vectorized<double> igamma(const Vectorized<double>& x) const {
357
+ return mapbi(calc_igamma, x);
358
+ }
359
+
360
+ Vectorized<double> igammac(const Vectorized<double>& x) const {
361
+ return mapbi(calc_igammac, x);
362
+ }
363
+
364
+
365
+ Vectorized<double> i0() const {
366
+ return map(calc_i0);
367
+ }
368
+
369
+ Vectorized<double> i0e() const {
370
+ return map(calc_i0e);
371
+ }
372
+
373
+ Vectorized<double> digamma() const {
374
+ return map(calc_digamma);
375
+ }
376
+
377
+ Vectorized<double> _nor() const {
378
+ return {vec_nor(_vec0, _vec0), vec_nor(_vec1, _vec1)};
379
+ }
380
+
381
+ Vectorized<double> isnan() const {
382
+ auto x = *this;
383
+ auto ret = (x == x);
384
+ return ret._nor();
385
+ }
386
+ bool has_inf_nan() const {
387
+ for (const auto i : c10::irange(size()/2)) {
388
+ if(_isnan(_vec0[i]) || _isinf(_vec0[i])) {
389
+ return true;
390
+ }
391
+ }
392
+ for (const auto i : c10::irange(size()/2)) {
393
+ if(_isnan(_vec1[i]) || _isinf(_vec1[i])) {
394
+ return true;
395
+ }
396
+ }
397
+ return false;
398
+ }
399
+
400
+ DEFINE_MEMBER_OP(operator==, double, vec_cmpeq)
401
+ DEFINE_MEMBER_OP(operator!=, double, vec_cmpne)
402
+ DEFINE_MEMBER_OP(operator<, double, vec_cmplt)
403
+ DEFINE_MEMBER_OP(operator<=, double, vec_cmple)
404
+ DEFINE_MEMBER_OP(operator>, double, vec_cmpgt)
405
+ DEFINE_MEMBER_OP(operator>=, double, vec_cmpge)
406
+ DEFINE_MEMBER_OP_AND_ONE(eq, double, vec_cmpeq)
407
+ DEFINE_MEMBER_OP_AND_ONE(ne, double, vec_cmpne)
408
+ DEFINE_MEMBER_OP_AND_ONE(lt, double, vec_cmplt)
409
+ DEFINE_MEMBER_OP_AND_ONE(le, double, vec_cmple)
410
+ DEFINE_MEMBER_OP_AND_ONE(gt, double, vec_cmpgt)
411
+ DEFINE_MEMBER_OP_AND_ONE(ge, double, vec_cmpge)
412
+ DEFINE_MEMBER_OP(operator+, double, vec_add)
413
+ DEFINE_MEMBER_OP(operator-, double, vec_sub)
414
+ DEFINE_MEMBER_OP(operator*, double, vec_mul)
415
+ DEFINE_MEMBER_OP(operator/, double, vec_div)
416
+ DEFINE_MEMBER_OP(maximum, double, vec_max_nan2)
417
+ DEFINE_MEMBER_OP(minimum, double, vec_min_nan2)
418
+ DEFINE_MEMBER_OP(operator&, double, vec_and)
419
+ DEFINE_MEMBER_OP(operator|, double, vec_or)
420
+ DEFINE_MEMBER_OP(operator^, double, vec_xor)
421
+ DEFINE_MEMBER_TERNARY_OP(madd, double, vec_madd)
422
+ };
423
+ template <>
424
+ Vectorized<double> inline maximum(
425
+ const Vectorized<double>& a,
426
+ const Vectorized<double>& b) {
427
+ return a.maximum(b);
428
+ }
429
+
430
+ template <>
431
+ Vectorized<double> inline minimum(
432
+ const Vectorized<double>& a,
433
+ const Vectorized<double>& b) {
434
+ return a.minimum(b);
435
+ }
436
+ } // namespace
437
+ } // namespace vec
438
+ } // namespace at
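Vectorized<double> holds four lanes as two vfloat64 halves; the transcendental ops route to Sleef while the simple ops map to single VSX intrinsics. A minimal sketch of the load/compute/store pattern, including the count-limited tail path; the function name is illustrative:

#include <ATen/cpu/vec/vec.h>

using at::vec::Vectorized;

void demo_double_lanes(const double* src, double* dst, int64_t n) {
  constexpr int64_t W = Vectorized<double>::size();  // 4 on VSX
  int64_t i = 0;
  for (; i + W <= n; i += W) {
    auto v = Vectorized<double>::loadu(src + i);
    auto r = (v * v + Vectorized<double>(1.0)).sqrt();  // sqrt(x^2 + 1) per lane
    r.store(dst + i);
  }
  if (i < n) {  // partial vector: loadu/store take an element count
    auto v = Vectorized<double>::loadu(src + i, n - i);
    (v * v + Vectorized<double>(1.0)).sqrt().store(dst + i, n - i);
  }
}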
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_float_vsx.h ADDED
@@ -0,0 +1,461 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cpu/vec/intrinsics.h>
4
+ #include <ATen/cpu/vec/vec_base.h>
5
+ #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
6
+ #include <sleef.h>
7
+ namespace at {
8
+ namespace vec {
9
+ // See Note [CPU_CAPABILITY namespace]
10
+
11
+ inline namespace CPU_CAPABILITY {
12
+
13
+ template <>
14
+ class Vectorized<float> {
15
+ private:
16
+ union {
17
+ struct {
18
+ vfloat32 _vec0;
19
+ vfloat32 _vec1;
20
+ };
21
+ struct {
22
+ vbool32 _vecb0;
23
+ vbool32 _vecb1;
24
+ };
25
+
26
+ } __attribute__((__may_alias__));
27
+
28
+ public:
29
+ using value_type = float;
30
+ using vec_internal_type = vfloat32;
31
+ using vec_internal_mask_type = vbool32;
32
+ using size_type = int;
33
+
34
+ static constexpr size_type size() {
35
+ return 8;
36
+ }
37
+ Vectorized() {}
38
+
39
+ C10_ALWAYS_INLINE Vectorized(vfloat32 v) : _vec0{v}, _vec1{v} {}
40
+ C10_ALWAYS_INLINE Vectorized(vbool32 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
41
+ C10_ALWAYS_INLINE Vectorized(vfloat32 v1, vfloat32 v2) : _vec0{v1}, _vec1{v2} {}
42
+ C10_ALWAYS_INLINE Vectorized(vbool32 v1, vbool32 v2) : _vecb0{v1}, _vecb1{v2} {}
43
+ C10_ALWAYS_INLINE Vectorized(float scalar)
44
+ : _vec0{vec_splats(scalar)}, _vec1{vec_splats(scalar)} {}
45
+ C10_ALWAYS_INLINE Vectorized(
46
+ float scalar1,
47
+ float scalar2,
48
+ float scalar3,
49
+ float scalar4,
50
+ float scalar5,
51
+ float scalar6,
52
+ float scalar7,
53
+ float scalar8)
54
+ : _vec0{vfloat32{scalar1, scalar2, scalar3, scalar4}},
55
+ _vec1{vfloat32{scalar5, scalar6, scalar7, scalar8}} {}
56
+ C10_ALWAYS_INLINE const vec_internal_type& vec0() const {
57
+ return _vec0;
58
+ }
59
+ C10_ALWAYS_INLINE const vec_internal_type& vec1() const {
60
+ return _vec1;
61
+ }
62
+
63
+ template <int64_t mask>
64
+ static std::enable_if_t<blendChoice(mask) == 0, Vectorized<float>> C10_ALWAYS_INLINE
65
+ blend(const Vectorized<float>& a, const Vectorized<float>& b) {
66
+ return a;
67
+ }
68
+
69
+ template <int64_t mask>
70
+ static std::enable_if_t<blendChoice(mask) == 1, Vectorized<float>> C10_ALWAYS_INLINE
71
+ blend(const Vectorized<float>& a, const Vectorized<float>& b) {
72
+ return b;
73
+ }
74
+
75
+ template <int64_t mask>
76
+ static std::enable_if_t<blendChoice(mask) == 2, Vectorized<float>> C10_ALWAYS_INLINE
77
+ blend(const Vectorized<float>& a, const Vectorized<float>& b) {
78
+ return {b._vec0, a._vec1};
79
+ }
80
+
81
+ template <int64_t mask>
82
+ static std::enable_if_t<blendChoice(mask) == 3, Vectorized<float>> C10_ALWAYS_INLINE
83
+ blend(const Vectorized<float>& a, const Vectorized<float>& b) {
84
+ return {a._vec0, b._vec1};
85
+ }
86
+
87
+ template <int64_t mask>
88
+ static std::enable_if_t<blendChoice(mask) == 4, Vectorized<float>> C10_ALWAYS_INLINE
89
+ blend(const Vectorized<float>& a, const Vectorized<float>& b) {
90
+ const vbool32 mask_1st = VsxMask1(mask);
91
+ return {(vfloat32)vec_sel(a._vec0, b._vec0, mask_1st), a._vec1};
92
+ }
93
+
94
+ template <int64_t mask>
95
+ static std::enable_if_t<blendChoice(mask) == 5, Vectorized<float>> C10_ALWAYS_INLINE
96
+ blend(const Vectorized<float>& a, const Vectorized<float>& b) {
97
+ const vbool32 mask_1st = VsxMask1(mask);
98
+ return {(vfloat32)vec_sel(a._vec0, b._vec0, mask_1st), b._vec1};
99
+ }
100
+
101
+ template <int64_t mask>
102
+ static std::enable_if_t<blendChoice(mask) == 6, Vectorized<float>> C10_ALWAYS_INLINE
103
+ blend(const Vectorized<float>& a, const Vectorized<float>& b) {
104
+ const vbool32 mask_2nd = VsxMask2(mask);
105
+ // generated masks
106
+ return {a._vec0, (vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)};
107
+ }
108
+
109
+ template <int64_t mask>
110
+ static std::enable_if_t<blendChoice(mask) == 7, Vectorized<float>> C10_ALWAYS_INLINE
111
+ blend(const Vectorized<float>& a, const Vectorized<float>& b) {
112
+ const vbool32 mask_2nd = VsxMask2(mask);
113
+ // generated masks
114
+ return {b._vec0, (vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)};
115
+ }
116
+
117
+ template <int64_t mask>
118
+ static std::enable_if_t<blendChoice(mask) == 8, Vectorized<float>> C10_ALWAYS_INLINE
119
+ blend(const Vectorized<float>& a, const Vectorized<float>& b) {
120
+ const vbool32 mask_1st = VsxMask1(mask);
121
+ const vbool32 mask_2nd = VsxMask2(mask);
122
+ return {
123
+ (vfloat32)vec_sel(a._vec0, b._vec0, mask_1st),
124
+ (vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)};
125
+ }
126
+
127
+ static Vectorized<float> C10_ALWAYS_INLINE blendv(
128
+ const Vectorized<float>& a,
129
+ const Vectorized<float>& b,
130
+ const Vectorized<float>& mask) {
131
+ // the mask used here is returned by a comparison of vec256
132
+ // assuming this, we can use the same mask directly with vec_sel
133
+ return {
134
+ vec_sel(a._vec0, b._vec0, mask._vecb0),
135
+ vec_sel(a._vec1, b._vec1, mask._vecb1)};
136
+ }
137
+
138
+ template <typename step_t>
139
+ static Vectorized<float> arange(float base = 0.f, step_t step = static_cast<step_t>(1)) {
140
+ return Vectorized<float>(
141
+ base,
142
+ base + step,
143
+ base + 2 * step,
144
+ base + 3 * step,
145
+ base + 4 * step,
146
+ base + 5 * step,
147
+ base + 6 * step,
148
+ base + 7 * step);
149
+ }
150
+ static Vectorized<float> set(
151
+ const Vectorized<float>& a,
152
+ const Vectorized<float>& b,
153
+ size_t count = size()) {
154
+ switch (count) {
155
+ case 0:
156
+ return a;
157
+ case 1:
158
+ return blend<1>(a, b);
159
+ case 2:
160
+ return blend<3>(a, b);
161
+ case 3:
162
+ return blend<7>(a, b);
163
+ case 4:
164
+ return blend<15>(a, b);
165
+ case 5:
166
+ return blend<31>(a, b);
167
+ case 6:
168
+ return blend<63>(a, b);
169
+ case 7:
170
+ return blend<127>(a, b);
171
+ }
172
+
173
+ return b;
174
+ }
175
+ static Vectorized<value_type> C10_ALWAYS_INLINE
176
+ loadu(const void* ptr, int count = size()) {
177
+ if (count == size()) {
178
+ return {
179
+ vec_vsx_ld(offset0, reinterpret_cast<const value_type*>(ptr)),
180
+ vec_vsx_ld(offset16, reinterpret_cast<const value_type*>(ptr))};
181
+ }
182
+
183
+ __at_align__ value_type tmp_values[size()] = {};
184
+ std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));
185
+
186
+ return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)};
187
+ }
188
+ void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
189
+ if (count == size()) {
190
+ vec_vsx_st(_vec0, offset0, reinterpret_cast<value_type*>(ptr));
191
+ vec_vsx_st(_vec1, offset16, reinterpret_cast<value_type*>(ptr));
192
+ } else if (count > 0) {
193
+ __at_align__ value_type tmp_values[size()];
194
+ vec_vsx_st(_vec0, offset0, tmp_values);
195
+ vec_vsx_st(_vec1, offset16, tmp_values);
196
+ std::memcpy(
197
+ ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
198
+ }
199
+ }
200
+
201
+ const float& operator[](int idx) const = delete;
202
+ float& operator[](int idx) = delete;
203
+
204
+ Vectorized<float> map(float (*const f)(float)) const {
205
+ Vectorized<float> ret;
206
+ for (int i = 0; i < size() / 2; i++) {
207
+ ret._vec0[i] = f(_vec0[i]);
208
+ }
209
+ for (int i = 0; i < size() / 2; i++) {
210
+ ret._vec1[i] = f(_vec1[i]);
211
+ }
212
+ return ret;
213
+ }
214
+
215
+ Vectorized<float> mapbi(float (*const f)(float, float), const Vectorized<float>& other)
216
+ const {
217
+ Vectorized<float> ret;
218
+ for (int i = 0; i < size() / 2; i++) {
219
+ ret._vec0[i] = f(_vec0[i], other._vec0[i]);
220
+ }
221
+ for (int i = 0; i < size() / 2; i++) {
222
+ ret._vec1[i] = f(_vec1[i], other._vec1[i]);
223
+ }
224
+ return ret;
225
+ }
226
+
227
+ Vectorized<float> _nor() const {
228
+ return {vec_nor(_vec0, _vec0), vec_nor(_vec1, _vec1)};
229
+ }
230
+
231
+ Vectorized<float> isnan() const {
232
+ auto x = *this;
233
+ auto ret = (x == x);
234
+ return ret._nor();
235
+ }
236
+
237
+ bool has_inf_nan() const {
238
+ for (const auto i : c10::irange(size()/2)) {
239
+ if(_isnan(_vec0[i]) || _isinf(_vec0[i])) {
240
+ return true;
241
+ }
242
+ }
243
+ for (const auto i : c10::irange(size()/2)) {
244
+ if(_isnan(_vec1[i]) || _isinf(_vec1[i])) {
245
+ return true;
246
+ }
247
+ }
248
+ return false;
249
+ }
250
+
251
+ int zero_mask() const {
252
+ // returns an integer mask where each zero element is translated to a 1 bit
253
+ // and every other element to a 0 bit
254
+ //__m256 cmp = _mm256_cmp_ps(values, _mm256_set1_ps(0.0f), _CMP_EQ_OQ);
255
+ auto cmp = (*this == zero);
256
+ // return _mm256_movemask_ps(cmp);
257
+ // possible simulation: mask = lvsl(0); vbpermq(vec, mask << 5)
258
+ vuint64 result0 = vec_vbpermq((vuint8)cmp._vecb0, mask_zero_bits);
259
+ vuint64 result1 = vec_vbpermq((vuint8)cmp._vecb1, mask_zero_bits);
260
+ return (result0[1] >> 12 | (result1[1] >> 8));
261
+ }
262
+
263
+ Vectorized<float> C10_ALWAYS_INLINE abs() const {
264
+ return {vec_abs(_vec0), vec_abs(_vec1)};
265
+ }
266
+
267
+ Vectorized<float> C10_ALWAYS_INLINE acos() const {
268
+ return {Sleef_acosf4_u10(_vec0), Sleef_acosf4_u10(_vec1)};
269
+ }
270
+ Vectorized<float> C10_ALWAYS_INLINE asin() const {
271
+ return {Sleef_asinf4_u10(_vec0), Sleef_asinf4_u10(_vec1)};
272
+ }
273
+ Vectorized<float> atan() const {
274
+ return {Sleef_atanf4_u10(_vec0), Sleef_atanf4_u10(_vec1)};
275
+ }
276
+ Vectorized<float> atanh() const {
277
+ return {Sleef_atanhf4_u10(_vec0), Sleef_atanhf4_u10(_vec1)};
278
+ }
279
+ Vectorized<float> atan2(const Vectorized<float>& b) const {
280
+ return {Sleef_atan2f4_u10(_vec0, b._vec0), Sleef_atan2f4_u10(_vec1, b._vec1)};
281
+ }
282
+ Vectorized<float> copysign(const Vectorized<float> &sign) const {
283
+ return {Sleef_copysignf4(_vec0, sign._vec0), Sleef_copysignf4(_vec1, sign._vec1)};
284
+ }
285
+ Vectorized<float> lgamma() const {
286
+ return {Sleef_lgammaf4_u10(_vec0), Sleef_lgammaf4_u10(_vec1)};
287
+ }
288
+ Vectorized<float> erf() const {
289
+ return {Sleef_erff4_u10(_vec0), Sleef_erff4_u10(_vec1)};
290
+ }
291
+
292
+ Vectorized<float> erfc() const {
293
+ return {Sleef_erfcf4_u15(_vec0), Sleef_erfcf4_u15(_vec1)};
294
+ }
295
+
296
+ Vectorized<float> erfinv() const {
297
+ return map(calc_erfinv);
298
+ }
299
+
300
+ Vectorized<float> angle() const {
301
+ auto tmp = blendv(
302
+ Vectorized<float>(0), Vectorized<float>(c10::pi<float>), *this < Vectorized<float>(0));
303
+ return blendv(tmp, *this, isnan());
304
+ }
305
+ Vectorized<float> real() const {
306
+ return *this;
307
+ }
308
+ Vectorized<float> imag() const {
309
+ return Vectorized<float>{0};
310
+ }
311
+ Vectorized<float> conj() const {
312
+ return *this;
313
+ }
314
+
315
+ Vectorized<float> C10_ALWAYS_INLINE exp() const {
316
+ return {Sleef_expf4_u10(_vec0), Sleef_expf4_u10(_vec1)};
317
+ }
318
+ Vectorized<float> C10_ALWAYS_INLINE exp2() const {
319
+ return {Sleef_exp2f4_u10(_vec0), Sleef_exp2f4_u10(_vec1)};
320
+ }
321
+ Vectorized<float> expm1() const {
322
+ return {Sleef_expm1f4_u10(_vec0), Sleef_expm1f4_u10(_vec1)};
323
+ }
324
+ Vectorized<float> C10_ALWAYS_INLINE exp_u20() const {
325
+ return exp();
326
+ }
327
+
328
+ Vectorized<float> C10_ALWAYS_INLINE log() const {
329
+ return {Sleef_logf4_u10(_vec0), Sleef_logf4_u10(_vec1)};
330
+ }
331
+ Vectorized<float> C10_ALWAYS_INLINE log10() const {
332
+ return {Sleef_log10f4_u10(_vec0), Sleef_log10f4_u10(_vec1)};
333
+ }
334
+ Vectorized<float> C10_ALWAYS_INLINE log1p() const {
335
+ return {Sleef_log1pf4_u10(_vec0), Sleef_log1pf4_u10(_vec1)};
336
+ }
337
+ Vectorized<float> C10_ALWAYS_INLINE log2() const {
338
+ return {Sleef_log2f4_u10(_vec0), Sleef_log2f4_u10(_vec1)};
339
+ }
340
+ Vectorized<float> C10_ALWAYS_INLINE ceil() const {
341
+ return {vec_ceil(_vec0), vec_ceil(_vec1)};
342
+ }
343
+ Vectorized<float> C10_ALWAYS_INLINE cos() const {
344
+ return {Sleef_cosf4_u10(_vec0), Sleef_cosf4_u10(_vec1)};
345
+ }
346
+ Vectorized<float> C10_ALWAYS_INLINE cosh() const {
347
+ return {Sleef_coshf4_u10(_vec0), Sleef_coshf4_u10(_vec1)};
348
+ }
349
+ Vectorized<float> C10_ALWAYS_INLINE floor() const {
350
+ return {vec_floor(_vec0), vec_floor(_vec1)};
351
+ }
352
+ Vectorized<float> C10_ALWAYS_INLINE neg() const {
353
+ return {vec_neg(_vec0), vec_neg(_vec1)};
354
+ }
355
+
356
+ Vectorized<float> C10_ALWAYS_INLINE round() const {
357
+ return {vec_round(_vec0), vec_round(_vec1)};
358
+ }
359
+ Vectorized<float> C10_ALWAYS_INLINE sin() const {
360
+ return {Sleef_sinf4_u10(_vec0), Sleef_sinf4_u10(_vec1)};
361
+ }
362
+ Vectorized<float> C10_ALWAYS_INLINE sinh() const {
363
+ return {Sleef_sinhf4_u10(_vec0), Sleef_sinhf4_u10(_vec1)};
364
+ }
365
+ Vectorized<float> C10_ALWAYS_INLINE tan() const {
366
+ return {Sleef_tanf4_u10(_vec0), Sleef_tanf4_u10(_vec1)};
367
+ }
368
+ Vectorized<float> C10_ALWAYS_INLINE tanh() const {
369
+ return {Sleef_tanhf4_u10(_vec0), Sleef_tanhf4_u10(_vec1)};
370
+ }
371
+ Vectorized<float> C10_ALWAYS_INLINE trunc() const {
372
+ return {vec_trunc(_vec0), vec_trunc(_vec1)};
373
+ }
374
+
375
+ Vectorized<float> C10_ALWAYS_INLINE frac() const {
376
+ return *this - trunc();
377
+ }
378
+
379
+ Vectorized<float> C10_ALWAYS_INLINE sqrt() const {
380
+ return {vec_sqrt(_vec0), vec_sqrt(_vec1)};
381
+ }
382
+ Vectorized<float> C10_ALWAYS_INLINE reciprocal() const {
383
+ return Vectorized<float>(one) / (*this);
384
+ }
385
+ Vectorized<float> C10_ALWAYS_INLINE rsqrt() const {
386
+ return sqrt().reciprocal();
387
+ }
388
+
389
+ Vectorized<float> C10_ALWAYS_INLINE pow(const Vectorized<float>& exp) const {
390
+ return {Sleef_powf4_u10(_vec0, exp._vec0), Sleef_powf4_u10(_vec1, exp._vec1)};
391
+ }
392
+
393
+ Vectorized<float> fmod(const Vectorized<float>& b) const {
394
+ return {Sleef_fmodf4(_vec0, b._vec0),Sleef_fmodf4(_vec1, b._vec1)};
395
+ }
396
+
397
+ Vectorized<float> hypot(const Vectorized<float>& b) const {
398
+ return {Sleef_hypotf4_u05(_vec0, b._vec0), Sleef_hypotf4_u05(_vec1, b._vec1)};
399
+ }
400
+
401
+ Vectorized<float> nextafter(const Vectorized<float>& b) const {
402
+ return {Sleef_nextafterf4(_vec0, b._vec0), Sleef_nextafterf4(_vec1, b._vec1)};
403
+ }
404
+
405
+ Vectorized<float> igamma(const Vectorized<float>& x) const {
406
+ return mapbi(calc_igamma, x);
407
+ }
408
+
409
+ Vectorized<float> igammac(const Vectorized<float>& x) const {
410
+ return mapbi(calc_igammac, x);
411
+ }
412
+
413
+ Vectorized<float> i0() const {
414
+ return map(calc_i0);
415
+ }
416
+
417
+ Vectorized<float> i0e() const {
418
+ return map(calc_i0e);
419
+ }
420
+
421
+ Vectorized<float> digamma() const {
422
+ return map(calc_digamma);
423
+ }
424
+
425
+ DEFINE_MEMBER_OP(operator==, float, vec_cmpeq)
426
+ DEFINE_MEMBER_OP(operator!=, float, vec_cmpne)
427
+ DEFINE_MEMBER_OP(operator<, float, vec_cmplt)
428
+ DEFINE_MEMBER_OP(operator<=, float, vec_cmple)
429
+ DEFINE_MEMBER_OP(operator>, float, vec_cmpgt)
430
+ DEFINE_MEMBER_OP(operator>=, float, vec_cmpge)
431
+ DEFINE_MEMBER_OP_AND_ONE(eq, float, vec_cmpeq)
432
+ DEFINE_MEMBER_OP_AND_ONE(ne, float, vec_cmpne)
433
+ DEFINE_MEMBER_OP_AND_ONE(lt, float, vec_cmplt)
434
+ DEFINE_MEMBER_OP_AND_ONE(le, float, vec_cmple)
435
+ DEFINE_MEMBER_OP_AND_ONE(gt, float, vec_cmpgt)
436
+ DEFINE_MEMBER_OP_AND_ONE(ge, float, vec_cmpge)
437
+ DEFINE_MEMBER_OP(operator+, float, vec_add)
438
+ DEFINE_MEMBER_OP(operator-, float, vec_sub)
439
+ DEFINE_MEMBER_OP(operator*, float, vec_mul)
440
+ DEFINE_MEMBER_OP(operator/, float, vec_div)
441
+ DEFINE_MEMBER_OP(maximum, float, vec_max_nan2)
442
+ DEFINE_MEMBER_OP(minimum, float, vec_min_nan2)
443
+ DEFINE_MEMBER_OP(operator&, float, vec_and)
444
+ DEFINE_MEMBER_OP(operator|, float, vec_or)
445
+ DEFINE_MEMBER_OP(operator^, float, vec_xor)
446
+ DEFINE_MEMBER_TERNARY_OP(madd, float, vec_madd)
447
+ };
448
+
449
+ template <>
450
+ Vectorized<float> inline maximum(const Vectorized<float>& a, const Vectorized<float>& b) {
451
+ return a.maximum(b);
452
+ }
453
+
454
+ template <>
455
+ Vectorized<float> inline minimum(const Vectorized<float>& a, const Vectorized<float>& b) {
456
+ return a.minimum(b);
457
+ }
458
+
459
+ } // namespace
460
+ } // namespace vec
461
+ } // namespace at
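Vectorized<float> follows the same two-half layout with eight lanes. A short sketch combining a Sleef-backed elementwise op with the maximum() specialization defined above; the function name is illustrative:

#include <ATen/cpu/vec/vec.h>

using at::vec::Vectorized;

void demo_float_lanes(const float* src, float* dst) {
  auto v = Vectorized<float>::loadu(src);                     // 8 float lanes
  auto softplus = (v.exp() + Vectorized<float>(1.0f)).log();  // log(1 + e^x), Sleef-backed
  auto clamped = at::vec::maximum(softplus, Vectorized<float>(0.0f));
  clamped.store(dst);
}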
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_int16_vsx.h ADDED
@@ -0,0 +1,368 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cpu/vec/intrinsics.h>
4
+ #include <ATen/cpu/vec/vec_base.h>
5
+ #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
6
+ namespace at {
7
+ namespace vec {
8
+ // See Note [CPU_CAPABILITY namespace]
9
+ inline namespace CPU_CAPABILITY {
10
+
11
+ template <>
12
+ class Vectorized<int16_t> {
13
+ private:
14
+ union {
15
+ struct {
16
+ vint16 _vec0;
17
+ vint16 _vec1;
18
+ };
19
+ struct {
20
+ vbool16 _vecb0;
21
+ vbool16 _vecb1;
22
+ };
23
+
24
+ } __attribute__((__may_alias__));
25
+
26
+ public:
27
+ using value_type = int16_t;
28
+ using vec_internal_type = vint16;
29
+ using vec_internal_mask_type = vbool16;
30
+ using size_type = int;
31
+ static constexpr size_type size() {
32
+ return 16;
33
+ }
34
+ Vectorized() {}
35
+ C10_ALWAYS_INLINE Vectorized(vint16 v) : _vec0{v}, _vec1{v} {}
36
+ C10_ALWAYS_INLINE Vectorized(vbool16 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
37
+ C10_ALWAYS_INLINE Vectorized(vint16 v1, vint16 v2) : _vec0{v1}, _vec1{v2} {}
38
+ C10_ALWAYS_INLINE Vectorized(vbool16 v1, vbool16 v2) : _vecb0{v1}, _vecb1{v2} {}
39
+ C10_ALWAYS_INLINE Vectorized(int16_t scalar)
40
+ : _vec0{vec_splats(scalar)}, _vec1{vec_splats(scalar)} {}
41
+
42
+ C10_ALWAYS_INLINE Vectorized(
43
+ int16_t scalar1,
44
+ int16_t scalar2,
45
+ int16_t scalar3,
46
+ int16_t scalar4,
47
+ int16_t scalar5,
48
+ int16_t scalar6,
49
+ int16_t scalar7,
50
+ int16_t scalar8,
51
+ int16_t scalar9,
52
+ int16_t scalar10,
53
+ int16_t scalar11,
54
+ int16_t scalar12,
55
+ int16_t scalar13,
56
+ int16_t scalar14,
57
+ int16_t scalar15,
58
+ int16_t scalar16)
59
+ : _vec0{vint16{
60
+ scalar1,
61
+ scalar2,
62
+ scalar3,
63
+ scalar4,
64
+ scalar5,
65
+ scalar6,
66
+ scalar7,
67
+ scalar8}},
68
+ _vec1{vint16{
69
+ scalar9,
70
+ scalar10,
71
+ scalar11,
72
+ scalar12,
73
+ scalar13,
74
+ scalar14,
75
+ scalar15,
76
+ scalar16}} {}
77
+ C10_ALWAYS_INLINE const vec_internal_type& vec0() const {
78
+ return _vec0;
79
+ }
80
+ C10_ALWAYS_INLINE const vec_internal_type& vec1() const {
81
+ return _vec1;
82
+ }
83
+
84
+ template <uint64_t mask>
85
+ static std::enable_if_t<mask == 0, Vectorized<int16_t>> C10_ALWAYS_INLINE
86
+ blend(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
87
+ return a;
88
+ }
89
+
90
+ template <uint64_t mask>
91
+ static std::enable_if_t<(mask & 65535) == 65535, Vectorized<int16_t>>
92
+ C10_ALWAYS_INLINE blend(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
93
+ return b;
94
+ }
95
+
96
+ template <uint64_t mask>
97
+ static std::enable_if_t<mask == 255, Vectorized<int16_t>> C10_ALWAYS_INLINE
98
+ blend(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
99
+ return {b._vec0, a._vec1};
100
+ }
101
+
102
+ template <uint64_t mask>
103
+ static std::enable_if_t<(mask > 0 && mask < 255), Vectorized<int16_t>>
104
+ C10_ALWAYS_INLINE blend(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
105
+ constexpr int16_t g0 = (mask & 1) * 0xffff;
106
+ constexpr int16_t g1 = ((mask & 2) >> 1) * 0xffff;
107
+ constexpr int16_t g2 = ((mask & 4) >> 2) * 0xffff;
108
+ constexpr int16_t g3 = ((mask & 8) >> 3) * 0xffff;
109
+ constexpr int16_t g4 = ((mask & 16) >> 4) * 0xffff;
110
+ constexpr int16_t g5 = ((mask & 32) >> 5) * 0xffff;
111
+ constexpr int16_t g6 = ((mask & 64) >> 6) * 0xffff;
112
+ constexpr int16_t g7 = ((mask & 128) >> 7) * 0xffff;
113
+ const vint16 mask_1st = vint16{g0, g1, g2, g3, g4, g5, g6, g7};
114
+
115
+ return {(vint16)vec_sel(a._vec0, b._vec0, (vbool16)mask_1st), a._vec1};
116
+ }
117
+
118
+ template <uint64_t mask>
119
+ static std::enable_if_t<
120
+ (mask > 255 && (mask & 65535) != 65535 && ((mask & 255) == 255)),
121
+ Vectorized<int16_t>>
122
+ C10_ALWAYS_INLINE blend(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
123
+ constexpr int16_t g0_2 = ((mask & 256) >> 8) * 0xffff;
124
+ constexpr int16_t g1_2 = ((mask & 512) >> 9) * 0xffff;
125
+ constexpr int16_t g2_2 = ((mask & 1024) >> 10) * 0xffff;
126
+ constexpr int16_t g3_2 = ((mask & 2048) >> 11) * 0xffff;
127
+ constexpr int16_t g4_2 = ((mask & 4096) >> 12) * 0xffff;
128
+ constexpr int16_t g5_2 = ((mask & 8192) >> 13) * 0xffff;
129
+ constexpr int16_t g6_2 = ((mask & 16384) >> 14) * 0xffff;
130
+ constexpr int16_t g7_2 = ((mask & 32768) >> 15) * 0xffff;
131
+
132
+ const vint16 mask_2nd =
133
+ vint16{g0_2, g1_2, g2_2, g3_2, g4_2, g5_2, g6_2, g7_2};
134
+ // generated masks
135
+ return {b._vec0, (vint16)vec_sel(a._vec1, b._vec1, (vbool16)mask_2nd)};
136
+ }
137
+
138
+ template <uint64_t mask>
139
+ static std::enable_if_t<
140
+ (mask > 255 && ((mask & 65535) != 65535) && ((mask & 255) == 0)),
141
+ Vectorized<int16_t>>
142
+ C10_ALWAYS_INLINE blend(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
143
+ constexpr int16_t mask2 = (mask & 65535) >> 8;
144
+ constexpr int16_t g0_2 = (mask2 & 1) * 0xffff;
145
+ constexpr int16_t g1_2 = ((mask2 & 2) >> 1) * 0xffff;
146
+ constexpr int16_t g2_2 = ((mask2 & 4) >> 2) * 0xffff;
147
+ constexpr int16_t g3_2 = ((mask2 & 8) >> 3) * 0xffff;
148
+ constexpr int16_t g4_2 = ((mask2 & 16) >> 4) * 0xffff;
149
+ constexpr int16_t g5_2 = ((mask2 & 32) >> 5) * 0xffff;
150
+ constexpr int16_t g6_2 = ((mask2 & 64) >> 6) * 0xffff;
151
+ constexpr int16_t g7_2 = ((mask2 & 128) >> 7) * 0xffff;
152
+
153
+ const vint16 mask_2nd =
154
+ vint16{g0_2, g1_2, g2_2, g3_2, g4_2, g5_2, g6_2, g7_2};
155
+ // generated masks
156
+ return {a, (vint16)vec_sel(a._vec1, b._vec1, (vbool16)mask_2nd)};
157
+ }
158
+
159
+ template <uint64_t mask>
160
+ static std::enable_if_t<
161
+ (mask > 255 && ((mask & 65535) != 65535) && ((mask & 255) != 0) &&
162
+ ((mask & 255) != 255)),
163
+ Vectorized<int16_t>>
164
+ C10_ALWAYS_INLINE blend(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
165
+ constexpr int16_t g0 = (mask & 1) * 0xffff;
166
+ constexpr int16_t g1 = ((mask & 2) >> 1) * 0xffff;
167
+ constexpr int16_t g2 = ((mask & 4) >> 2) * 0xffff;
168
+ constexpr int16_t g3 = ((mask & 8) >> 3) * 0xffff;
169
+ constexpr int16_t g4 = ((mask & 16) >> 4) * 0xffff;
170
+ constexpr int16_t g5 = ((mask & 32) >> 5) * 0xffff;
171
+ constexpr int16_t g6 = ((mask & 64) >> 6) * 0xffff;
172
+ constexpr int16_t g7 = ((mask & 128) >> 7) * 0xffff;
173
+ constexpr int16_t mask2 = (mask & 65535) >> 8;
174
+ constexpr int16_t g0_2 = (mask2 & 1) * 0xffff;
175
+ constexpr int16_t g1_2 = ((mask2 & 2) >> 1) * 0xffff;
176
+ constexpr int16_t g2_2 = ((mask2 & 4) >> 2) * 0xffff;
177
+ constexpr int16_t g3_2 = ((mask2 & 8) >> 3) * 0xffff;
178
+ constexpr int16_t g4_2 = ((mask2 & 16) >> 4) * 0xffff;
179
+ constexpr int16_t g5_2 = ((mask2 & 32) >> 5) * 0xffff;
180
+ constexpr int16_t g6_2 = ((mask2 & 64) >> 6) * 0xffff;
181
+ constexpr int16_t g7_2 = ((mask2 & 128) >> 7) * 0xffff;
182
+
183
+ const vint16 mask_1st = vint16{g0, g1, g2, g3, g4, g5, g6, g7};
184
+ const vint16 mask_2nd =
185
+ vint16{g0_2, g1_2, g2_2, g3_2, g4_2, g5_2, g6_2, g7_2};
186
+ // generated masks
187
+ return {
188
+ (vint16)vec_sel(a._vec0, b._vec0, (vbool16)mask_1st),
189
+ (vint16)vec_sel(a._vec1, b._vec1, (vbool16)mask_2nd)};
190
+ }
191
+
192
+ static Vectorized<int16_t> C10_ALWAYS_INLINE blendv(
193
+ const Vectorized<int16_t>& a,
194
+ const Vectorized<int16_t>& b,
195
+ const Vectorized<int16_t>& mask) {
196
+ // the mask used here is returned by a comparison of vec256 values
197
+ // assuming this, we can use the same mask directly with vec_sel
198
+ // warning: an Intel-style mask will not work properly
199
+ return {
200
+ vec_sel(a._vec0, b._vec0, mask._vecb0),
201
+ vec_sel(a._vec1, b._vec1, mask._vecb1)};
202
+ }
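+ // Illustrative sketch (not part of the upstream header; the names x, y, m
+ // and r are hypothetical): the mask is expected to come from a comparison,
+ // which yields all-ones / all-zeros lanes that vec_sel can consume directly:
+ //
+ //   Vectorized<int16_t> m = x < y;                              // lane mask
+ //   Vectorized<int16_t> r = Vectorized<int16_t>::blendv(x, y, m);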
203
+
204
+ template <typename step_t>
205
+ static Vectorized<int16_t> arange(int16_t base = 0, step_t step = static_cast<step_t>(1)) {
206
+ return Vectorized<int16_t>(
207
+ base,
208
+ base + step,
209
+ base + 2 * step,
210
+ base + 3 * step,
211
+ base + 4 * step,
212
+ base + 5 * step,
213
+ base + 6 * step,
214
+ base + 7 * step,
215
+ base + 8 * step,
216
+ base + 9 * step,
217
+ base + 10 * step,
218
+ base + 11 * step,
219
+ base + 12 * step,
220
+ base + 13 * step,
221
+ base + 14 * step,
222
+ base + 15 * step);
223
+ }
224
+ static Vectorized<int16_t> set(
225
+ const Vectorized<int16_t>& a,
226
+ const Vectorized<int16_t>& b,
227
+ size_t count = size()) {
228
+ switch (count) {
229
+ case 0:
230
+ return a;
231
+ case 1:
232
+ return blend<1>(a, b);
233
+ case 2:
234
+ return blend<3>(a, b);
235
+ case 3:
236
+ return blend<7>(a, b);
237
+ case 4:
238
+ return blend<15>(a, b);
239
+ case 5:
240
+ return blend<31>(a, b);
241
+ case 6:
242
+ return blend<63>(a, b);
243
+ case 7:
244
+ return blend<127>(a, b);
245
+ case 8:
246
+ return blend<255>(a, b);
247
+ case 9:
248
+ return blend<511>(a, b);
249
+ case 10:
250
+ return blend<1023>(a, b);
251
+ case 11:
252
+ return blend<2047>(a, b);
253
+ case 12:
254
+ return blend<4095>(a, b);
255
+ case 13:
256
+ return blend<8191>(a, b);
257
+ case 14:
258
+ return blend<16383>(a, b);
259
+ case 15:
260
+ return blend<32767>(a, b);
261
+ }
262
+ return b;
263
+ }
264
+ static Vectorized<value_type> C10_ALWAYS_INLINE
265
+ loadu(const void* ptr, int count = size()) {
266
+ if (count == size()) {
267
+ return {
268
+ vec_vsx_ld(offset0, reinterpret_cast<const value_type*>(ptr)),
269
+ vec_vsx_ld(offset16, reinterpret_cast<const value_type*>(ptr))};
270
+ }
271
+
272
+ __at_align__ value_type tmp_values[size()] = {};
273
+ std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));
274
+
275
+ return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)};
276
+ }
277
+ void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
278
+ if (count == size()) {
279
+ vec_vsx_st(_vec0, offset0, reinterpret_cast<value_type*>(ptr));
280
+ vec_vsx_st(_vec1, offset16, reinterpret_cast<value_type*>(ptr));
281
+ } else if (count > 0) {
282
+ __at_align__ value_type tmp_values[size()];
283
+ vec_vsx_st(_vec0, offset0, tmp_values);
284
+ vec_vsx_st(_vec1, offset16, tmp_values);
285
+ std::memcpy(ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
286
+ }
287
+ }
288
+ const int16_t& operator[](int idx) const = delete;
289
+ int16_t& operator[](int idx) = delete;
290
+
291
+ Vectorized<int16_t> angle() const {
292
+ return blendv(
293
+ Vectorized<int16_t>(0), Vectorized<int16_t>(c10::pi<int16_t>), *this < Vectorized<int16_t>(0));
294
+ }
295
+ Vectorized<int16_t> real() const {
296
+ return *this;
297
+ }
298
+ Vectorized<int16_t> imag() const {
299
+ return Vectorized<int16_t>{0};
300
+ }
301
+ Vectorized<int16_t> conj() const {
302
+ return *this;
303
+ }
304
+
305
+ Vectorized<int16_t> C10_ALWAYS_INLINE abs() const {
306
+ return {vec_abs(_vec0), vec_abs(_vec1)};
307
+ }
308
+
309
+ Vectorized<int16_t> C10_ALWAYS_INLINE neg() const {
310
+ return {vec_neg(_vec0), vec_neg(_vec1)};
311
+ }
312
+
313
+ DEFINE_MEMBER_UNARY_OP(operator~, int16_t, vec_not)
314
+ DEFINE_MEMBER_OP(operator==, int16_t, vec_cmpeq)
315
+ DEFINE_MEMBER_OP(operator!=, int16_t, vec_cmpne)
316
+ DEFINE_MEMBER_OP(operator<, int16_t, vec_cmplt)
317
+ DEFINE_MEMBER_OP(operator<=, int16_t, vec_cmple)
318
+ DEFINE_MEMBER_OP(operator>, int16_t, vec_cmpgt)
319
+ DEFINE_MEMBER_OP(operator>=, int16_t, vec_cmpge)
320
+ DEFINE_MEMBER_OP_AND_ONE(eq, int16_t, vec_cmpeq)
321
+ DEFINE_MEMBER_OP_AND_ONE(ne, int16_t, vec_cmpne)
322
+ DEFINE_MEMBER_OP_AND_ONE(lt, int16_t, vec_cmplt)
323
+ DEFINE_MEMBER_OP_AND_ONE(le, int16_t, vec_cmple)
324
+ DEFINE_MEMBER_OP_AND_ONE(gt, int16_t, vec_cmpgt)
325
+ DEFINE_MEMBER_OP_AND_ONE(ge, int16_t, vec_cmpge)
326
+ DEFINE_MEMBER_OP(operator+, int16_t, vec_add)
327
+ DEFINE_MEMBER_OP(operator-, int16_t, vec_sub)
328
+ DEFINE_MEMBER_OP(operator*, int16_t, vec_mul)
329
+ DEFINE_MEMBER_EMULATE_BINARY_OP(operator/, int16_t, /)
330
+ DEFINE_MEMBER_OP(maximum, int16_t, vec_max)
331
+ DEFINE_MEMBER_OP(minimum, int16_t, vec_min)
332
+ DEFINE_MEMBER_OP(operator&, int16_t, vec_and)
333
+ DEFINE_MEMBER_OP(operator|, int16_t, vec_or)
334
+ DEFINE_MEMBER_OP(operator^, int16_t, vec_xor)
335
+ };
336
+
337
+ template <>
338
+ Vectorized<int16_t> inline operator<<(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
339
+ vuint16 shift_vec0 = reinterpret_cast<vuint16>(b.vec0());
340
+ vuint16 shift_vec1 = reinterpret_cast<vuint16>(b.vec1());
341
+ return Vectorized<int16_t>{vec_sl(a.vec0(), shift_vec0), vec_sl(a.vec1(), shift_vec1)};
342
+ }
343
+
344
+ template <>
345
+ Vectorized<int16_t> inline operator>>(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
346
+ vuint16 shift_vec0 = reinterpret_cast<vuint16>(b.vec0());
347
+ vuint16 shift_vec1 = reinterpret_cast<vuint16>(b.vec1());
348
+ return Vectorized<int16_t>{vec_sr(a.vec0(), shift_vec0), vec_sr(a.vec1(), shift_vec1)};
349
+ }
350
+
351
+ template <>
352
+ Vectorized<int16_t> inline maximum(
353
+ const Vectorized<int16_t>& a,
354
+ const Vectorized<int16_t>& b) {
355
+ return a.maximum(b);
356
+ }
357
+
358
+ template <>
359
+ Vectorized<int16_t> inline minimum(
360
+ const Vectorized<int16_t>& a,
361
+ const Vectorized<int16_t>& b) {
362
+ return a.minimum(b);
363
+ }
364
+
365
+
366
+ } // namespace
367
+ } // namespace vec
368
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_int32_vsx.h ADDED
@@ -0,0 +1,298 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cpu/vec/intrinsics.h>
4
+ #include <ATen/cpu/vec/vec_base.h>
5
+ #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
6
+ namespace at {
7
+ namespace vec {
8
+ // See Note [CPU_CAPABILITY namespace]
9
+ inline namespace CPU_CAPABILITY {
10
+
11
+ template <>
12
+ class Vectorized<int32_t> {
13
+ private:
14
+ union {
15
+ struct {
16
+ vint32 _vec0;
17
+ vint32 _vec1;
18
+ };
19
+ struct {
20
+ vbool32 _vecb0;
21
+ vbool32 _vecb1;
22
+ };
23
+
24
+ } __attribute__((__may_alias__));
25
+
26
+ public:
27
+ using value_type = int32_t;
28
+ using vec_internal_type = vint32;
29
+ using vec_internal_mask_type = vbool32;
30
+ using size_type = int;
31
+ static constexpr size_type size() {
32
+ return 8;
33
+ }
34
+ Vectorized() {}
35
+ C10_ALWAYS_INLINE Vectorized(vint32 v) : _vec0{v}, _vec1{v} {}
36
+ C10_ALWAYS_INLINE Vectorized(vbool32 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
37
+ C10_ALWAYS_INLINE Vectorized(vint32 v1, vint32 v2) : _vec0{v1}, _vec1{v2} {}
38
+ C10_ALWAYS_INLINE Vectorized(vbool32 v1, vbool32 v2) : _vecb0{v1}, _vecb1{v2} {}
39
+ C10_ALWAYS_INLINE Vectorized(int32_t scalar)
40
+ : _vec0{vec_splats(scalar)}, _vec1{vec_splats(scalar)} {}
41
+ C10_ALWAYS_INLINE Vectorized(
42
+ int32_t scalar1,
43
+ int32_t scalar2,
44
+ int32_t scalar3,
45
+ int32_t scalar4,
46
+ int32_t scalar5,
47
+ int32_t scalar6,
48
+ int32_t scalar7,
49
+ int32_t scalar8)
50
+ : _vec0{vint32{scalar1, scalar2, scalar3, scalar4}},
51
+ _vec1{vint32{scalar5, scalar6, scalar7, scalar8}} {}
52
+ C10_ALWAYS_INLINE const vec_internal_type& vec0() const {
53
+ return _vec0;
54
+ }
55
+ C10_ALWAYS_INLINE const vec_internal_type& vec1() const {
56
+ return _vec1;
57
+ }
58
+
59
+ template <uint64_t mask>
60
+ static std::enable_if_t<mask == 0, Vectorized<int32_t>> C10_ALWAYS_INLINE
61
+ blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
62
+ return a;
63
+ }
64
+
65
+ template <uint64_t mask>
66
+ static std::enable_if_t<(mask & 255) == 255, Vectorized<int32_t>> C10_ALWAYS_INLINE
67
+ blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
68
+ return b;
69
+ }
70
+
71
+ template <uint64_t mask>
72
+ static std::enable_if_t<mask == 15, Vectorized<int32_t>> C10_ALWAYS_INLINE
73
+ blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
74
+ return {b._vec0, a._vec1};
75
+ }
76
+
77
+ template <uint64_t mask>
78
+ static std::enable_if_t<(mask > 0 && mask < 15), Vectorized<int32_t>>
79
+ C10_ALWAYS_INLINE blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
80
+ constexpr uint32_t g0 = (mask & 1) * 0xffffffff;
81
+ constexpr uint32_t g1 = ((mask & 2) >> 1) * 0xffffffff;
82
+ constexpr uint32_t g2 = ((mask & 4) >> 2) * 0xffffffff;
83
+ constexpr uint32_t g3 = ((mask & 8) >> 3) * 0xffffffff;
84
+ const vbool32 mask_1st = (vbool32){g0, g1, g2, g3};
85
+
86
+ return {(vint32)vec_sel(a._vec0, b._vec0, (vbool32)mask_1st), a._vec1};
87
+ }
88
+
89
+ template <uint64_t mask>
90
+ static std::enable_if_t<
91
+ (mask > 15 && (mask & 255) != 255 && ((mask & 15) == 15)),
92
+ Vectorized<int32_t>>
93
+ C10_ALWAYS_INLINE blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
94
+ constexpr uint32_t mask2 = (mask & 255) >> 4;
95
+ constexpr uint32_t g0_2 = (mask2 & 1) * 0xffffffff;
96
+ constexpr uint32_t g1_2 = ((mask2 & 2) >> 1) * 0xffffffff;
97
+ constexpr uint32_t g2_2 = ((mask2 & 4) >> 2) * 0xffffffff;
98
+ constexpr uint32_t g3_2 = ((mask2 & 8) >> 3) * 0xffffffff;
99
+
100
+ const vbool32 mask_2nd = (vbool32){g0_2, g1_2, g2_2, g3_2};
101
+ // generated masks
102
+ return {b._vec0, (vint32)vec_sel(a._vec1, b._vec1, (vbool32)mask_2nd)};
103
+ }
104
+
105
+ template <uint64_t mask>
106
+ static std::enable_if_t<
107
+ (mask > 15 && ((mask & 255) != 255) && ((mask & 15) == 0)),
108
+ Vectorized<int32_t>>
109
+ C10_ALWAYS_INLINE blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
110
+ constexpr uint32_t mask2 = (mask & 255) >> 4;
111
+ constexpr uint32_t g0_2 = (mask2 & 1) * 0xffffffff;
112
+ constexpr uint32_t g1_2 = ((mask2 & 2) >> 1) * 0xffffffff;
113
+ constexpr uint32_t g2_2 = ((mask2 & 4) >> 2) * 0xffffffff;
114
+ constexpr uint32_t g3_2 = ((mask2 & 8) >> 3) * 0xffffffff;
115
+
116
+ const vbool32 mask_2nd = (vbool32){g0_2, g1_2, g2_2, g3_2};
117
+ // generated masks
118
+ return {a, (vint32)vec_sel(a._vec1, b._vec1, (vbool32)mask_2nd)};
119
+ }
120
+
121
+ template <uint64_t mask>
122
+ static std::enable_if_t<
123
+ (mask > 15 && ((mask & 255) != 255) && ((mask & 15) != 0) &&
124
+ ((mask & 15) != 15)),
125
+ Vectorized<int32_t>>
126
+ C10_ALWAYS_INLINE blend(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
127
+ constexpr uint32_t g0 = (mask & 1) * 0xffffffff;
128
+ constexpr uint32_t g1 = ((mask & 2) >> 1) * 0xffffffff;
129
+ constexpr uint32_t g2 = ((mask & 4) >> 2) * 0xffffffff;
130
+ constexpr uint32_t g3 = ((mask & 8) >> 3) * 0xffffffff;
131
+ constexpr uint32_t mask2 = (mask & 255) >> 4;
132
+ constexpr uint32_t g0_2 = (mask2 & 1) * 0xffffffff;
133
+ constexpr uint32_t g1_2 = ((mask2 & 2) >> 1) * 0xffffffff;
134
+ constexpr uint32_t g2_2 = ((mask2 & 4) >> 2) * 0xffffffff;
135
+ constexpr uint32_t g3_2 = ((mask2 & 8) >> 3) * 0xffffffff;
136
+
137
+ const vbool32 mask_1st = (vbool32){g0, g1, g2, g3};
138
+ const vbool32 mask_2nd = (vbool32){g0_2, g1_2, g2_2, g3_2};
139
+ // generated masks
140
+ return {
141
+ (vint32)vec_sel(a._vec0, b._vec0, (vbool32)mask_1st),
142
+ (vint32)vec_sel(a._vec1, b._vec1, (vbool32)mask_2nd)};
143
+ }
144
+
145
+ static Vectorized<int32_t> C10_ALWAYS_INLINE blendv(
146
+ const Vectorized<int32_t>& a,
147
+ const Vectorized<int32_t>& b,
148
+ const Vectorized<int32_t>& mask) {
149
+ // the mask used here is returned by a comparison of vec256 values
150
+ // assuming this, we can use the same mask directly with vec_sel
151
+ // warning: an Intel-style mask will not work properly
152
+ return {
153
+ vec_sel(a._vec0, b._vec0, mask._vecb0),
154
+ vec_sel(a._vec1, b._vec1, mask._vecb1)};
155
+ }
156
+
157
+ template <typename step_t>
158
+ static Vectorized<int32_t> arange(int32_t base = 0, step_t step = static_cast<step_t>(1)) {
159
+ return Vectorized<int32_t>(
160
+ base,
161
+ base + step,
162
+ base + 2 * step,
163
+ base + 3 * step,
164
+ base + 4 * step,
165
+ base + 5 * step,
166
+ base + 6 * step,
167
+ base + 7 * step);
168
+ }
169
+ static Vectorized<int32_t> set(
170
+ const Vectorized<int32_t>& a,
171
+ const Vectorized<int32_t>& b,
172
+ size_t count = size()) {
173
+ switch (count) {
174
+ case 0:
175
+ return a;
176
+ case 1:
177
+ return blend<1>(a, b);
178
+ case 2:
179
+ return blend<3>(a, b);
180
+ case 3:
181
+ return blend<7>(a, b);
182
+ case 4:
183
+ return blend<15>(a, b);
184
+ case 5:
185
+ return blend<31>(a, b);
186
+ case 6:
187
+ return blend<63>(a, b);
188
+ case 7:
189
+ return blend<127>(a, b);
190
+ }
191
+
192
+ return b;
193
+ }
194
+ static Vectorized<value_type> C10_ALWAYS_INLINE
195
+ loadu(const void* ptr, int count = size()) {
196
+ if (count == size()) {
197
+ return {
198
+ vec_vsx_ld(offset0, reinterpret_cast<const value_type*>(ptr)),
199
+ vec_vsx_ld(offset16, reinterpret_cast<const value_type*>(ptr))};
200
+ }
201
+
202
+ __at_align__ value_type tmp_values[size()] = {};
203
+ std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));
204
+
205
+ return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)};
206
+ }
207
+ void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
208
+ if (count == size()) {
209
+ vec_vsx_st(_vec0, offset0, reinterpret_cast<value_type*>(ptr));
210
+ vec_vsx_st(_vec1, offset16, reinterpret_cast<value_type*>(ptr));
211
+ } else if (count > 0) {
212
+ __at_align__ value_type tmp_values[size()];
213
+ vec_vsx_st(_vec0, offset0, tmp_values);
214
+ vec_vsx_st(_vec1, offset16, tmp_values);
215
+ std::memcpy(
216
+ ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
217
+ }
218
+ }
219
+ const int32_t& operator[](int idx) const = delete;
220
+ int32_t& operator[](int idx) = delete;
221
+
222
+ Vectorized<int32_t> angle() const {
223
+ return blendv(
224
+ Vectorized<int32_t>(0), Vectorized<int32_t>(c10::pi<int32_t>), *this < Vectorized<int32_t>(0));
225
+ }
226
+ Vectorized<int32_t> real() const {
227
+ return *this;
228
+ }
229
+ Vectorized<int32_t> imag() const {
230
+ return Vectorized<int32_t>{0};
231
+ }
232
+ Vectorized<int32_t> conj() const {
233
+ return *this;
234
+ }
235
+
236
+ Vectorized<int32_t> C10_ALWAYS_INLINE abs() const {
237
+ return {vec_abs(_vec0), vec_abs(_vec1)};
238
+ }
239
+
240
+ Vectorized<int32_t> C10_ALWAYS_INLINE neg() const {
241
+ return {vec_neg(_vec0), vec_neg(_vec1)};
242
+ }
243
+
244
+ DEFINE_MEMBER_UNARY_OP(operator~, int32_t, vec_not)
245
+ DEFINE_MEMBER_OP(operator==, int32_t, vec_cmpeq)
246
+ DEFINE_MEMBER_OP(operator!=, int32_t, vec_cmpne)
247
+ DEFINE_MEMBER_OP(operator<, int32_t, vec_cmplt)
248
+ DEFINE_MEMBER_OP(operator<=, int32_t, vec_cmple)
249
+ DEFINE_MEMBER_OP(operator>, int32_t, vec_cmpgt)
250
+ DEFINE_MEMBER_OP(operator>=, int32_t, vec_cmpge)
251
+ DEFINE_MEMBER_OP_AND_ONE(eq, int32_t, vec_cmpeq)
252
+ DEFINE_MEMBER_OP_AND_ONE(ne, int32_t, vec_cmpne)
253
+ DEFINE_MEMBER_OP_AND_ONE(lt, int32_t, vec_cmplt)
254
+ DEFINE_MEMBER_OP_AND_ONE(le, int32_t, vec_cmple)
255
+ DEFINE_MEMBER_OP_AND_ONE(gt, int32_t, vec_cmpgt)
256
+ DEFINE_MEMBER_OP_AND_ONE(ge, int32_t, vec_cmpge)
257
+ DEFINE_MEMBER_OP(operator+, int32_t, vec_add)
258
+ DEFINE_MEMBER_OP(operator-, int32_t, vec_sub)
259
+ DEFINE_MEMBER_OP(operator*, int32_t, vec_mul)
260
+ DEFINE_MEMBER_EMULATE_BINARY_OP(operator/, int32_t, /)
261
+ DEFINE_MEMBER_OP(maximum, int32_t, vec_max)
262
+ DEFINE_MEMBER_OP(minimum, int32_t, vec_min)
263
+ DEFINE_MEMBER_OP(operator&, int32_t, vec_and)
264
+ DEFINE_MEMBER_OP(operator|, int32_t, vec_or)
265
+ DEFINE_MEMBER_OP(operator^, int32_t, vec_xor)
266
+ };
267
+
268
+ template <>
269
+ Vectorized<int32_t> inline operator<<(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
270
+ vuint32 shift_vec0 = reinterpret_cast<vuint32>(b.vec0());
271
+ vuint32 shift_vec1 = reinterpret_cast<vuint32>(b.vec1());
272
+ return Vectorized<int32_t>{vec_sl(a.vec0(), shift_vec0), vec_sl(a.vec1(), shift_vec1)};
273
+ }
274
+
275
+ template <>
276
+ Vectorized<int32_t> inline operator>>(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
277
+ vuint32 shift_vec0 = reinterpret_cast<vuint32>(b.vec0());
278
+ vuint32 shift_vec1 = reinterpret_cast<vuint32>(b.vec1());
279
+ return Vectorized<int32_t>{vec_sr(a.vec0(), shift_vec0), vec_sr(a.vec1(), shift_vec1)};
280
+ }
281
+
282
+ template <>
283
+ Vectorized<int32_t> inline maximum(
284
+ const Vectorized<int32_t>& a,
285
+ const Vectorized<int32_t>& b) {
286
+ return a.maximum(b);
287
+ }
288
+
289
+ template <>
290
+ Vectorized<int32_t> inline minimum(
291
+ const Vectorized<int32_t>& a,
292
+ const Vectorized<int32_t>& b) {
293
+ return a.minimum(b);
294
+ }
295
+
296
+ } // namespace
297
+ } // namespace vec
298
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_int64_vsx.h ADDED
@@ -0,0 +1,251 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cpu/vec/intrinsics.h>
4
+ #include <ATen/cpu/vec/vec_base.h>
5
+ #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
6
+ namespace at {
7
+ namespace vec {
8
+ // See Note [CPU_CAPABILITY namespace]
9
+ inline namespace CPU_CAPABILITY {
10
+
11
+ template <>
12
+ class Vectorized<int64_t> {
13
+ private:
14
+ union {
15
+ struct {
16
+ vint64 _vec0;
17
+ vint64 _vec1;
18
+ };
19
+ struct {
20
+ vbool64 _vecb0;
21
+ vbool64 _vecb1;
22
+ };
23
+
24
+ } __attribute__((__may_alias__));
25
+
26
+ public:
27
+ using value_type = int64_t;
28
+ using vec_internal_type = vint64;
29
+ using vec_internal_mask_type = vbool64;
30
+ using size_type = int;
31
+ using ElementType = signed long long;
32
+ static constexpr size_type size() {
33
+ return 4;
34
+ }
35
+ Vectorized() {}
36
+ C10_ALWAYS_INLINE Vectorized(vint64 v) : _vec0{v}, _vec1{v} {}
37
+ C10_ALWAYS_INLINE Vectorized(vbool64 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
38
+ C10_ALWAYS_INLINE Vectorized(vint64 v1, vint64 v2) : _vec0{v1}, _vec1{v2} {}
39
+ C10_ALWAYS_INLINE Vectorized(vbool64 v1, vbool64 v2) : _vecb0{v1}, _vecb1{v2} {}
40
+ C10_ALWAYS_INLINE Vectorized(int64_t scalar)
41
+ : _vec0{vec_splats(scalar)}, _vec1{vec_splats(scalar)} {}
42
+ C10_ALWAYS_INLINE Vectorized(
43
+ int64_t scalar1,
44
+ int64_t scalar2,
45
+ int64_t scalar3,
46
+ int64_t scalar4)
47
+ : _vec0{vint64{scalar1, scalar2}}, _vec1{vint64{scalar3, scalar4}} {}
48
+
49
+ C10_ALWAYS_INLINE const vec_internal_type& vec0() const {
50
+ return _vec0;
51
+ }
52
+ C10_ALWAYS_INLINE const vec_internal_type& vec1() const {
53
+ return _vec1;
54
+ }
55
+
56
+ template <uint64_t mask>
57
+ static std::enable_if_t<mask == 0, Vectorized<int64_t>> C10_ALWAYS_INLINE
58
+ blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
59
+ return a;
60
+ }
61
+
62
+ template <uint64_t mask>
63
+ static std::enable_if_t<mask == 3, Vectorized<int64_t>> C10_ALWAYS_INLINE
64
+ blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
65
+ return {b._vec0, a._vec1};
66
+ }
67
+
68
+ template <uint64_t mask>
69
+ static std::enable_if_t<(mask & 15) == 15, Vectorized<int64_t>> C10_ALWAYS_INLINE
70
+ blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
71
+ return b;
72
+ }
73
+
74
+ template <uint64_t mask>
75
+ static std::enable_if_t<(mask > 0 && mask < 3), Vectorized<int64_t>> C10_ALWAYS_INLINE
76
+ blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
77
+ constexpr uint64_t g0 = (mask & 1) * 0xffffffffffffffff;
78
+ constexpr uint64_t g1 = ((mask & 2) >> 1) * 0xffffffffffffffff;
79
+ const vbool64 mask_1st = (vbool64){g0, g1};
80
+ return {(vint64)vec_sel(a._vec0, b._vec0, (vbool64)mask_1st), a._vec1};
81
+ }
82
+
83
+ template <uint64_t mask>
84
+ static std::enable_if_t<(mask > 3) && (mask & 3) == 0, Vectorized<int64_t>>
85
+ C10_ALWAYS_INLINE blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
86
+ constexpr uint64_t g0_2 = ((mask & 4) >> 2) * 0xffffffffffffffff;
87
+ constexpr uint64_t g1_2 = ((mask & 8) >> 3) * 0xffffffffffffffff;
88
+
89
+ const vbool64 mask_2nd = (vbool64){g0_2, g1_2};
90
+ return {a._vec0, (vint64)vec_sel(a._vec1, b._vec1, (vbool64)mask_2nd)};
91
+ }
92
+
93
+ template <uint64_t mask>
94
+ static std::enable_if_t<
95
+ (mask > 3) && (mask & 3) != 0 && (mask & 15) != 15,
96
+ Vectorized<int64_t>>
97
+ C10_ALWAYS_INLINE blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
98
+ constexpr uint64_t g0 = (mask & 1) * 0xffffffffffffffff;
99
+ constexpr uint64_t g1 = ((mask & 2) >> 1) * 0xffffffffffffffff;
100
+ constexpr uint64_t g0_2 = ((mask & 4) >> 2) * 0xffffffffffffffff;
101
+ constexpr uint64_t g1_2 = ((mask & 8) >> 3) * 0xffffffffffffffff;
102
+
103
+ const vbool64 mask_1st = (vbool64){g0, g1};
104
+ const vbool64 mask_2nd = (vbool64){g0_2, g1_2};
105
+ return {
106
+ (vint64)vec_sel(a._vec0, b._vec0, (vbool64)mask_1st),
107
+ (vint64)vec_sel(a._vec1, b._vec1, (vbool64)mask_2nd)};
108
+ }
109
+
110
+ static Vectorized<int64_t> C10_ALWAYS_INLINE blendv(
111
+ const Vectorized<int64_t>& a,
112
+ const Vectorized<int64_t>& b,
113
+ const Vectorized<int64_t>& mask) {
114
+ // the mask used here is returned by a comparison of vec256 values
115
+
116
+ return {
117
+ vec_sel(a._vec0, b._vec0, mask._vecb0),
118
+ vec_sel(a._vec1, b._vec1, mask._vecb1)};
119
+ }
120
+ template <typename step_t>
121
+ static Vectorized<int64_t> arange(int64_t base = 0, step_t step = static_cast<step_t>(1)) {
122
+ return Vectorized<int64_t>(base, base + step, base + 2 * step, base + 3 * step);
123
+ }
124
+
125
+ static Vectorized<int64_t> C10_ALWAYS_INLINE
126
+ set(const Vectorized<int64_t>& a,
127
+ const Vectorized<int64_t>& b,
128
+ size_t count = size()) {
129
+ switch (count) {
130
+ case 0:
131
+ return a;
132
+ case 1:
133
+ return blend<1>(a, b);
134
+ case 2:
135
+ return blend<3>(a, b);
136
+ case 3:
137
+ return blend<7>(a, b);
138
+ }
139
+
140
+ return b;
141
+ }
142
+ static Vectorized<value_type> C10_ALWAYS_INLINE
143
+ loadu(const void* ptr, int count = size()) {
144
+ if (count == size()) {
145
+ static_assert(sizeof(double) == sizeof(value_type));
146
+ const double* dptr = reinterpret_cast<const double*>(ptr);
147
+ return {// treat it as double load
148
+ (vint64)vec_vsx_ld(offset0, dptr),
149
+ (vint64)vec_vsx_ld(offset16, dptr)};
150
+ }
151
+
152
+ __at_align__ double tmp_values[size()] = {};
153
+ std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));
154
+
155
+ return {
156
+ (vint64)vec_vsx_ld(offset0, tmp_values),
157
+ (vint64)vec_vsx_ld(offset16, tmp_values)};
158
+ }
159
+ void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
160
+ if (count == size()) {
161
+ double* dptr = reinterpret_cast<double*>(ptr);
162
+ vec_vsx_st((vfloat64)_vec0, offset0, dptr);
163
+ vec_vsx_st((vfloat64)_vec1, offset16, dptr);
164
+ } else if (count > 0) {
165
+ __at_align__ double tmp_values[size()];
166
+ vec_vsx_st((vfloat64)_vec0, offset0, tmp_values);
167
+ vec_vsx_st((vfloat64)_vec1, offset16, tmp_values);
168
+ std::memcpy(
169
+ ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
170
+ }
171
+ }
172
+ const int64_t& operator[](int idx) const = delete;
173
+ int64_t& operator[](int idx) = delete;
174
+
175
+ Vectorized<int64_t> angle() const {
176
+ return blendv(
177
+ Vectorized<int64_t>(0), Vectorized<int64_t>(c10::pi<int64_t>), *this < Vectorized<int64_t>(0));
178
+ }
179
+ Vectorized<int64_t> real() const {
180
+ return *this;
181
+ }
182
+ Vectorized<int64_t> imag() const {
183
+ return Vectorized<int64_t>{0};
184
+ }
185
+ Vectorized<int64_t> conj() const {
186
+ return *this;
187
+ }
188
+
189
+ Vectorized<int64_t> C10_ALWAYS_INLINE abs() const {
190
+ return {vec_abs(_vec0), vec_abs(_vec1)};
191
+ }
192
+
193
+ Vectorized<int64_t> C10_ALWAYS_INLINE neg() const {
194
+ return {vec_neg(_vec0), vec_neg(_vec1)};
195
+ }
196
+
197
+ DEFINE_MEMBER_UNARY_OP(operator~, int64_t, vec_not)
198
+ DEFINE_MEMBER_OP(operator==, int64_t, vec_cmpeq)
199
+ DEFINE_MEMBER_OP(operator!=, int64_t, vec_cmpne)
200
+ DEFINE_MEMBER_OP(operator<, int64_t, vec_cmplt)
201
+ DEFINE_MEMBER_OP(operator<=, int64_t, vec_cmple)
202
+ DEFINE_MEMBER_OP(operator>, int64_t, vec_cmpgt)
203
+ DEFINE_MEMBER_OP(operator>=, int64_t, vec_cmpge)
204
+ DEFINE_MEMBER_OP_AND_ONE(eq, int64_t, vec_cmpeq)
205
+ DEFINE_MEMBER_OP_AND_ONE(ne, int64_t, vec_cmpne)
206
+ DEFINE_MEMBER_OP_AND_ONE(lt, int64_t, vec_cmplt)
207
+ DEFINE_MEMBER_OP_AND_ONE(le, int64_t, vec_cmple)
208
+ DEFINE_MEMBER_OP_AND_ONE(gt, int64_t, vec_cmpgt)
209
+ DEFINE_MEMBER_OP_AND_ONE(ge, int64_t, vec_cmpge)
210
+ DEFINE_MEMBER_OP(operator+, int64_t, vec_add)
211
+ DEFINE_MEMBER_OP(operator-, int64_t, vec_sub)
212
+ DEFINE_MEMBER_OP(operator*, int64_t, vec_mul)
213
+ DEFINE_MEMBER_OP(operator/, int64_t, vec_div)
214
+ DEFINE_MEMBER_OP(maximum, int64_t, vec_max)
215
+ DEFINE_MEMBER_OP(minimum, int64_t, vec_min)
216
+ DEFINE_MEMBER_OP(operator&, int64_t, vec_and)
217
+ DEFINE_MEMBER_OP(operator|, int64_t, vec_or)
218
+ DEFINE_MEMBER_OP(operator^, int64_t, vec_xor)
219
+ };
220
+
221
+ template <>
222
+ Vectorized<int64_t> inline operator<<(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
223
+ vuint64 shift_vec0 = reinterpret_cast<vuint64>(b.vec0());
224
+ vuint64 shift_vec1 = reinterpret_cast<vuint64>(b.vec1());
225
+ return Vectorized<int64_t>{vec_sl(a.vec0(), shift_vec0), vec_sl(a.vec1(), shift_vec1)};
226
+ }
227
+
228
+ template <>
229
+ Vectorized<int64_t> inline operator>>(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
230
+ vuint64 shift_vec0 = reinterpret_cast<vuint64>(b.vec0());
231
+ vuint64 shift_vec1 = reinterpret_cast<vuint64>(b.vec1());
232
+ return Vectorized<int64_t>{vec_sr(a.vec0(), shift_vec0), vec_sr(a.vec1(), shift_vec1)};
233
+ }
234
+
235
+ template <>
236
+ Vectorized<int64_t> inline maximum(
237
+ const Vectorized<int64_t>& a,
238
+ const Vectorized<int64_t>& b) {
239
+ return a.maximum(b);
240
+ }
241
+
242
+ template <>
243
+ Vectorized<int64_t> inline minimum(
244
+ const Vectorized<int64_t>& a,
245
+ const Vectorized<int64_t>& b) {
246
+ return a.minimum(b);
247
+ }
248
+
249
+ } // namespace
250
+ } // namespace vec
251
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_qint32_vsx.h ADDED
@@ -0,0 +1,245 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cpu/vec/intrinsics.h>
4
+ #include <ATen/cpu/vec/vec_base.h>
5
+ #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
6
+ #include <c10/util/qint32.h>
7
+ #include <array>
8
+
9
+ // This file defines Vectorized<> for the quantized types.
10
+ //
11
+ //
12
+ // Currently, we simply use these classes as efficient converters between
13
+ // the quantized types and Vectorized<float>, usually in bandwidth-bound cases
14
+ // where doing the arithmetic in full-precision is acceptable (e.g.
15
+ // elementwise operators).
16
+ //
17
+ //
18
+ // Conversions are as follows:
19
+ // Vectorized<qint32> -> 1x Vectorized<float>
20
+ //
21
+ // The size of the returned float vector is specified by the special
22
+ // constexpr function float_num_vecs. The type of the value returned
23
+ // from dequantize (and expected as an argument to quantize) is
24
+ // specified by float_vec_return_type.
25
+ //
26
+ // When writing kernels with these vectors, it is expected that floating-
27
+ // point operations will be carried out in a loop over Vectorized<T>::float_num_vecs
28
+ // iterations.
29
+
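+ // Illustrative sketch (not part of the upstream header; src, dst, scale and
+ // zero_point are hypothetical): a bandwidth-bound kernel dequantizes,
+ // operates in float, and re-quantizes, looping float_num_vecs() times
+ // (1 for qint32):
+ //
+ //   using qvec = at::vec::Vectorized<c10::qint32>;
+ //   using fvec = at::vec::Vectorized<float>;
+ //   auto fx = qvec::loadu(src).dequantize(
+ //       fvec(scale), fvec(zero_point), fvec(-zero_point * scale));
+ //   for (size_t i = 0; i < qvec::float_num_vecs(); ++i) {
+ //     fx[i] = fx[i] * fx[i];  // any elementwise float op
+ //   }
+ //   qvec::quantize(fx, scale, zero_point, 1.0f / scale).store(dst);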
30
+ namespace at {
31
+ namespace vec {
32
+ inline namespace CPU_CAPABILITY {
33
+
34
+ template <>
35
+ struct Vectorized<c10::qint32> {
36
+ private:
37
+ union {
38
+ struct {
39
+ vint32 _vec0;
40
+ vint32 _vec1;
41
+ };
42
+ struct {
43
+ vbool32 _vecb0;
44
+ vbool32 _vecb1;
45
+ };
46
+
47
+ } __attribute__((__may_alias__));
48
+
49
+ public:
50
+ Vectorized() {}
51
+
52
+ using size_type = int;
53
+ static constexpr size_type size() {
54
+ return 8;
55
+ }
56
+
57
+ static constexpr size_t float_num_vecs() {
58
+ return 1;
59
+ }
60
+ static constexpr int int_num_vecs() {
61
+ return 1;
62
+ }
63
+ using float_vec_return_type = std::array<Vectorized<float>, 1>;
64
+ using int_vec_return_type = std::array<Vectorized<c10::qint32>, 1>;
65
+ using value_type = c10::qint32::underlying;
66
+ using vec_internal_type = vint32;
67
+ using vec_internal_mask_type = vbool32;
68
+ C10_ALWAYS_INLINE Vectorized(vint32 v) : _vec0{v}, _vec1{v} {}
69
+ C10_ALWAYS_INLINE Vectorized(vbool32 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
70
+ C10_ALWAYS_INLINE Vectorized(vint32 v1, vint32 v2) : _vec0{v1}, _vec1{v2} {}
71
+ C10_ALWAYS_INLINE Vectorized(vbool32 v1, vbool32 v2) : _vecb0{v1}, _vecb1{v2} {}
72
+
73
+ Vectorized(const c10::qint32& val)
74
+ : _vec0(vec_splats(val.val_)), _vec1(vec_splats(val.val_)) {}
75
+
76
+ static Vectorized<c10::qint32> C10_ALWAYS_INLINE
77
+ loadu(const void* ptr, int count = size()) {
78
+ if (count == size()) {
79
+ return {
80
+ vec_vsx_ld(offset0, reinterpret_cast<const value_type*>(ptr)),
81
+ vec_vsx_ld(offset16, reinterpret_cast<const value_type*>(ptr))};
82
+ }
83
+
84
+ __at_align__ value_type tmp_values[size()] = {};
85
+ std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));
86
+
87
+ return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)};
88
+ }
89
+ void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
90
+ if (count == size()) {
91
+ vec_vsx_st(_vec0, offset0, reinterpret_cast<value_type*>(ptr));
92
+ vec_vsx_st(_vec1, offset16, reinterpret_cast<value_type*>(ptr));
93
+ } else if (count > 0) {
94
+ __at_align__ value_type tmp_values[size()];
95
+ vec_vsx_st(_vec0, offset0, tmp_values);
96
+ vec_vsx_st(_vec1, offset16, tmp_values);
97
+ std::memcpy(
98
+ ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
99
+ }
100
+ }
101
+
102
+ C10_ALWAYS_INLINE const vec_internal_type& vec0() const {
103
+ return _vec0;
104
+ }
105
+ C10_ALWAYS_INLINE const vec_internal_type& vec1() const {
106
+ return _vec1;
107
+ }
108
+
109
+ float_vec_return_type dequantize(
110
+ Vectorized<float> scale,
111
+ Vectorized<float> zero_point,
112
+ Vectorized<float> scale_zp_premul) const {
113
+ vfloat32 float_vals0 = vec_float(_vec0);
114
+ vfloat32 float_vals1 = vec_float(_vec1);
115
+ vfloat32 scale_vec0 = scale.vec0();
116
+ vfloat32 scale_vec1 = scale.vec1();
117
+ vfloat32 scale_zp_premul0 = scale_zp_premul.vec0();
118
+ vfloat32 scale_zp_premul1 = scale_zp_premul.vec1();
119
+ return {Vectorized<float>{
120
+ vec_madd(scale_vec0, float_vals0, scale_zp_premul0),
121
+ vec_madd(scale_vec1, float_vals1, scale_zp_premul1)}};
122
+ }
123
+
124
+ float_vec_return_type dequantize(
125
+ Vectorized<float> scale,
126
+ Vectorized<float> zero_point) const {
127
+ vfloat32 float_vals0 = vec_float(_vec0);
128
+ vfloat32 float_vals1 = vec_float(_vec1);
129
+ vfloat32 scale_vec0 = scale.vec0();
130
+ vfloat32 scale_vec1 = scale.vec1();
131
+ vfloat32 zero_point0 = zero_point.vec0();
132
+ vfloat32 zero_point1 = zero_point.vec1();
133
+ return {Vectorized<float>{
134
+ (float_vals0 - zero_point0) * scale_vec0,
135
+ (float_vals1 - zero_point1) * scale_vec1}};
136
+ }
137
+
138
+ static Vectorized<c10::qint32> quantize(
139
+ const float_vec_return_type& rhs,
140
+ float scale,
141
+ int32_t zero_point,
142
+ float inverse_scale) {
143
+ Vectorized<c10::qint32> retval;
144
+
145
+ const vint32 vmin = vec_splats(std::numeric_limits<value_type>::min());
146
+ const vint32 vmax = vec_splats(std::numeric_limits<value_type>::max());
147
+ vfloat32 inverse_scale_v = vec_splats(inverse_scale);
148
+ vfloat32 vec_zero_point = vec_splats((float)(zero_point));
149
+ Vectorized<float> vf0 = rhs[0];
150
+
151
+ vfloat32 vecf0 = vf0.vec0();
152
+ vfloat32 vecf1 = vf0.vec1();
153
+ vecf0 = vec_mul(vecf0, inverse_scale_v);
154
+ vecf1 = vec_mul(vecf1, inverse_scale_v);
155
+ vecf0 = vec_add(vec_rint(vecf0), vec_zero_point);
156
+ vecf1 = vec_add(vec_rint(vecf1), vec_zero_point);
157
+ vint32 veci0 = vec_signed(vecf0);
158
+ vint32 veci1 = vec_signed(vecf1);
159
+
160
+ veci0 = vec_max(veci0, vmin);
161
+ veci1 = vec_max(veci1, vmin);
162
+ veci0 = vec_min(veci0, vmax);
163
+ veci1 = vec_min(veci1, vmax);
164
+
165
+ return {veci0, veci1};
166
+ }
167
+
168
+ Vectorized<c10::qint32> relu(Vectorized<c10::qint32> zero_point) const {
169
+ return {vec_max(_vec0, zero_point._vec0), vec_max(_vec1, zero_point._vec1)};
170
+ }
171
+
172
+ Vectorized<c10::qint32> relu6(
173
+ Vectorized<c10::qint32> zero_point,
174
+ Vectorized<c10::qint32> q_six) const {
175
+ vint32 max0 = vec_max(_vec0, zero_point._vec0);
176
+ vint32 max1 = vec_max(_vec1, zero_point._vec1);
177
+ return {vec_min(max0, q_six._vec0), vec_min(max1, q_six._vec1)};
178
+ }
179
+
180
+ int_vec_return_type widening_subtract(Vectorized<c10::qint32> b) const {
181
+ return {*this - b};
182
+ }
183
+
184
+ static Vectorized<c10::qint32> requantize_from_int(
185
+ const int_vec_return_type& inp,
186
+ float multiplier,
187
+ int32_t zero_point) {
188
+ const vint32 vmin = vec_splats(std::numeric_limits<value_type>::min());
189
+ const vint32 vmax = vec_splats(std::numeric_limits<value_type>::max());
190
+ vfloat32 vec_mult = vec_splats(multiplier);
191
+ vint32 vec_zero_point = vec_splats(zero_point);
192
+ Vectorized<c10::qint32> vi = inp[0];
193
+ vfloat32 vecf0 = vec_float(vi.vec0());
194
+ vfloat32 vecf1 = vec_float(vi.vec1());
195
+
196
+ vecf0 = vec_mul(vecf0, vec_mult);
197
+ vecf1 = vec_mul(vecf1, vec_mult);
198
+
199
+ vecf0 = vec_rint(vecf0);
200
+ vecf1 = vec_rint(vecf1);
201
+
202
+ vint32 veci0 = vec_add(vec_signed(vecf0),vec_zero_point);
203
+ vint32 veci1 = vec_add(vec_signed(vecf1),vec_zero_point);
204
+
205
+ veci0 = vec_max(veci0, vmin);
206
+ veci1 = vec_max(veci1, vmin);
207
+ veci0 = vec_min(veci0, vmax);
208
+ veci1 = vec_min(veci1, vmax);
209
+
210
+ return {veci0, veci1};
211
+ }
212
+
213
+ DEFINE_MEMBER_OP(operator==, c10::qint32, vec_cmpeq)
214
+ DEFINE_MEMBER_OP(operator!=, c10::qint32, vec_cmpne)
215
+ DEFINE_MEMBER_OP(operator<, c10::qint32, vec_cmplt)
216
+ DEFINE_MEMBER_OP(operator<=, c10::qint32, vec_cmple)
217
+ DEFINE_MEMBER_OP(operator>, c10::qint32, vec_cmpgt)
218
+ DEFINE_MEMBER_OP(operator>=, c10::qint32, vec_cmpge)
219
+ DEFINE_MEMBER_OP(operator+, c10::qint32, vec_add)
220
+ DEFINE_MEMBER_OP(operator-, c10::qint32, vec_sub)
221
+ DEFINE_MEMBER_OP(operator*, c10::qint32, vec_mul)
222
+ DEFINE_MEMBER_EMULATE_BINARY_OP(operator/, c10::qint32, /)
223
+ DEFINE_MEMBER_OP(maximum, c10::qint32, vec_max)
224
+ DEFINE_MEMBER_OP(minimum, c10::qint32, vec_min)
225
+ DEFINE_MEMBER_OP(operator&, c10::qint32, vec_and)
226
+ DEFINE_MEMBER_OP(operator|, c10::qint32, vec_or)
227
+ DEFINE_MEMBER_OP(operator^, c10::qint32, vec_xor)
228
+ };
229
+
230
+ template <>
231
+ Vectorized<c10::qint32> inline maximum(
232
+ const Vectorized<c10::qint32>& a,
233
+ const Vectorized<c10::qint32>& b) {
234
+ return a.maximum(b);
235
+ }
236
+
237
+ template <>
238
+ Vectorized<c10::qint32> inline minimum(
239
+ const Vectorized<c10::qint32>& a,
240
+ const Vectorized<c10::qint32>& b) {
241
+ return a.minimum(b);
242
+ }
243
+ } // namespace
244
+ } // namespace vec
245
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_qint8_vsx.h ADDED
@@ -0,0 +1,447 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cpu/vec/intrinsics.h>
4
+ #include <ATen/cpu/vec/vec_base.h>
5
+ #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
6
+ #include <c10/util/qint8.h>
7
+ #include <array>
8
+
9
+ // This file defines Vectorized<> for the quantized types.
10
+ //
11
+ //
12
+ // Currently, we simply use these classes as efficient converters between
13
+ // the quantized types and Vectorized<float>, usually in bandwidth-bound cases
14
+ // where doing the arithmetic in full-precision is acceptable (e.g.
15
+ // elementwise operators).
16
+ //
17
+ //
18
+ // Conversions are as follows:
19
+ // Vectorized<qint8> -> 4x Vectorized<float>
20
+ //
21
+ // The size of the returned float vector is specified by the special
22
+ // constexpr function float_num_vecs. The type of the value returned
23
+ // from dequantize (and expected as an argument to quantize) is
24
+ // specified by float_vec_return_type.
25
+ //
26
+ // When writing kernels with these vectors, it is expected that floating-
27
+ // point operations will be carried out in a loop over Vectorized<T>::float_num_vecs
28
+ // iterations.
29
+
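+ // Illustrative sketch (not part of the upstream header; src, dst, scale and
+ // zero_point are hypothetical): each Vectorized<qint8> expands to
+ // float_num_vecs() == 4 float vectors, so kernels loop over all four:
+ //
+ //   using qvec = at::vec::Vectorized<c10::qint8>;
+ //   using fvec = at::vec::Vectorized<float>;
+ //   auto fx = qvec::loadu(src).dequantize(
+ //       fvec(scale), fvec(zero_point), fvec(-zero_point * scale));
+ //   for (size_t i = 0; i < qvec::float_num_vecs(); ++i) {
+ //     fx[i] = at::vec::maximum(fx[i], fvec(0.0f));  // e.g. a float-side relu
+ //   }
+ //   qvec::quantize(fx, scale, zero_point, 1.0f / scale).store(dst);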
30
+ namespace at {
31
+ namespace vec {
32
+ inline namespace CPU_CAPABILITY {
33
+
34
+ template <>
35
+ struct Vectorized<c10::qint8> {
36
+ private:
37
+ union {
38
+ struct {
39
+ vint8 _vec0;
40
+ vint8 _vec1;
41
+ };
42
+ struct {
43
+ vbool8 _vecb0;
44
+ vbool8 _vecb1;
45
+ };
46
+
47
+ } __attribute__((__may_alias__));
48
+
49
+ public:
50
+ Vectorized() {}
51
+ using size_type = int;
52
+ static constexpr size_type size() {
53
+ return 32;
54
+ }
55
+
56
+ static constexpr size_t float_num_vecs() {
57
+ return 4;
58
+ }
59
+ static constexpr int int_num_vecs() {
60
+ return 4;
61
+ }
62
+ using float_vec_return_type = std::array<Vectorized<float>, 4>;
63
+ using int_vec_return_type = std::array<Vectorized<c10::qint32>, 4>;
64
+ using value_type = typename c10::qint8::underlying;
65
+ using vec_internal_type = vint8;
66
+ using vec_internal_mask_type = vbool8;
67
+ // Broadcast constructor
68
+ C10_ALWAYS_INLINE Vectorized(const c10::qint8& val)
69
+ : _vec0{vec_splats(val.val_)}, _vec1{vec_splats(val.val_)} {}
70
+
71
+ C10_ALWAYS_INLINE Vectorized(const Vectorized<c10::qint8>& other)
72
+ : _vec0{other._vec0}, _vec1(other._vec1) {}
73
+
74
+ C10_ALWAYS_INLINE Vectorized(vint8 v) : _vec0{v}, _vec1{v} {}
75
+ C10_ALWAYS_INLINE Vectorized(vbool8 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
76
+ C10_ALWAYS_INLINE Vectorized(vint8 v1, vint8 v2) : _vec0{v1}, _vec1{v2} {}
77
+ C10_ALWAYS_INLINE Vectorized(vbool8 v1, vbool8 v2) : _vecb0{v1}, _vecb1{v2} {}
78
+
79
+ C10_ALWAYS_INLINE const vec_internal_type& vec0() const {
80
+ return _vec0;
81
+ }
82
+ C10_ALWAYS_INLINE const vec_internal_type& vec1() const {
83
+ return _vec1;
84
+ }
85
+
86
+ static C10_ALWAYS_INLINE Vectorized<c10::qint8> loadu(
87
+ const void* ptr,
88
+ int count = size()) {
89
+ if (count == size()) {
90
+ return {
91
+ vec_vsx_ld(offset0, reinterpret_cast<const vint8*>(ptr)),
92
+ vec_vsx_ld(offset16, reinterpret_cast<const vint8*>(ptr))};
93
+ }
94
+ __at_align__ value_type tmp_values[size()];
95
+ std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));
96
+ return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)};
97
+ }
98
+ void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
99
+ if (count == size()) {
100
+ vec_vsx_st(_vec0, offset0, reinterpret_cast<value_type*>(ptr));
101
+ vec_vsx_st(_vec1, offset16, reinterpret_cast<value_type*>(ptr));
102
+ } else if (count > 0) {
103
+ __at_align__ value_type tmp_values[size()];
104
+ vec_vsx_st(_vec0, offset0, tmp_values);
105
+ vec_vsx_st(_vec1, offset16, tmp_values);
106
+ std::memcpy(
107
+ ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
108
+ }
109
+ }
110
+
111
+ public:
112
+ float_vec_return_type C10_ALWAYS_INLINE dequantize(
113
+ Vectorized<float> scale,
114
+ Vectorized<float> zero_point,
115
+ Vectorized<float> scale_zp_premul) const {
116
+ vint16 vecshi0 = vec_unpackh(_vec0);
117
+ vint16 vecshi1 = vec_unpackl(_vec0);
118
+
119
+ vint16 vecshi2 = vec_unpackh(_vec1);
120
+ vint16 vecshi3 = vec_unpackl(_vec1);
121
+
122
+ vint32 veci0 = vec_unpackh(vecshi0);
123
+ vint32 veci1 = vec_unpackl(vecshi0);
124
+
125
+ vint32 veci2 = vec_unpackh(vecshi1);
126
+ vint32 veci3 = vec_unpackl(vecshi1);
127
+
128
+ vint32 veci4 = vec_unpackh(vecshi2);
129
+ vint32 veci5 = vec_unpackl(vecshi2);
130
+
131
+ vint32 veci6 = vec_unpackh(vecshi3);
132
+ vint32 veci7 = vec_unpackl(vecshi3);
133
+
134
+ vfloat32 vecf0_0 = vec_float(veci0);
135
+ vfloat32 vecf1_0 = vec_float(veci1);
136
+
137
+ vfloat32 vecf0_1 = vec_float(veci2);
138
+ vfloat32 vecf1_1 = vec_float(veci3);
139
+
140
+ vfloat32 vecf0_2 = vec_float(veci4);
141
+ vfloat32 vecf1_2 = vec_float(veci5);
142
+
143
+ vfloat32 vecf0_3 = vec_float(veci6);
144
+ vfloat32 vecf1_3 = vec_float(veci7);
145
+ vfloat32 scale_vec0 = scale.vec0();
146
+ vfloat32 scale_vec1 = scale.vec1();
147
+ vfloat32 scale_zp_premul0 = scale_zp_premul.vec0();
148
+ vfloat32 scale_zp_premul1 = scale_zp_premul.vec1();
149
+ return {
150
+ Vectorized<float>{
151
+ vec_madd(scale_vec0, vecf0_0, scale_zp_premul0),
152
+ vec_madd(scale_vec1, vecf1_0, scale_zp_premul1)},
153
+ Vectorized<float>{
154
+ vec_madd(scale_vec0, vecf0_1, scale_zp_premul0),
155
+ vec_madd(scale_vec1, vecf1_1, scale_zp_premul1)},
156
+ Vectorized<float>{
157
+ vec_madd(scale_vec0, vecf0_2, scale_zp_premul0),
158
+ vec_madd(scale_vec1, vecf1_2, scale_zp_premul1)},
159
+ Vectorized<float>{
160
+ vec_madd(scale_vec0, vecf0_3, scale_zp_premul0),
161
+ vec_madd(scale_vec1, vecf1_3, scale_zp_premul1)}};
162
+ }
163
+
164
+ float_vec_return_type C10_ALWAYS_INLINE dequantize(
165
+ Vectorized<float> scale,
166
+ Vectorized<float> zero_point) const {
167
+ vint16 vecshi0 = vec_unpackh(_vec0);
168
+ vint16 vecshi1 = vec_unpackl(_vec0);
169
+
170
+ vint16 vecshi2 = vec_unpackh(_vec1);
171
+ vint16 vecshi3 = vec_unpackl(_vec1);
172
+
173
+ vint32 veci0 = vec_unpackh(vecshi0);
174
+ vint32 veci1 = vec_unpackl(vecshi0);
175
+
176
+ vint32 veci2 = vec_unpackh(vecshi1);
177
+ vint32 veci3 = vec_unpackl(vecshi1);
178
+
179
+ vint32 veci4 = vec_unpackh(vecshi2);
180
+ vint32 veci5 = vec_unpackl(vecshi2);
181
+
182
+ vint32 veci6 = vec_unpackh(vecshi3);
183
+ vint32 veci7 = vec_unpackl(vecshi3);
184
+
185
+ vfloat32 vecf0_0 = vec_float(veci0);
186
+ vfloat32 vecf1_0 = vec_float(veci1);
187
+
188
+ vfloat32 vecf0_1 = vec_float(veci2);
189
+ vfloat32 vecf1_1 = vec_float(veci3);
190
+
191
+ vfloat32 vecf0_2 = vec_float(veci4);
192
+ vfloat32 vecf1_2 = vec_float(veci5);
193
+
194
+ vfloat32 vecf0_3 = vec_float(veci6);
195
+ vfloat32 vecf1_3 = vec_float(veci7);
196
+ vfloat32 scale_vec0 = scale.vec0();
197
+ vfloat32 scale_vec1 = scale.vec1();
198
+ vfloat32 zero_point0 = zero_point.vec0();
199
+ vfloat32 zero_point1 = zero_point.vec1();
200
+ return {
201
+ Vectorized<float>{
202
+ (vecf0_0 - zero_point0) * scale_vec0,
203
+ (vecf1_0 - zero_point1) * scale_vec1},
204
+ Vectorized<float>{
205
+ (vecf0_1 - zero_point0) * scale_vec0,
206
+ (vecf1_1 - zero_point1) * scale_vec1},
207
+ Vectorized<float>{
208
+ (vecf0_2 - zero_point0) * scale_vec0,
209
+ (vecf1_2 - zero_point1) * scale_vec1},
210
+ Vectorized<float>{
211
+ (vecf0_3 - zero_point0) * scale_vec0,
212
+ (vecf1_3 - zero_point1) * scale_vec1}};
213
+ }
214
+
215
+ static Vectorized<c10::qint8> quantize(
216
+ const float_vec_return_type& rhs,
217
+ float scale,
218
+ int32_t zero_point,
219
+ float inverse_scale) {
220
+ // constexpr int32_t min_val = std::numeric_limits<value_type>::min();
221
+ // constexpr int32_t max_val = std::numeric_limits<value_type>::max();
222
+
223
+ vfloat32 inverse_scale_v = vec_splats(inverse_scale);
224
+ vfloat32 vec_zero_point = vec_splats((float)zero_point);
225
+ // vint32 vmin = vec_splats(min_val);
226
+ // vint32 vmax = vec_splats(max_val);
227
+
228
+ Vectorized<float> vf0 = rhs[0];
229
+ Vectorized<float> vf1 = rhs[1];
230
+ Vectorized<float> vf2 = rhs[2];
231
+ Vectorized<float> vf3 = rhs[3];
232
+ vfloat32 vecf0 = vf0.vec0();
233
+ vfloat32 vecf1 = vf0.vec1();
234
+ vfloat32 vecf2 = vf1.vec0();
235
+ vfloat32 vecf3 = vf1.vec1();
236
+
237
+ vfloat32 vecf4 = vf2.vec0();
238
+ vfloat32 vecf5 = vf2.vec1();
239
+ vfloat32 vecf6 = vf3.vec0();
240
+ vfloat32 vecf7 = vf3.vec1();
241
+
242
+ vecf0 = vec_mul(vecf0, inverse_scale_v);
243
+ vecf1 = vec_mul(vecf1, inverse_scale_v);
244
+ vecf2 = vec_mul(vecf2, inverse_scale_v);
245
+ vecf3 = vec_mul(vecf3, inverse_scale_v);
246
+
247
+ vecf4 = vec_mul(vecf4, inverse_scale_v);
248
+ vecf5 = vec_mul(vecf5, inverse_scale_v);
249
+ vecf6 = vec_mul(vecf6, inverse_scale_v);
250
+ vecf7 = vec_mul(vecf7, inverse_scale_v);
251
+
252
+ vecf0 = vec_add(vec_rint(vecf0), vec_zero_point);
253
+ vecf1 = vec_add(vec_rint(vecf1), vec_zero_point);
254
+ vecf2 = vec_add(vec_rint(vecf2), vec_zero_point);
255
+ vecf3 = vec_add(vec_rint(vecf3), vec_zero_point);
256
+
257
+ vecf4 = vec_add(vec_rint(vecf4), vec_zero_point);
258
+ vecf5 = vec_add(vec_rint(vecf5), vec_zero_point);
259
+ vecf6 = vec_add(vec_rint(vecf6), vec_zero_point);
260
+ vecf7 = vec_add(vec_rint(vecf7), vec_zero_point);
261
+
262
+ vint32 veci0 = vec_signed(vecf0);
263
+ vint32 veci1 = vec_signed(vecf1);
264
+ vint32 veci2 = vec_signed(vecf2);
265
+ vint32 veci3 = vec_signed(vecf3);
266
+
267
+ vint32 veci4 = vec_signed(vecf4);
268
+ vint32 veci5 = vec_signed(vecf5);
269
+ vint32 veci6 = vec_signed(vecf6);
270
+ vint32 veci7 = vec_signed(vecf7);
271
+
272
+ // veci0 = vec_min(vmax, vec_max( vmin, vecf0)) ;
273
+ // veci1 = vec_min(vmax, vec_max( vmin, vecf1)) ;
274
+ // veci2 = vec_min(vmax, vec_max( vmin, vecf2)) ;
275
+ // veci3 = vec_min(vmax, vec_max( vmin, vecf3)) ;
276
+
277
+ // veci4 = vec_min(vmax, vec_max( vmin, vecf4)) ;
278
+ // veci5 = vec_min(vmax, vec_max( vmin, vecf5)) ;
279
+ // veci6 = vec_min(vmax, vec_max( vmin, vecf6)) ;
280
+ // veci7 = vec_min(vmax, vec_max( vmin, vecf7)) ;
281
+ // vec_packs already saturates (clamps) to the target range, so no explicit clamp is needed
282
+ vint16 vecshi0 = vec_packs(veci0, veci1);
283
+ vint16 vecshi1 = vec_packs(veci2, veci3);
284
+ vint16 vecshi2 = vec_packs(veci4, veci5);
285
+ vint16 vecshi3 = vec_packs(veci6, veci7);
286
+
287
+ vint8 vec0 = vec_packs(vecshi0, vecshi1);
288
+ vint8 vec1 = vec_packs(vecshi2, vecshi3);
289
+
290
+ return {vec0, vec1};
291
+ }
292
+
293
+ Vectorized<c10::qint8> C10_ALWAYS_INLINE relu(Vectorized<c10::qint8> zero_point) const {
294
+ return {vec_max(_vec0, zero_point._vec0), vec_max(_vec1, zero_point._vec1)};
295
+ }
296
+
297
+ Vectorized<c10::qint8> C10_ALWAYS_INLINE
298
+ relu6(Vectorized<c10::qint8> zero_point, Vectorized<c10::qint8> q_six) const {
299
+ vint8 max0 = vec_max(_vec0, zero_point._vec0);
300
+ vint8 max1 = vec_max(_vec1, zero_point._vec1);
301
+ return {vec_min(max0, q_six._vec0), vec_min(max1, q_six._vec1)};
302
+ }
303
+
304
+ int_vec_return_type widening_subtract(Vectorized<c10::qint8> b) const {
305
+ vint16 vecshi0 = vec_unpackh(_vec0);
306
+ vint16 vecBshi0 = vec_unpackh(b._vec0);
307
+ vint16 vecshi1 = vec_unpackl(_vec0);
308
+ vint16 vecBshi1 = vec_unpackl(b._vec0);
309
+
310
+ vint16 vecshi2 = vec_unpackh(_vec1);
311
+ vint16 vecBshi2 = vec_unpackh(b._vec1);
312
+ vint16 vecshi3 = vec_unpackl(_vec1);
313
+ vint16 vecBshi3 = vec_unpackl(b._vec1);
314
+
315
+ vint32 veci0 = vec_unpackh(vecshi0);
316
+ vint32 vecBi0 = vec_unpackh(vecBshi0);
317
+ vint32 veci1 = vec_unpackl(vecshi0);
318
+ vint32 vecBi1 = vec_unpackl(vecBshi0);
319
+
320
+ vint32 veci2 = vec_unpackh(vecshi1);
321
+ vint32 vecBi2 = vec_unpackh(vecBshi1);
322
+ vint32 veci3 = vec_unpackl(vecshi1);
323
+ vint32 vecBi3 = vec_unpackl(vecBshi1);
324
+
325
+ vint32 veci4 = vec_unpackh(vecshi2);
326
+ vint32 vecBi4 = vec_unpackh(vecBshi2);
327
+ vint32 veci5 = vec_unpackl(vecshi2);
328
+ vint32 vecBi5 = vec_unpackl(vecBshi2);
329
+
330
+ vint32 veci6 = vec_unpackh(vecshi3);
331
+ vint32 vecBi6 = vec_unpackh(vecBshi3);
332
+ vint32 veci7 = vec_unpackl(vecshi3);
333
+ vint32 vecBi7 = vec_unpackl(vecBshi3);
334
+
335
+ return {
336
+ Vectorized<c10::qint32>(veci0 - vecBi0, veci1 - vecBi1),
337
+ Vectorized<c10::qint32>(veci2 - vecBi2, veci3 - vecBi3),
338
+ Vectorized<c10::qint32>(veci4 - vecBi4, veci5 - vecBi5),
339
+ Vectorized<c10::qint32>(veci6 - vecBi6, veci7 - vecBi7)};
340
+ }
341
+
342
+ static Vectorized<c10::qint8> requantize_from_int(
343
+ const int_vec_return_type& inp,
344
+ float multiplier,
345
+ int32_t zero_point) {
346
+ vfloat32 vec_multiplier = vec_splats(multiplier);
347
+ vint32 vec_zero_point = vec_splats(zero_point);
348
+
349
+ Vectorized<c10::qint32> vi0 = inp[0];
350
+ Vectorized<c10::qint32> vi1 = inp[1];
351
+ Vectorized<c10::qint32> vi2 = inp[2];
352
+ Vectorized<c10::qint32> vi3 = inp[3];
353
+
354
+ vfloat32 vecf0 = vec_float(vi0.vec0());
355
+ vfloat32 vecf1 = vec_float(vi0.vec1());
356
+ vfloat32 vecf2 = vec_float(vi1.vec0());
357
+ vfloat32 vecf3 = vec_float(vi1.vec1());
358
+
359
+ vfloat32 vecf4 = vec_float(vi2.vec0());
360
+ vfloat32 vecf5 = vec_float(vi2.vec1());
361
+ vfloat32 vecf6 = vec_float(vi3.vec0());
362
+ vfloat32 vecf7 = vec_float(vi3.vec1());
363
+
364
+ vecf0 = vec_mul(vecf0, vec_multiplier);
365
+ vecf1 = vec_mul(vecf1, vec_multiplier);
366
+ vecf2 = vec_mul(vecf2, vec_multiplier);
367
+ vecf3 = vec_mul(vecf3, vec_multiplier);
368
+
369
+ vecf4 = vec_mul(vecf4, vec_multiplier);
370
+ vecf5 = vec_mul(vecf5, vec_multiplier);
371
+ vecf6 = vec_mul(vecf6, vec_multiplier);
372
+ vecf7 = vec_mul(vecf7, vec_multiplier);
373
+
374
+ vecf0 = vec_rint(vecf0);
375
+ vecf1 = vec_rint(vecf1);
376
+ vecf2 = vec_rint(vecf2);
377
+ vecf3 = vec_rint(vecf3);
378
+
379
+ vecf4 = vec_rint(vecf4);
380
+ vecf5 = vec_rint(vecf5);
381
+ vecf6 = vec_rint(vecf6);
382
+ vecf7 = vec_rint(vecf7);
383
+
384
+ vint32 veci0 = vec_signed(vecf0);
385
+ vint32 veci1 = vec_signed(vecf1);
386
+ vint32 veci2 = vec_signed(vecf2);
387
+ vint32 veci3 = vec_signed(vecf3);
388
+
389
+ vint32 veci4 = vec_signed(vecf4);
390
+ vint32 veci5 = vec_signed(vecf5);
391
+ vint32 veci6 = vec_signed(vecf6);
392
+ vint32 veci7 = vec_signed(vecf7);
393
+
394
+ veci0 = vec_add(veci0, vec_zero_point);
395
+ veci1 = vec_add(veci1, vec_zero_point);
396
+ veci2 = vec_add(veci2, vec_zero_point);
397
+ veci3 = vec_add(veci3, vec_zero_point);
398
+
399
+ veci4 = vec_add(veci4, vec_zero_point);
400
+ veci5 = vec_add(veci5, vec_zero_point);
401
+ veci6 = vec_add(veci6, vec_zero_point);
402
+ veci7 = vec_add(veci7, vec_zero_point);
403
+
404
+ vint16 vecshi0 = vec_packs(veci0, veci1);
405
+ vint16 vecshi1 = vec_packs(veci2, veci3);
406
+ vint16 vecshi2 = vec_packs(veci4, veci5);
407
+ vint16 vecshi3 = vec_packs(veci6, veci7);
408
+
409
+ vint8 vec0 = vec_packs(vecshi0, vecshi1);
410
+ vint8 vec1 = vec_packs(vecshi2, vecshi3);
411
+
412
+ return {vec0, vec1};
413
+ }
414
+
415
+ DEFINE_MEMBER_OP(operator==, c10::qint8, vec_cmpeq)
416
+ DEFINE_MEMBER_OP(operator!=, c10::qint8, vec_cmpne)
417
+ DEFINE_MEMBER_OP(operator<, c10::qint8, vec_cmplt)
418
+ DEFINE_MEMBER_OP(operator<=, c10::qint8, vec_cmple)
419
+ DEFINE_MEMBER_OP(operator>, c10::qint8, vec_cmpgt)
420
+ DEFINE_MEMBER_OP(operator>=, c10::qint8, vec_cmpge)
421
+ DEFINE_MEMBER_OP(operator+, c10::qint8, vec_add)
422
+ DEFINE_MEMBER_OP(operator-, c10::qint8, vec_sub)
423
+ DEFINE_MEMBER_OP(operator*, c10::qint8, vec_mul)
424
+ DEFINE_MEMBER_EMULATE_BINARY_OP(operator/, c10::qint8, /)
425
+ DEFINE_MEMBER_OP(maximum, c10::qint8, vec_max)
426
+ DEFINE_MEMBER_OP(minimum, c10::qint8, vec_min)
427
+ DEFINE_MEMBER_OP(operator&, c10::qint8, vec_and)
428
+ DEFINE_MEMBER_OP(operator|, c10::qint8, vec_or)
429
+ DEFINE_MEMBER_OP(operator^, c10::qint8, vec_xor)
430
+ };
431
+
432
+ template <>
433
+ Vectorized<c10::qint8> inline maximum(
434
+ const Vectorized<c10::qint8>& a,
435
+ const Vectorized<c10::qint8>& b) {
436
+ return a.maximum(b);
437
+ }
438
+
439
+ template <>
440
+ Vectorized<c10::qint8> inline minimum(
441
+ const Vectorized<c10::qint8>& a,
442
+ const Vectorized<c10::qint8>& b) {
443
+ return a.minimum(b);
444
+ }
445
+ } // namespace
446
+ } // namespace vec
447
+ } // namespace at
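The qint8 block above composes widening_subtract and requantize_from_int to turn the difference of two quantized vectors back into int8. As a reading aid only (not part of the commit), here is a scalar sketch of the same arithmetic; the function name and parameters are placeholders.

#include <algorithm>
#include <cmath>
#include <cstdint>

// Scalar reference for the widening_subtract / requantize_from_int pair in
// Vectorized<c10::qint8> above: widen to int32, scale by a float multiplier,
// round to nearest, add the output zero point, then saturate back to int8
// (vec_packs performs the saturation in the vector code).
static inline int8_t requantize_diff_scalar(
    int8_t a, int8_t b, float multiplier, int32_t zero_point) {
  int32_t wide = static_cast<int32_t>(a) - static_cast<int32_t>(b); // widening_subtract
  float scaled = static_cast<float>(wide) * multiplier;             // vec_mul
  int32_t rounded = static_cast<int32_t>(std::nearbyintf(scaled));  // vec_rint + vec_signed
  int32_t shifted = rounded + zero_point;                           // vec_add
  return static_cast<int8_t>(std::min(127, std::max(-128, shifted)));
}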
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_quint8_vsx.h ADDED
@@ -0,0 +1,466 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cpu/vec/intrinsics.h>
4
+ #include <ATen/cpu/vec/vec_base.h>
5
+ #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
6
+
7
+ #include <c10/util/irange.h>
8
+ #include <c10/util/quint8.h>
9
+ #include <array>
10
+
11
+ // This file defines Vectorized<> for the quantized types.
12
+ //
13
+ //
14
+ // Currently, we simply use these classes as efficient converters between
15
+ // the quantized types and Vectorized<float>, usually in bandwidth-bound cases
16
+ // where doing the arithmetic in full-precision is acceptable (e.g.
17
+ // elementwise operators).
18
+ //
19
+ //
20
+ // Conversions are as follows:
21
+ // Vectorized<quint8> -> 4x Vectorized<float>
22
+ //
23
+ // The size of the returned float vector is specified by the special
24
+ // constexpr function float_num_vecs. The type of the value returned
25
+ // from dequantize (and expected as an argument to quantize) is
26
+ // specified by float_vec_return_type.
27
+ //
28
+ // When writing kernels with these vectors, it is expected that floating-
29
+ // point operations will be carried out in a loop over Vectorized<T>::float_num_vecs
30
+ // iterations.
31
+
32
+ namespace at {
33
+ namespace vec {
34
+ inline namespace CPU_CAPABILITY {
35
+
36
+ const vint16 mask_unsigned = vec_splats((short int)0xFF);
37
+ template <>
38
+ struct Vectorized<c10::quint8> {
39
+ private:
40
+ union {
41
+ struct {
42
+ vuint8 _vec0;
43
+ vuint8 _vec1;
44
+ };
45
+ struct {
46
+ vbool8 _vecb0;
47
+ vbool8 _vecb1;
48
+ };
49
+
50
+ } __attribute__((__may_alias__));
51
+
52
+ public:
53
+ Vectorized() {}
54
+ using size_type = int;
55
+ static constexpr size_type size() {
56
+ return 32;
57
+ }
58
+
59
+ static constexpr size_t float_num_vecs() {
60
+ return 4;
61
+ }
62
+ static constexpr int int_num_vecs() {
63
+ return 4;
64
+ }
65
+ using float_vec_return_type = std::array<Vectorized<float>, 4>;
66
+ using int_vec_return_type = std::array<Vectorized<c10::qint32>, 4>;
67
+ using value_type = typename c10::quint8::underlying;
68
+ using vec_internal_type = vuint8;
69
+ using vec_internal_mask_type = vbool8;
70
+ // Broadcast constructor
71
+ C10_ALWAYS_INLINE Vectorized(const c10::quint8& val)
72
+ : _vec0(vec_splats(val.val_)), _vec1(vec_splats(val.val_)) {}
73
+
74
+ C10_ALWAYS_INLINE Vectorized(const Vectorized<c10::quint8>& other)
75
+ : _vec0{other._vec0}, _vec1(other._vec1) {}
76
+
77
+ C10_ALWAYS_INLINE Vectorized(vuint8 v) : _vec0{v}, _vec1{v} {}
78
+ C10_ALWAYS_INLINE Vectorized(vbool8 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
79
+ C10_ALWAYS_INLINE Vectorized(vuint8 v1, vuint8 v2) : _vec0{v1}, _vec1{v2} {}
80
+ C10_ALWAYS_INLINE Vectorized(vbool8 v1, vbool8 v2) : _vecb0{v1}, _vecb1{v2} {}
81
+
82
+ C10_ALWAYS_INLINE const vec_internal_type& vec0() const {
83
+ return _vec0;
84
+ }
85
+ C10_ALWAYS_INLINE const vec_internal_type& vec1() const {
86
+ return _vec1;
87
+ }
88
+
89
+ static C10_ALWAYS_INLINE Vectorized<c10::quint8> loadu(
90
+ const void* ptr,
91
+ int count = size()) {
92
+ if (count == size()) {
93
+ return {
94
+ vec_vsx_ld(offset0, reinterpret_cast<const value_type*>(ptr)),
95
+ vec_vsx_ld(offset16, reinterpret_cast<const value_type*>(ptr))};
96
+ }
97
+ __at_align__ value_type tmp_values[size()];
98
+ std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));
99
+ return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)};
100
+ }
101
+ void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
102
+ if (count == size()) {
103
+ vec_vsx_st(_vec0, offset0, reinterpret_cast<value_type*>(ptr));
104
+ vec_vsx_st(_vec1, offset16, reinterpret_cast<value_type*>(ptr));
105
+ } else if (count > 0) {
106
+ __at_align__ value_type tmp_values[size()];
107
+ vec_vsx_st(_vec0, offset0, tmp_values);
108
+ vec_vsx_st(_vec1, offset16, tmp_values);
109
+ std::memcpy(
110
+ ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
111
+ }
112
+ }
113
+
114
+ public:
115
+ float_vec_return_type C10_ALWAYS_INLINE dequantize(
116
+ Vectorized<float> scale,
117
+ Vectorized<float> zero_point,
118
+ Vectorized<float> scale_zp_premul) const {
119
+ // unpacking unsigned as signed
120
+ vint16 vecshi0 = vec_unpackh((vint8)_vec0);
121
+ vint16 vecshi1 = vec_unpackl((vint8)_vec0);
122
+
123
+ vint16 vecshi2 = vec_unpackh((vint8)_vec1);
124
+ vint16 vecshi3 = vec_unpackl((vint8)_vec1);
125
+
126
+ // signed -> unsigned
127
+ vecshi0 = vec_and(vecshi0, mask_unsigned);
128
+ vecshi1 = vec_and(vecshi1, mask_unsigned);
129
+
130
+ vecshi2 = vec_and(vecshi2, mask_unsigned);
131
+ vecshi3 = vec_and(vecshi3, mask_unsigned);
132
+
133
+ vint32 veci0 = vec_unpackh(vecshi0);
134
+ vint32 veci1 = vec_unpackl(vecshi0);
135
+
136
+ vint32 veci2 = vec_unpackh(vecshi1);
137
+ vint32 veci3 = vec_unpackl(vecshi1);
138
+
139
+ vint32 veci4 = vec_unpackh(vecshi2);
140
+ vint32 veci5 = vec_unpackl(vecshi2);
141
+
142
+ vint32 veci6 = vec_unpackh(vecshi3);
143
+ vint32 veci7 = vec_unpackl(vecshi3);
144
+
145
+ vfloat32 vecf0_0 = vec_float(veci0);
146
+ vfloat32 vecf1_0 = vec_float(veci1);
147
+
148
+ vfloat32 vecf0_1 = vec_float(veci2);
149
+ vfloat32 vecf1_1 = vec_float(veci3);
150
+
151
+ vfloat32 vecf0_2 = vec_float(veci4);
152
+ vfloat32 vecf1_2 = vec_float(veci5);
153
+
154
+ vfloat32 vecf0_3 = vec_float(veci6);
155
+ vfloat32 vecf1_3 = vec_float(veci7);
156
+ vfloat32 scale_vec0 = scale.vec0();
157
+ vfloat32 scale_vec1 = scale.vec1();
158
+ vfloat32 scale_zp_premul0 = scale_zp_premul.vec0();
159
+ vfloat32 scale_zp_premul1 = scale_zp_premul.vec1();
160
+ return {
161
+ Vectorized<float>{
162
+ vec_madd(scale_vec0, vecf0_0, scale_zp_premul0),
163
+ vec_madd(scale_vec1, vecf1_0, scale_zp_premul1)},
164
+ Vectorized<float>{
165
+ vec_madd(scale_vec0, vecf0_1, scale_zp_premul0),
166
+ vec_madd(scale_vec1, vecf1_1, scale_zp_premul1)},
167
+ Vectorized<float>{
168
+ vec_madd(scale_vec0, vecf0_2, scale_zp_premul0),
169
+ vec_madd(scale_vec1, vecf1_2, scale_zp_premul1)},
170
+ Vectorized<float>{
171
+ vec_madd(scale_vec0, vecf0_3, scale_zp_premul0),
172
+ vec_madd(scale_vec1, vecf1_3, scale_zp_premul1)}};
173
+ }
174
+
175
+ float_vec_return_type C10_ALWAYS_INLINE dequantize(
176
+ Vectorized<float> scale,
177
+ Vectorized<float> zero_point) const {
178
+ // unpacking unsigned as signed
179
+ vint16 vecshi0 = vec_unpackh((vint8)_vec0);
180
+ vint16 vecshi1 = vec_unpackl((vint8)_vec0);
181
+
182
+ vint16 vecshi2 = vec_unpackh((vint8)_vec1);
183
+ vint16 vecshi3 = vec_unpackl((vint8)_vec1);
184
+
185
+ // signed -> unsigned
186
+ vecshi0 = vec_and(vecshi0, mask_unsigned);
187
+ vecshi1 = vec_and(vecshi1, mask_unsigned);
188
+
189
+ vecshi2 = vec_and(vecshi2, mask_unsigned);
190
+ vecshi3 = vec_and(vecshi3, mask_unsigned);
191
+
192
+ vint32 veci0 = vec_unpackh(vecshi0);
193
+ vint32 veci1 = vec_unpackl(vecshi0);
194
+
195
+ vint32 veci2 = vec_unpackh(vecshi1);
196
+ vint32 veci3 = vec_unpackl(vecshi1);
197
+
198
+ vint32 veci4 = vec_unpackh(vecshi2);
199
+ vint32 veci5 = vec_unpackl(vecshi2);
200
+
201
+ vint32 veci6 = vec_unpackh(vecshi3);
202
+ vint32 veci7 = vec_unpackl(vecshi3);
203
+
204
+ vfloat32 vecf0_0 = vec_float(veci0);
205
+ vfloat32 vecf1_0 = vec_float(veci1);
206
+
207
+ vfloat32 vecf0_1 = vec_float(veci2);
208
+ vfloat32 vecf1_1 = vec_float(veci3);
209
+
210
+ vfloat32 vecf0_2 = vec_float(veci4);
211
+ vfloat32 vecf1_2 = vec_float(veci5);
212
+
213
+ vfloat32 vecf0_3 = vec_float(veci6);
214
+ vfloat32 vecf1_3 = vec_float(veci7);
215
+ vfloat32 scale_vec0 = scale.vec0();
216
+ vfloat32 scale_vec1 = scale.vec1();
217
+ vfloat32 zero_point0 = zero_point.vec0();
218
+ vfloat32 zero_point1 = zero_point.vec1();
219
+ return {
220
+ Vectorized<float>{
221
+ (vecf0_0 - zero_point0) * scale_vec0,
222
+ (vecf1_0 - zero_point1) * scale_vec1},
223
+ Vectorized<float>{
224
+ (vecf0_1 - zero_point0) * scale_vec0,
225
+ (vecf1_1 - zero_point1) * scale_vec1},
226
+ Vectorized<float>{
227
+ (vecf0_2 - zero_point0) * scale_vec0,
228
+ (vecf1_2 - zero_point1) * scale_vec1},
229
+ Vectorized<float>{
230
+ (vecf0_3 - zero_point0) * scale_vec0,
231
+ (vecf1_3 - zero_point1) * scale_vec1}};
232
+ }
233
+
234
+ static Vectorized<c10::quint8> quantize(
235
+ const float_vec_return_type& rhs,
236
+ float scale,
237
+ int32_t zero_point,
238
+ float inverse_scale) {
239
+ // constexpr int32_t min_val = std::numeric_limits<value_type>::min();
240
+ // constexpr int32_t max_val = std::numeric_limits<value_type>::max();
241
+
242
+ vfloat32 vec_inverse = vec_splats(inverse_scale);
243
+ vfloat32 vec_zero_point = vec_splats((float)zero_point);
244
+ // vuint32 vmin = vec_splats(min_val);
245
+ // vuint32 vmax = vec_splats(max_val);
246
+ Vectorized<float> vf0 = rhs[0];
247
+ Vectorized<float> vf1 = rhs[1];
248
+ Vectorized<float> vf2 = rhs[2];
249
+ Vectorized<float> vf3 = rhs[3];
250
+ vfloat32 vecf0 = vf0.vec0();
251
+ vfloat32 vecf1 = vf0.vec1();
252
+ vfloat32 vecf2 = vf1.vec0();
253
+ vfloat32 vecf3 = vf1.vec1();
254
+
255
+ vfloat32 vecf4 = vf2.vec0();
256
+ vfloat32 vecf5 = vf2.vec1();
257
+ vfloat32 vecf6 = vf3.vec0();
258
+ vfloat32 vecf7 = vf3.vec1();
259
+
260
+ vecf0 = vec_mul(vecf0, vec_inverse);
261
+ vecf1 = vec_mul(vecf1, vec_inverse);
262
+ vecf2 = vec_mul(vecf2, vec_inverse);
263
+ vecf3 = vec_mul(vecf3, vec_inverse);
264
+
265
+ vecf4 = vec_mul(vecf4, vec_inverse);
266
+ vecf5 = vec_mul(vecf5, vec_inverse);
267
+ vecf6 = vec_mul(vecf6, vec_inverse);
268
+ vecf7 = vec_mul(vecf7, vec_inverse);
269
+
270
+ vecf0 = vec_add(vec_rint(vecf0), vec_zero_point);
271
+ vecf1 = vec_add(vec_rint(vecf1), vec_zero_point);
272
+ vecf2 = vec_add(vec_rint(vecf2), vec_zero_point);
273
+ vecf3 = vec_add(vec_rint(vecf3), vec_zero_point);
274
+
275
+ vecf4 = vec_add(vec_rint(vecf4), vec_zero_point);
276
+ vecf5 = vec_add(vec_rint(vecf5), vec_zero_point);
277
+ vecf6 = vec_add(vec_rint(vecf6), vec_zero_point);
278
+ vecf7 = vec_add(vec_rint(vecf7), vec_zero_point);
279
+
280
+ vint32 veci0 = vec_signed(vecf0);
281
+ vint32 veci1 = vec_signed(vecf1);
282
+ vint32 veci2 = vec_signed(vecf2);
283
+ vint32 veci3 = vec_signed(vecf3);
284
+
285
+ vint32 veci4 = vec_signed(vecf4);
286
+ vint32 veci5 = vec_signed(vecf5);
287
+ vint32 veci6 = vec_signed(vecf6);
288
+ vint32 veci7 = vec_signed(vecf7);
289
+
290
+ vint16 vecshi0 = vec_packs(veci0, veci1);
291
+ vint16 vecshi1 = vec_packs(veci2, veci3);
292
+ vint16 vecshi2 = vec_packs(veci4, veci5);
293
+ vint16 vecshi3 = vec_packs(veci6, veci7);
294
+
295
+ vuint8 vec0 = vec_packsu(vecshi0, vecshi1);
296
+ vuint8 vec1 = vec_packsu(vecshi2, vecshi3);
297
+
298
+ return {vec0, vec1};
299
+ }
300
+
301
+ Vectorized<c10::quint8> C10_ALWAYS_INLINE relu(Vectorized<c10::quint8> zero_point) const {
302
+ return {vec_max(_vec0, zero_point._vec0), vec_max(_vec1, zero_point._vec1)};
303
+ }
304
+
305
+ Vectorized<c10::quint8> C10_ALWAYS_INLINE
306
+ relu6(Vectorized<c10::quint8> zero_point, Vectorized<c10::quint8> q_six) const {
307
+ vuint8 max0 = vec_max(_vec0, zero_point._vec0);
308
+ vuint8 max1 = vec_max(_vec1, zero_point._vec1);
309
+ return {vec_min(max0, q_six._vec0), vec_min(max1, q_six._vec1)};
310
+ }
311
+
312
+ int_vec_return_type widening_subtract(Vectorized<c10::quint8> b) const {
313
+ vint16 vecshi0 = vec_unpackh((vint8)_vec0);
314
+ vint16 vecBshi0 = vec_unpackh((vint8)b._vec0);
315
+ vint16 vecshi1 = vec_unpackl((vint8)_vec0);
316
+ vint16 vecBshi1 = vec_unpackl((vint8)b._vec0);
317
+
318
+ vint16 vecshi2 = vec_unpackh((vint8)_vec1);
319
+ vint16 vecBshi2 = vec_unpackh((vint8)b._vec1);
320
+ vint16 vecshi3 = vec_unpackl((vint8)_vec1);
321
+ vint16 vecBshi3 = vec_unpackl((vint8)b._vec1);
322
+
323
+ vecshi0 = vec_and(vecshi0, mask_unsigned);
324
+ vecBshi0 = vec_and(vecBshi0, mask_unsigned);
325
+ vecshi1 = vec_and(vecshi1, mask_unsigned);
326
+ vecBshi1 = vec_and(vecBshi1, mask_unsigned);
327
+
328
+ vecshi2 = vec_and(vecshi2, mask_unsigned);
329
+ vecBshi2 = vec_and(vecBshi2, mask_unsigned);
330
+ vecshi3 = vec_and(vecshi3, mask_unsigned);
331
+ vecBshi3 = vec_and(vecBshi3, mask_unsigned);
332
+
333
+ vint32 veci0 = vec_unpackh(vecshi0);
334
+ vint32 vecBi0 = vec_unpackh(vecBshi0);
335
+ vint32 veci1 = vec_unpackl(vecshi0);
336
+ vint32 vecBi1 = vec_unpackl(vecBshi0);
337
+
338
+ vint32 veci2 = vec_unpackh(vecshi1);
339
+ vint32 vecBi2 = vec_unpackh(vecBshi1);
340
+ vint32 veci3 = vec_unpackl(vecshi1);
341
+ vint32 vecBi3 = vec_unpackl(vecBshi1);
342
+
343
+ vint32 veci4 = vec_unpackh(vecshi2);
344
+ vint32 vecBi4 = vec_unpackh(vecBshi2);
345
+ vint32 veci5 = vec_unpackl(vecshi2);
346
+ vint32 vecBi5 = vec_unpackl(vecBshi2);
347
+
348
+ vint32 veci6 = vec_unpackh(vecshi3);
349
+ vint32 vecBi6 = vec_unpackh(vecBshi3);
350
+ vint32 veci7 = vec_unpackl(vecshi3);
351
+ vint32 vecBi7 = vec_unpackl(vecBshi3);
352
+
353
+ return {
354
+ Vectorized<c10::qint32>(veci0 - vecBi0, veci1 - vecBi1),
355
+ Vectorized<c10::qint32>(veci2 - vecBi2, veci3 - vecBi3),
356
+ Vectorized<c10::qint32>(veci4 - vecBi4, veci5 - vecBi5),
357
+ Vectorized<c10::qint32>(veci6 - vecBi6, veci7 - vecBi7)};
358
+ }
359
+
360
+ static Vectorized<c10::quint8> requantize_from_int(
361
+ const int_vec_return_type& inp,
362
+ float multiplier,
363
+ int32_t zero_point) {
364
+ vfloat32 vec_multiplier = vec_splats(multiplier);
365
+ vint32 vec_zero_point = vec_splats(zero_point);
366
+
367
+ Vectorized<c10::qint32> vi0 = inp[0];
368
+ Vectorized<c10::qint32> vi1 = inp[1];
369
+ Vectorized<c10::qint32> vi2 = inp[2];
370
+ Vectorized<c10::qint32> vi3 = inp[3];
371
+
372
+ vfloat32 vecf0 = vec_float(vi0.vec0());
373
+ vfloat32 vecf1 = vec_float(vi0.vec1());
374
+ vfloat32 vecf2 = vec_float(vi1.vec0());
375
+ vfloat32 vecf3 = vec_float(vi1.vec1());
376
+
377
+ vfloat32 vecf4 = vec_float(vi2.vec0());
378
+ vfloat32 vecf5 = vec_float(vi2.vec1());
379
+ vfloat32 vecf6 = vec_float(vi3.vec0());
380
+ vfloat32 vecf7 = vec_float(vi3.vec1());
381
+
382
+ vecf0 = vec_mul(vecf0, vec_multiplier);
383
+ vecf1 = vec_mul(vecf1, vec_multiplier);
384
+ vecf2 = vec_mul(vecf2, vec_multiplier);
385
+ vecf3 = vec_mul(vecf3, vec_multiplier);
386
+
387
+ vecf4 = vec_mul(vecf4, vec_multiplier);
388
+ vecf5 = vec_mul(vecf5, vec_multiplier);
389
+ vecf6 = vec_mul(vecf6, vec_multiplier);
390
+ vecf7 = vec_mul(vecf7, vec_multiplier);
391
+
392
+ vecf0 = vec_rint(vecf0);
393
+ vecf1 = vec_rint(vecf1);
394
+ vecf2 = vec_rint(vecf2);
395
+ vecf3 = vec_rint(vecf3);
396
+
397
+ vecf4 = vec_rint(vecf4);
398
+ vecf5 = vec_rint(vecf5);
399
+ vecf6 = vec_rint(vecf6);
400
+ vecf7 = vec_rint(vecf7);
401
+
402
+ vint32 veci0 = vec_signed(vecf0);
403
+ vint32 veci1 = vec_signed(vecf1);
404
+ vint32 veci2 = vec_signed(vecf2);
405
+ vint32 veci3 = vec_signed(vecf3);
406
+
407
+ vint32 veci4 = vec_signed(vecf4);
408
+ vint32 veci5 = vec_signed(vecf5);
409
+ vint32 veci6 = vec_signed(vecf6);
410
+ vint32 veci7 = vec_signed(vecf7);
411
+
412
+ veci0 = vec_add(veci0, vec_zero_point);
413
+ veci1 = vec_add(veci1, vec_zero_point);
414
+ veci2 = vec_add(veci2, vec_zero_point);
415
+ veci3 = vec_add(veci3, vec_zero_point);
416
+
417
+ veci4 = vec_add(veci4, vec_zero_point);
418
+ veci5 = vec_add(veci5, vec_zero_point);
419
+ veci6 = vec_add(veci6, vec_zero_point);
420
+ veci7 = vec_add(veci7, vec_zero_point);
421
+
422
+ vint16 vecshi0 = vec_packs(veci0, veci1);
423
+ vint16 vecshi1 = vec_packs(veci2, veci3);
424
+ vint16 vecshi2 = vec_packs(veci4, veci5);
425
+ vint16 vecshi3 = vec_packs(veci6, veci7);
426
+
427
+ vuint8 vec0 = vec_packsu(vecshi0, vecshi1);
428
+ vuint8 vec1 = vec_packsu(vecshi2, vecshi3);
429
+
430
+ return {vec0, vec1};
431
+ }
432
+
433
+ DEFINE_MEMBER_OP(operator==, c10::quint8, vec_cmpeq)
434
+ DEFINE_MEMBER_OP(operator!=, c10::quint8, vec_cmpne)
435
+ DEFINE_MEMBER_OP(operator<, c10::quint8, vec_cmplt)
436
+ DEFINE_MEMBER_OP(operator<=, c10::quint8, vec_cmple)
437
+ DEFINE_MEMBER_OP(operator>, c10::quint8, vec_cmpgt)
438
+ DEFINE_MEMBER_OP(operator>=, c10::quint8, vec_cmpge)
439
+ DEFINE_MEMBER_OP(operator+, c10::quint8, vec_add)
440
+ DEFINE_MEMBER_OP(operator-, c10::quint8, vec_sub)
441
+ DEFINE_MEMBER_OP(operator*, c10::quint8, vec_mul)
442
+ DEFINE_MEMBER_EMULATE_BINARY_OP(operator/, c10::quint8, /)
443
+ DEFINE_MEMBER_OP(maximum, c10::quint8, vec_max)
444
+ DEFINE_MEMBER_OP(minimum, c10::quint8, vec_min)
445
+ DEFINE_MEMBER_OP(operator&, c10::quint8, vec_and)
446
+ DEFINE_MEMBER_OP(operator|, c10::quint8, vec_or)
447
+ DEFINE_MEMBER_OP(operator^, c10::quint8, vec_xor)
448
+ };
449
+
450
+ template <>
451
+ Vectorized<c10::quint8> inline maximum(
452
+ const Vectorized<c10::quint8>& a,
453
+ const Vectorized<c10::quint8>& b) {
454
+ return a.maximum(b);
455
+ }
456
+
457
+ template <>
458
+ Vectorized<c10::quint8> inline minimum(
459
+ const Vectorized<c10::quint8>& a,
460
+ const Vectorized<c10::quint8>& b) {
461
+ return a.minimum(b);
462
+ }
463
+
464
+ } // namespace
465
+ } // namespace vec
466
+ } // namespace at
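The comment block at the top of this header describes the intended pattern: treat Vectorized<c10::quint8> purely as a converter, do the arithmetic on float_num_vecs() Vectorized<float> values, then quantize back. The sketch below illustrates that loop; it assumes a build that provides the Vectorized<> headers, and the function and parameter names are placeholders rather than code from the commit.

#include <ATen/cpu/vec/vec.h>
#include <algorithm>
#include <cstdint>

// Dequantize -> compute in float -> quantize, as described in the header
// comments: each Vectorized<c10::quint8> expands to 4 Vectorized<float>.
void scale_quint8(const c10::quint8* src, c10::quint8* dst, int64_t n,
                  float scale, int32_t zero_point, float factor) {
  using QVec = at::vec::Vectorized<c10::quint8>;
  using FVec = at::vec::Vectorized<float>;
  const FVec scale_v(scale);
  const FVec zp_v(static_cast<float>(zero_point));
  const FVec factor_v(factor);
  const float inv_scale = 1.0f / scale;
  for (int64_t i = 0; i < n; i += QVec::size()) {
    const int count = static_cast<int>(std::min<int64_t>(QVec::size(), n - i));
    QVec q = QVec::loadu(src + i, count);
    auto fvals = q.dequantize(scale_v, zp_v);   // std::array<Vectorized<float>, 4>
    for (auto& fv : fvals) {
      fv = fv * factor_v;                       // full-precision arithmetic
    }
    QVec::quantize(fvals, scale, zero_point, inv_scale).store(dst + i, count);
  }
}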
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vsx_helpers.h ADDED
@@ -0,0 +1,474 @@
1
+ #pragma once
2
+ #include <cstdint>
3
+ #include <c10/macros/Macros.h>
4
+ #include <ATen/cpu/vec/intrinsics.h>
5
+
6
+ #if defined(__clang__)
7
+ typedef __vector __bool char vbool8;
8
+ typedef __vector __bool short vbool16;
9
+ typedef __vector __bool int vbool32;
10
+ typedef __vector __bool long long vbool64;
11
+ using vint8 = __attribute__((vector_size(16))) signed char;
12
+ using vint16 = __attribute__((vector_size(16))) signed short;
13
+ using vint32 = __attribute__((vector_size(16))) signed int;
14
+ using vint64 = __attribute__((vector_size(16))) signed long long;
15
+ using vuint8 = __attribute__((vector_size(16))) unsigned char;
16
+ using vuint16 = __attribute__((vector_size(16))) unsigned short;
17
+ using vuint32 = __attribute__((vector_size(16))) unsigned int;
18
+ using vuint64 = __attribute__((vector_size(16))) unsigned long long;
19
+ using vfloat32 = __attribute__((vector_size(16))) float;
20
+ using vfloat64 = __attribute__((vector_size(16))) double;
21
+ #else
22
+ using vbool8 = __attribute__((altivec(vector__))) __attribute__((altivec(bool__))) char;
23
+ using vbool16 = __attribute__((altivec(vector__))) __attribute__((altivec(bool__))) short;
24
+ using vbool32 = __attribute__((altivec(vector__))) __attribute__((altivec(bool__))) int;
25
+ using vbool64 = __attribute__((altivec(vector__))) __attribute__((altivec(bool__))) long long;
26
+ using vint8 = __attribute__((altivec(vector__))) signed char;
27
+ using vint16 = __attribute__((altivec(vector__))) signed short;
28
+ using vint32 = __attribute__((altivec(vector__))) signed int;
29
+ using vint64 = __attribute__((altivec(vector__))) signed long long;
30
+ using vuint8 = __attribute__((altivec(vector__))) unsigned char;
31
+ using vuint16 = __attribute__((altivec(vector__))) unsigned short;
32
+ using vuint32 = __attribute__((altivec(vector__))) unsigned int;
33
+ using vuint64 = __attribute__((altivec(vector__))) unsigned long long;
34
+ using vfloat32 = __attribute__((altivec(vector__))) float;
35
+ using vfloat64 = __attribute__((altivec(vector__))) double;
36
+ #endif
37
+
38
+ #if !defined(vec_float)
39
+ C10_ALWAYS_INLINE vfloat32 vec_float(const vint32& vec_in) {
40
+ vfloat32 vec_out;
41
+ __asm__("xvcvsxwsp %x0,%x1" : "=wf"(vec_out) : "wa"(vec_in));
42
+ return vec_out;
43
+ }
44
+ #endif
45
+
46
+ #if !defined(vec_signed)
47
+ C10_ALWAYS_INLINE vint32 vec_signed(const vfloat32& vec_in) {
48
+ vint32 vec_out;
49
+ __asm__("xvcvspsxws %x0,%x1" : "=wa"(vec_out) : "wf"(vec_in));
50
+ return vec_out;
51
+ }
52
+
53
+ C10_ALWAYS_INLINE vint64 vec_signed(const vfloat64& vec_in) {
54
+ vint64 vec_out;
55
+ __asm__("xvcvdpsxds %x0,%x1" : "=wa"(vec_out) : "wd"(vec_in));
56
+ return vec_out;
57
+ }
58
+ #endif
59
+
60
+ #if !defined(vec_neg)
61
+ C10_ALWAYS_INLINE vfloat32 vec_neg(const vfloat32& vec_in) {
62
+ vfloat32 vec_out;
63
+ __asm__("xvnegsp %x0,%x1" : "=wf"(vec_out) : "wf"(vec_in));
64
+ return vec_out;
65
+ }
66
+
67
+ C10_ALWAYS_INLINE vfloat64 vec_neg(const vfloat64& vec_in) {
68
+ vfloat64 vec_out;
69
+ __asm__("xvnegdp %x0,%x1" : "=wd"(vec_out) : "wd"(vec_in));
70
+ return vec_out;
71
+ }
72
+
73
+ C10_ALWAYS_INLINE vint16 vec_neg(const vint16& vec_in) {
74
+ vint16 vint0 = {0, 0, 0, 0, 0, 0, 0, 0};
75
+ return vec_vsubuhm(vint0, vec_in);
76
+ }
77
+
78
+ C10_ALWAYS_INLINE vint32 vec_neg(const vint32& vec_in) {
79
+ vint32 vint0 = {0, 0, 0, 0};
80
+ return vec_vsubuwm(vint0, vec_in);
81
+ }
82
+
83
+ C10_ALWAYS_INLINE vint64 vec_neg(const vint64& vec_in) {
84
+ return -vec_in;
85
+ }
86
+ #endif
87
+
88
+ #if !defined(vec_sldw)
89
+ template <unsigned int C>
90
+ C10_ALWAYS_INLINE vfloat32
91
+ vec_sldw_aux(const vfloat32& vec_in0, const vfloat32& vec_in1) {
92
+ vfloat32 vec_out;
93
+ __asm("xxsldwi %x0, %x1, %x2, %3 "
94
+ : "=wa"(vec_out)
95
+ : "wa"(vec_in0), "wa"(vec_in1), "I"(C));
96
+ return vec_out;
97
+ }
98
+
99
+ #define vec_sldw(a, b, c) vec_sldw_aux<c>(a, b)
100
+ #endif
101
+
102
+ #define vec_not(a) vec_nor(a, a)
103
+ #if defined(__clang__) && !defined(vec_splats)
104
+ C10_ALWAYS_INLINE vint64 vec_splats(const int64_t& a) {
105
+ return vec_splats(a);
106
+ }
107
+ #endif
108
+ // Vectorized min/max which return a if any operand is nan
109
+ template <class T>
110
+ C10_ALWAYS_INLINE T vec_min_nan(const T& a, const T& b) {
111
+ return vec_min(a, b);
112
+ }
113
+ template <class T>
114
+ C10_ALWAYS_INLINE T vec_max_nan(const T& a, const T& b) {
115
+ return vec_max(a, b);
116
+ }
117
+
118
+ // Specializations for float/double taken from Eigen
119
+ template<>
120
+ C10_ALWAYS_INLINE vfloat32 vec_min_nan<vfloat32>(const vfloat32& a, const vfloat32& b)
121
+ {
122
+ // NOTE: about 10% slower than vec_min, but consistent with std::min and SSE regarding NaN
123
+ vfloat32 ret;
124
+ __asm__ ("xvcmpgesp %x0,%x1,%x2\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b));
125
+ return ret;
126
+ }
127
+ // Specializations for float/double taken from Eigen
128
+ template<>
129
+ C10_ALWAYS_INLINE vfloat32 vec_max_nan<vfloat32>(const vfloat32& a, const vfloat32& b)
130
+ {
131
+ // NOTE: about 10% slower than vec_max, but consistent with std::max and SSE regarding NaN
132
+ vfloat32 ret;
133
+ __asm__ ("xvcmpgtsp %x0,%x2,%x1\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b));
134
+ return ret;
135
+ }
136
+
137
+ template<>
138
+ C10_ALWAYS_INLINE vfloat64 vec_min_nan<vfloat64>(const vfloat64& a, const vfloat64& b)
139
+ {
140
+ // NOTE: about 10% slower than vec_min, but consistent with std::min and SSE regarding NaN
141
+ vfloat64 ret;
142
+ __asm__ ("xvcmpgedp %x0,%x1,%x2\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b));
143
+ return ret;
144
+ }
145
+ template<>
146
+ C10_ALWAYS_INLINE vfloat64 vec_max_nan<vfloat64>(const vfloat64& a, const vfloat64& b)
147
+ {
148
+ // NOTE: about 10% slower than vec_max, but consistent with std::max and SSE regarding NaN
149
+ vfloat64 ret;
150
+ __asm__ ("xvcmpgtdp %x0,%x2,%x1\n\txxsel %x0,%x1,%x2,%x0" : "=&wa" (ret) : "wa" (a), "wa" (b));
151
+ return ret;
152
+ }
153
+
154
+ // Vectorizes min/max function which returns nan if any side is nan
155
+ #define C10_VSX_VEC_NAN_PROPAG(name, type, btype, func) \
156
+ C10_ALWAYS_INLINE type name(const type& a, const type& b) { \
157
+ type tmp = func(a, b); \
158
+ btype nan_a = vec_cmpne(a, a); \
159
+ btype nan_b = vec_cmpne(b, b); \
160
+ tmp = vec_sel(tmp, a, nan_a); \
161
+ return vec_sel(tmp, b, nan_b); \
162
+ }
163
+
164
+ C10_VSX_VEC_NAN_PROPAG(vec_min_nan2, vfloat32, vbool32, vec_min)
165
+ C10_VSX_VEC_NAN_PROPAG(vec_max_nan2, vfloat32, vbool32, vec_max)
166
+ C10_VSX_VEC_NAN_PROPAG(vec_min_nan2, vfloat64, vbool64, vec_min)
167
+ C10_VSX_VEC_NAN_PROPAG(vec_max_nan2, vfloat64, vbool64, vec_max)
168
+
169
+ #undef C10_VSX_VEC_NAN_PROPAG
170
+
171
+ #define DEFINE_MEMBER_UNARY_OP(op, op_type, func) \
172
+ Vectorized<op_type> C10_ALWAYS_INLINE op() const { \
173
+ return Vectorized<op_type>{func(_vec0), func(_vec1)}; \
174
+ }
175
+
176
+ #define DEFINE_MEMBER_OP(op, op_type, func) \
177
+ Vectorized<op_type> C10_ALWAYS_INLINE op(const Vectorized<op_type>& other) const { \
178
+ return Vectorized<op_type>{ \
179
+ func(_vec0, other._vec0), func(_vec1, other._vec1)}; \
180
+ }
181
+
182
+ #define DEFINE_MEMBER_BITWISE_OP(op, op_type, func) \
183
+ Vectorized<op_type> C10_ALWAYS_INLINE op(const Vectorized<op_type>& other) const { \
184
+ return Vectorized<op_type>{ \
185
+ func(_vecb0, other._vecb0), func(_vecb1, other._vecb1)}; \
186
+ }
187
+
188
+ #define DEFINE_MEMBER_TERNARY_OP(op, op_type, func) \
189
+ Vectorized<op_type> C10_ALWAYS_INLINE op( \
190
+ const Vectorized<op_type>& b, const Vectorized<op_type>& c) const { \
191
+ return Vectorized<op_type>{ \
192
+ func(_vec0, b._vec0, c._vec0), func(_vec1, b._vec1, c._vec1)}; \
193
+ }
194
+
195
+ #define DEFINE_MEMBER_EMULATE_BINARY_OP(op, op_type, binary_op) \
196
+ Vectorized<op_type> C10_ALWAYS_INLINE op(const Vectorized<op_type>& b) const { \
197
+ Vectorized<op_type>::vec_internal_type ret_0; \
198
+ Vectorized<op_type>::vec_internal_type ret_1; \
199
+ for (int i = 0; i < Vectorized<op_type>::size() / 2; i++) { \
200
+ ret_0[i] = _vec0[i] binary_op b._vec0[i]; \
201
+ ret_1[i] = _vec1[i] binary_op b._vec1[i]; \
202
+ } \
203
+ return Vectorized<op_type>{ret_0, ret_1}; \
204
+ }
205
+
206
+
207
+ #define DEFINE_MEMBER_OP_AND_ONE(op, op_type, func) \
208
+ Vectorized<op_type> C10_ALWAYS_INLINE op(const Vectorized<op_type>& other) const { \
209
+ using vvtype = Vectorized<op_type>::vec_internal_type; \
210
+ const vvtype v_one = vec_splats(static_cast<op_type>(1.0)); \
211
+ vvtype ret0 = (vvtype)func(_vec0, other._vec0); \
212
+ vvtype ret1 = (vvtype)func(_vec1, other._vec1); \
213
+ return Vectorized<op_type>{vec_and(ret0, v_one), vec_and(ret1, v_one)}; \
214
+ }
215
+
216
+ #define DEFINE_CLAMP_FUNCS(operand_type) \
217
+ template <> \
218
+ Vectorized<operand_type> C10_ALWAYS_INLINE clamp( \
219
+ const Vectorized<operand_type>& a, \
220
+ const Vectorized<operand_type>& min, \
221
+ const Vectorized<operand_type>& max) { \
222
+ return Vectorized<operand_type>{ \
223
+ vec_min_nan(vec_max_nan(a.vec0(), min.vec0()), max.vec0()), \
224
+ vec_min_nan(vec_max_nan(a.vec1(), min.vec1()), max.vec1())}; \
225
+ } \
226
+ template <> \
227
+ Vectorized<operand_type> C10_ALWAYS_INLINE clamp_min( \
228
+ const Vectorized<operand_type>& a, const Vectorized<operand_type>& min) { \
229
+ return Vectorized<operand_type>{ \
230
+ vec_max_nan(a.vec0(), min.vec0()), \
231
+ vec_max_nan(a.vec1(), min.vec1())}; \
232
+ } \
233
+ template <> \
234
+ Vectorized<operand_type> C10_ALWAYS_INLINE clamp_max( \
235
+ const Vectorized<operand_type>& a, const Vectorized<operand_type>& max) { \
236
+ return Vectorized<operand_type>{ \
237
+ vec_min_nan(a.vec0(), max.vec0()), \
238
+ vec_min_nan(a.vec1(), max.vec1())}; \
239
+ }
240
+
241
+ #define DEFINE_REINTERPRET_CAST_FUNCS( \
242
+ first_type, cast_type, cast_inner_vector_type) \
243
+ template <> \
244
+ C10_ALWAYS_INLINE Vectorized<cast_type> cast<cast_type, first_type>( \
245
+ const Vectorized<first_type>& src) { \
246
+ return Vectorized<cast_type>{(cast_inner_vector_type)src.vec0(), \
247
+ (cast_inner_vector_type)src.vec1()}; \
248
+ }
249
+
250
+ #define DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(first_type) \
251
+ DEFINE_REINTERPRET_CAST_FUNCS(first_type, double, vfloat64) \
252
+ DEFINE_REINTERPRET_CAST_FUNCS(first_type, float, vfloat32) \
253
+ DEFINE_REINTERPRET_CAST_FUNCS(first_type, int64_t, vint64) \
254
+ DEFINE_REINTERPRET_CAST_FUNCS(first_type, int32_t, vint32) \
255
+ DEFINE_REINTERPRET_CAST_FUNCS(first_type, int16_t, vint16)
256
+
257
+ // it can be used to emulate blend faster
258
+ constexpr int blendChoice(uint32_t mask, uint32_t half1 = 0xF, uint32_t half2 = 0xF0) {
259
+ uint32_t none = 0;
260
+ uint32_t both = half1 | half2;
261
+ // clamp it between 0 and both
262
+ mask = mask & both;
263
+ // return (a._vec0, a._vec1)
264
+ if (mask == none) return 0;
265
+ // return (b._vec0,b._vec1)
266
+ else if (mask == both)
267
+ return 1;
268
+ // return (b._vec0,a._vec1)
269
+ else if (mask == half1)
270
+ return 2;
271
+ // return (a._vec0,b._vec1)
272
+ else if (mask == half2)
273
+ return 3;
274
+ // return (*_vec0,a._vec1)
275
+ else if (mask > 0 && mask < half1)
276
+ return 4;
277
+ // return (*_vec0,b._vec1)
278
+ else if ((mask & half2) == half2)
279
+ return 5;
280
+ // return (a._vec0,*_vec1)
281
+ else if ((mask & half1) == 0 && mask > half1)
282
+ return 6;
283
+ // return (b._vec0,*_vec1)
284
+ else if ((mask & half1) == half1 && mask > half1)
285
+ return 7;
286
+ // return (*_vec0,*_vec1)
287
+ return 8;
288
+ }
289
+
290
+ // it can be used to emulate blend faster
291
+ constexpr int blendChoiceDbl(uint32_t mask) {
292
+ // clamp it 0 and 0xF
293
+ return blendChoice(mask, 0x3, 0xC);
294
+ }
295
+
296
+ constexpr vbool32 VsxMask1(uint32_t mask) {
297
+ uint32_t g0 = (mask & 1) * 0xffffffff;
298
+ uint32_t g1 = ((mask & 2) >> 1) * 0xffffffff;
299
+ uint32_t g2 = ((mask & 4) >> 2) * 0xffffffff;
300
+ uint32_t g3 = ((mask & 8) >> 3) * 0xffffffff;
301
+ return (vbool32){g0, g1, g2, g3};
302
+ }
303
+
304
+ constexpr vbool32 VsxMask2(uint32_t mask) {
305
+ uint32_t mask2 = (mask & 0xFF) >> 4;
306
+ return VsxMask1(mask2);
307
+ }
308
+
309
+ constexpr vbool64 VsxDblMask1(uint32_t mask) {
310
+ uint64_t g0 = (mask & 1) * 0xffffffffffffffff;
311
+ uint64_t g1 = ((mask & 2) >> 1) * 0xffffffffffffffff;
312
+ return (vbool64){g0, g1};
313
+ }
314
+
315
+ constexpr vbool64 VsxDblMask2(uint32_t mask) {
316
+ uint32_t mask2 = (mask & 0xF) >> 2;
317
+ return VsxDblMask1(mask2);
318
+ }
319
+
320
+ constexpr int maskForComplex(uint32_t mask) {
321
+ mask = mask & 0xF;
322
+ int complex_mask = 0;
323
+ if (mask & 1) complex_mask |= 3;
324
+ if (mask & 2) complex_mask |= (3 << 2);
325
+ if (mask & 4) complex_mask |= (3 << 4);
326
+ if (mask & 8) complex_mask |= (3 << 6);
327
+ return complex_mask;
328
+ }
329
+
330
+ constexpr int maskForComplexDbl(uint32_t mask) {
331
+ mask = mask & 0x3;
332
+ int complex_mask = 0;
333
+ if (mask & 1) complex_mask |= 3;
334
+ if (mask & 2) complex_mask |= (3 << 2);
335
+ return complex_mask;
336
+ }
337
+
338
+ constexpr int blendChoiceComplex(uint32_t mask) {
339
+ return blendChoice(maskForComplex(mask));
340
+ }
341
+
342
+ constexpr int blendChoiceComplexDbl(uint32_t mask) {
343
+ return blendChoiceDbl(maskForComplexDbl(mask));
344
+ }
345
+
346
+ constexpr vbool32 VsxComplexMask1(uint32_t mask) {
347
+ return VsxMask1(maskForComplex(mask));
348
+ }
349
+
350
+ constexpr vbool32 VsxComplexMask2(uint32_t mask) {
351
+ uint32_t mask2 = (mask & 0xF) >> 2;
352
+ return VsxMask1(maskForComplex(mask2));
353
+ }
354
+
355
+ constexpr vbool64 VsxComplexDblMask1(uint32_t mask) { return VsxDblMask1(mask); }
356
+
357
+ constexpr vbool64 VsxComplexDblMask2(uint32_t mask) {
358
+ uint32_t mask2 = (mask & 0xF) >> 2;
359
+ return VsxDblMask1(mask2);
360
+ }
361
+
362
+ // constants
363
+ namespace at {
364
+ namespace vec {
365
+ // See Note [CPU_CAPABILITY namespace]
366
+ inline namespace CPU_CAPABILITY {
367
+ //
368
+ constexpr int offset0 = 0;
369
+ constexpr int offset16 = 16;
370
+
371
+ // #Constants
372
+ const vuint8 mask_zero_bits = vuint8{128, 128, 128, 128, 128, 128, 128, 128,
373
+ 128, 128, 128, 128, 96, 64, 32, 0};
374
+
375
+ const vuint8 swap_mask =
376
+ vuint8{4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11};
377
+
378
+ const vint32 v0x7f = vec_splats(0x7f);
379
+ const vint32 vi_0 = vec_splats((int)(0));
380
+ const vint32 vi_1 = vec_splats((int)1);
381
+ const vint32 vi_2 = vec_splats((int)2);
382
+ const vint32 vi_4 = vec_splats((int)4);
383
+ const vint32 vi_inv1 = vec_splats((int)~1);
384
+ const vuint32 vu_29 = vec_splats(29u);
385
+ const vuint32 vu_23 = vec_splats(23u);
386
+
387
+ const vbool32 inv_mant_mask = (vbool32)vec_splats((unsigned int)~0xff800000);
388
+ const vbool32 sign_mask = (vbool32)vec_splats((int)0x80000000);
389
+ const vbool32 real_mask = vbool32{0xFFFFFFFF, 0x0, 0xFFFFFFFF, 0x0};
390
+ const vbool32 imag_mask = vbool32{0x0, 0xFFFFFFFF, 0x0, 0xFFFFFFFF};
391
+ const vbool32 isign_mask = vbool32{0x0, 0x80000000, 0x0, 0x80000000};
392
+ const vbool32 rsign_mask = vbool32{0x80000000, 0x0, 0x80000000, 0x0};
393
+
394
+ const vbool64 vd_sign_mask = vbool64{0x8000000000000000, 0x8000000000000000};
395
+ const vbool64 vd_imag_mask = vbool64{0x0, 0xFFFFFFFFFFFFFFFF};
396
+ const vbool64 vd_real_mask = vbool64{0xFFFFFFFFFFFFFFFF, 0x0};
397
+ const vbool64 vd_isign_mask = vbool64{0x0, 0x8000000000000000};
398
+ const vbool64 vd_rsign_mask = vbool64{0x8000000000000000, 0x0};
399
+
400
+ const vfloat32 zero = vec_splats(0.f);
401
+ const vfloat32 half = vec_splats(0.5f);
402
+ const vfloat32 one = vec_splats(1.f);
403
+ const vfloat32 two = vec_splats(2.0f);
404
+ const vfloat32 _4div_pi = vec_splats(1.27323954473516f);
405
+ const vfloat32 v_inf = (vfloat32)vec_splats(0x7f800000u);
406
+ const vfloat32 v_minus_inf = vfloat32{ 0xff800000u, 0xff800000u, 0xff800000u, 0xff800000u };
407
+ const vfloat32 v_nan = (vfloat32)vec_splats(0x7fffffff);
408
+ const vfloat32 log10e_inv = vec_splats(0.43429448190325176f);
409
+ const vfloat32 log2e_inv = vec_splats(1.4426950408889634f);
410
+ const vfloat32 log2eB_inv = vec_splats(1.442695036924675f);
411
+ const vfloat32 cephes_SQRTHF = vec_splats(0.707106781186547524f);
412
+ const vfloat32 coscof_p0 = vec_splats(2.443315711809948E-005f);
413
+ const vfloat32 coscof_p1 = vec_splats(-1.388731625493765E-003f);
414
+ const vfloat32 coscof_p2 = vec_splats(4.166664568298827E-002f);
415
+ const vfloat32 exp_hi = vec_splats(104.f);
416
+ const vfloat32 exp_lo = vec_splats(-104.f);
417
+ const vfloat32 exp_p0 = vec_splats(0.000198527617612853646278381f);
418
+ const vfloat32 exp_p1 = vec_splats((0.00139304355252534151077271f));
419
+ const vfloat32 exp_p2 = vec_splats(0.00833336077630519866943359f);
420
+ const vfloat32 exp_p3 = vec_splats(0.0416664853692054748535156f);
421
+ const vfloat32 exp_p4 = vec_splats(0.166666671633720397949219f);
422
+ const vfloat32 exp_p5 = vec_splats(0.5f);
423
+ const vfloat32 log_p0 = vec_splats(7.0376836292E-2f);
424
+ const vfloat32 log_p1 = vec_splats(-1.1514610310E-1f);
425
+ const vfloat32 log_p2 = vec_splats(1.1676998740E-1f);
426
+ const vfloat32 log_p3 = vec_splats(-1.2420140846E-1f);
427
+ const vfloat32 log_p4 = vec_splats(+1.4249322787E-1f);
428
+ const vfloat32 log_p5 = vec_splats(-1.6668057665E-1f);
429
+ const vfloat32 log_p6 = vec_splats(+2.0000714765E-1f);
430
+ const vfloat32 log_p7 = vec_splats(-2.4999993993E-1f);
431
+ const vfloat32 log_p8 = vec_splats(+3.3333331174E-1f);
432
+ const vfloat32 log_q1 = vec_splats(-2.12194440e-4f);
433
+ const vfloat32 log_q2 = vec_splats(0.693359375f);
434
+ const vfloat32 max_logf = vec_splats(88.02969187150841f);
435
+ const vfloat32 max_numf = vec_splats(1.7014117331926442990585209174225846272e38f);
436
+ const vfloat32 min_inf = (vfloat32)vec_splats(0xff800000u);
437
+ const vfloat32 min_norm_pos = (vfloat32)vec_splats(0x0800000u);
438
+ const vfloat32 minus_cephes_dp1 = vec_splats(-0.78515625f);
439
+ const vfloat32 minus_cephes_dp2 = vec_splats(-2.4187564849853515625e-4f);
440
+ const vfloat32 minus_cephes_dp3 = vec_splats(-3.77489497744594108e-8f);
441
+ const vfloat32 negln2f_hi = vec_splats(-0.693145751953125f);
442
+ const vfloat32 negln2f_lo = vec_splats(-1.428606765330187045e-06f);
443
+ const vfloat32 p0 = vec_splats(2.03721912945E-4f);
444
+ const vfloat32 p1 = vec_splats(8.33028376239E-3f);
445
+ const vfloat32 p2 = vec_splats(1.66667160211E-1f);
446
+ const vfloat32 sincof_p0 = vec_splats(-1.9515295891E-4f);
447
+ const vfloat32 sincof_p1 = vec_splats(8.3321608736E-3f);
448
+ const vfloat32 sincof_p2 = vec_splats(-1.6666654611E-1f);
449
+ const vfloat32 tanh_0p625 = vec_splats(0.625f);
450
+ const vfloat32 tanh_half_max = vec_splats(44.014845935754205f);
451
+ const vfloat32 tanh_p0 = vec_splats(-5.70498872745E-3f);
452
+ const vfloat32 tanh_p1 = vec_splats(2.06390887954E-2f);
453
+ const vfloat32 tanh_p2 = vec_splats(-5.37397155531E-2f);
454
+ const vfloat32 tanh_p3 = vec_splats(1.33314422036E-1f);
455
+ const vfloat32 tanh_p4 = vec_splats(-3.33332819422E-1f);
456
+ const vfloat32 vcheck = vec_splats((float)(1LL << 24));
457
+ const vfloat32 imag_one = vfloat32{0.f, 1.f, 0.f, 1.f};
458
+ const vfloat32 imag_half = vfloat32{0.f, 0.5f, 0.f, 0.5f};
459
+ const vfloat32 sqrt2_2 = vfloat32{0.70710676908493042f, 0.70710676908493042,
460
+ 0.70710676908493042, 0.70710676908493042};
461
+ const vfloat32 pi_2 = vfloat32{M_PI / 2, 0.0, M_PI / 2, 0.0};
462
+ const vfloat32 vf_89 = vfloat32{89.f, 89.f, 89.f, 89.f};
463
+ const vfloat64 vd_one = vec_splats(1.0);
464
+ const vfloat64 vd_zero = vec_splats(0.0);
465
+ const vfloat64 vd_log10e_inv = vec_splats(0.43429448190325176);
466
+ const vfloat64 vd_log2e_inv = vec_splats(1.4426950408889634);
467
+ const vfloat64 vd_imag_one = vfloat64{0.0, 1.0};
468
+ const vfloat64 vd_imag_half = vfloat64{0.0, 0.5};
469
+ const vfloat64 vd_sqrt2_2 = vfloat64{0.70710678118654757, 0.70710678118654757};
470
+ const vfloat64 vd_pi_2 = vfloat64{M_PI / 2.0, 0.0};
471
+
472
+ } // namespace
473
+ } // namespace vec
474
+ } // namespace at
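vsx_helpers.h builds its NaN-propagating min/max (vec_min_nan2 / vec_max_nan2) out of vec_min/vec_max, vec_cmpne and vec_sel via the C10_VSX_VEC_NAN_PROPAG macro. The following scalar sketch of the element-wise behaviour is for illustration only and is not part of the commit.

#include <cmath>

// Element-wise behaviour of C10_VSX_VEC_NAN_PROPAG(vec_min_nan2, ...):
// take the ordinary minimum, then overwrite the result with whichever
// input is NaN (b wins if both are NaN), so NaNs propagate to the output.
static inline float min_nan2_scalar(float a, float b) {
  float tmp = (a < b) ? a : b;   // vec_min
  if (std::isnan(a)) tmp = a;    // vec_sel(tmp, a, vec_cmpne(a, a))
  if (std::isnan(b)) tmp = b;    // vec_sel(tmp, b, vec_cmpne(b, b))
  return tmp;
}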
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/zarch/vec256_zarch.h ADDED
@@ -0,0 +1,2818 @@
1
+ #include <cmath>
2
+ #include <cstring>
3
+ #include <limits>
4
+ #include <type_traits>
5
+ #include <utility>
6
+ #if defined(__clang__)
7
+ #include <sleef.h>
8
+ #elif defined(__GNUC__) || defined(__GNUG__)
9
+ #include <sleef.h>
10
+ #include <vecintrin.h>
11
+ #endif
12
+ #include <ATen/cpu/vec/intrinsics.h>
13
+ #include <ATen/cpu/vec/vec_base.h>
14
+ #include <c10/util/complex.h>
15
+
16
+ #define SLEEF_MEMORY_WORKAROUND
17
+
18
+ namespace at {
19
+ namespace vec {
20
+
21
+ // See Note [CPU_CAPABILITY namespace]
22
+ inline namespace CPU_CAPABILITY {
23
+
24
+ template <typename T>
25
+ constexpr bool is_zarch_implemented() {
26
+ return (
27
+ std::is_same<T, float>::value || std::is_same<T, double>::value ||
28
+ std::is_same<T, int8_t>::value || std::is_same<T, uint8_t>::value ||
29
+ std::is_same<T, uint16_t>::value || std::is_same<T, int16_t>::value ||
30
+ std::is_same<T, int32_t>::value || std::is_same<T, int64_t>::value);
31
+ }
32
+
33
+ template <typename T>
34
+ constexpr bool is_zarch_implemented_quant() {
35
+ return (
36
+ std::is_same<T, c10::qint32>::value ||
37
+ std::is_same<T, c10::qint8>::value ||
38
+ std::is_same<T, c10::quint8>::value);
39
+ }
40
+
41
+ template <typename T>
42
+ constexpr bool is_zarch_implemented_complex() {
43
+ return std::is_same<T, c10::complex<float>>::value ||
44
+ std::is_same<T, c10::complex<double>>::value;
45
+ }
46
+
47
+ constexpr int offset0 = 0;
48
+ constexpr int offset16 = 16;
49
+
50
+ template <int N>
51
+ struct VecBinaryType {
52
+ using type __attribute__((vector_size(16))) = uintmax_t;
53
+ };
54
+
55
+ template <>
56
+ struct VecBinaryType<8> {
57
+ using type = __attribute__((vector_size(16))) unsigned long long;
58
+ };
59
+
60
+ template <>
61
+ struct VecBinaryType<4> {
62
+ using type = __attribute__((vector_size(16))) unsigned int;
63
+ };
64
+
65
+ template <>
66
+ struct VecBinaryType<2> {
67
+ using type = __attribute__((vector_size(16))) unsigned short;
68
+ };
69
+
70
+ template <>
71
+ struct VecBinaryType<1> {
72
+ using type = __attribute__((vector_size(16))) unsigned char;
73
+ };
74
+
75
+ template <typename T>
76
+ struct VecInnerType {
77
+ using Type __attribute__((vector_size(16))) = T;
78
+ using BinaryType = typename VecBinaryType<sizeof(T)>::type;
79
+ using ElementType = T;
80
+ static constexpr int size = 16 / sizeof(T);
81
+ };
82
+
83
+ // define for int64_t properly for load
84
+ template <>
85
+ struct VecInnerType<int64_t> {
86
+ using Type = __attribute__((vector_size(16))) signed long long;
87
+ using ElementType = signed long long;
88
+ using BinaryType = typename VecBinaryType<sizeof(signed long long)>::type;
89
+ static constexpr int size = 16 / sizeof(signed long long);
90
+ };
91
+
92
+ template <typename T>
93
+ using ZSimdVect = typename VecInnerType<T>::Type;
94
+ template <typename T>
95
+ using ZSimdVectBinary = typename VecInnerType<T>::BinaryType;
96
+ template <typename T>
97
+ using ZSimdVectElement = typename VecInnerType<T>::ElementType;
98
+
99
+ constexpr int blendChoiceInner(
100
+ const uint64_t mask,
101
+ const uint64_t half1 = 0xF,
102
+ const uint64_t half2 = 0xF0) {
103
+ uint64_t none = 0;
104
+ uint64_t both = half1 | half2;
105
+ // clamp it between 0 and both
106
+ auto res_mask = mask & both;
107
+ // return (a._vec0, a._vec1)
108
+ if (res_mask == none)
109
+ return 0;
110
+ // return (b._vec0,b._vec1)
111
+ else if (res_mask == both)
112
+ return 1;
113
+ // return (b._vec0, a._vec1)
114
+ else if (res_mask == half1)
115
+ return 2;
116
+ // return (a._vec0,b._vec1)
117
+ else if (res_mask == half2)
118
+ return 3;
119
+ // return (*_vec0,a._vec1)
120
+ else if (res_mask > 0 && res_mask < half1)
121
+ return 4;
122
+ // return (*_vec0,b._vec1)
123
+ else if ((res_mask & half2) == half2)
124
+ return 5;
125
+ // return (a._vec0,*_vec1)
126
+ else if ((res_mask & half1) == 0 && res_mask > half1)
127
+ return 6;
128
+ // return (b._vec0,*_vec1)
129
+ else if ((res_mask & half1) == half1 && res_mask > half1)
130
+ return 7;
131
+ // return (*_vec0,*_vec1)
132
+ return 8;
133
+ }
134
+
135
+ // it can be used to emulate blend faster
136
+ template <int Z>
137
+ constexpr int blendChoice(const uint64_t mask) {
138
+ static_assert(Z < 1 || Z > 8, "not implemented");
139
+ return blendChoiceInner(mask);
140
+ }
141
+
142
+ template <>
143
+ constexpr int blendChoice<1>(const uint64_t mask) {
144
+ return blendChoiceInner(mask, 0x0000FFFF, 0xFFFF0000);
145
+ }
146
+
147
+ template <>
148
+ constexpr int blendChoice<2>(const uint64_t mask) {
149
+ return blendChoiceInner(mask, 0x00FF, 0xFF00);
150
+ }
151
+
152
+ template <>
153
+ constexpr int blendChoice<4>(const uint64_t mask) {
154
+ return blendChoiceInner(mask, 0xF, 0xF0);
155
+ }
156
+
157
+ template <>
158
+ constexpr int blendChoice<8>(const uint64_t mask) {
159
+ // clamp it 0 and 0xF
160
+ return blendChoiceInner(mask, 0x3, 0xC);
161
+ }
162
+
163
+ template <int N>
164
+ constexpr auto GetMask1(const uint64_t mask) {
165
+ return typename VecBinaryType<N>::type{};
166
+ }
167
+
168
+ template <int N>
169
+ constexpr auto GetMask2(const uint64_t mask) {
170
+ return typename VecBinaryType<N>::type{};
171
+ }
172
+
173
+ template <>
174
+ constexpr auto GetMask1<1>(const uint64_t mask) {
175
+ constexpr uint8_t t = (int)0xFF;
176
+ uint8_t g0 = (mask & 1) * t;
177
+ uint8_t g1 = ((mask & 2) >> 1) * t;
178
+ uint8_t g2 = ((mask & 4) >> 2) * t;
179
+ uint8_t g3 = ((mask & 8) >> 3) * t;
180
+ uint8_t g4 = ((mask & 16) >> 4) * t;
181
+ uint8_t g5 = ((mask & 32) >> 5) * t;
182
+ uint8_t g6 = ((mask & 64) >> 6) * t;
183
+ uint8_t g7 = ((mask & 128) >> 7) * t;
184
+ uint8_t g8 = ((mask & 256) >> 8) * t;
185
+ uint8_t g9 = ((mask & 512) >> 9) * t;
186
+ uint8_t g10 = ((mask & 1024) >> 10) * t;
187
+ uint8_t g11 = ((mask & 2048) >> 11) * t;
188
+ uint8_t g12 = ((mask & 4096) >> 12) * t;
189
+ uint8_t g13 = ((mask & 8192) >> 13) * t;
190
+ uint8_t g14 = ((mask & 16384) >> 14) * t;
191
+ uint8_t g15 = ((mask & 32768) >> 15) * t;
192
+ return (typename VecBinaryType<1>::type){
193
+ g0, g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, g11, g12, g13, g14, g15};
194
+ }
195
+
196
+ template <>
197
+ constexpr auto GetMask2<1>(const uint64_t mask) {
198
+ uint64_t mask2 = (mask & 0xFFFFFFFF) >> 16;
199
+ return GetMask1<1>(mask2);
200
+ }
201
+
202
+ template <>
203
+ constexpr auto GetMask1<2>(const uint64_t mask) {
204
+ constexpr uint16_t t = (int)0xFFFF;
205
+ uint16_t g0 = (mask & 1) * t;
206
+ uint16_t g1 = ((mask & 2) >> 1) * t;
207
+ uint16_t g2 = ((mask & 4) >> 2) * t;
208
+ uint16_t g3 = ((mask & 8) >> 3) * t;
209
+ uint16_t g4 = ((mask & 16) >> 4) * t;
210
+ uint16_t g5 = ((mask & 32) >> 5) * t;
211
+ uint16_t g6 = ((mask & 64) >> 6) * t;
212
+ uint16_t g7 = ((mask & 128) >> 7) * t;
213
+ return (typename VecBinaryType<2>::type){g0, g1, g2, g3, g4, g5, g6, g7};
214
+ }
215
+
216
+ template <>
217
+ constexpr auto GetMask2<2>(const uint64_t mask) {
218
+ uint64_t mask2 = (mask & 0xFFFF) >> 8;
219
+ return GetMask1<2>(mask2);
220
+ }
221
+
222
+ template <>
223
+ constexpr auto GetMask1<4>(const uint64_t mask) {
224
+ uint32_t g0 = (mask & 1) * 0xffffffff;
225
+ uint32_t g1 = ((mask & 2) >> 1) * 0xffffffff;
226
+ uint32_t g2 = ((mask & 4) >> 2) * 0xffffffff;
227
+ uint32_t g3 = ((mask & 8) >> 3) * 0xffffffff;
228
+ return (typename VecBinaryType<4>::type){g0, g1, g2, g3};
229
+ }
230
+
231
+ template <>
232
+ constexpr auto GetMask2<4>(const uint64_t mask) {
233
+ uint64_t mask2 = (mask & 0xFF) >> 4;
234
+ return GetMask1<4>(mask2);
235
+ }
236
+
237
+ template <>
238
+ constexpr auto GetMask1<8>(const uint64_t mask) {
239
+ uint64_t g0 = (mask & 1) * 0xffffffffffffffff;
240
+ uint64_t g1 = ((mask & 2) >> 1) * 0xffffffffffffffff;
241
+ return (typename VecBinaryType<8>::type){g0, g1};
242
+ }
243
+
244
+ template <>
245
+ constexpr auto GetMask2<8>(const uint64_t mask) {
246
+ uint64_t mask2 = (mask & 0xF) >> 2;
247
+ return GetMask1<8>(mask2);
248
+ }
249
+
250
+ template <int Z>
251
+ constexpr int maskForComplex(uint32_t mask) {
252
+ return 0;
253
+ }
254
+
255
+ template <>
256
+ constexpr int maskForComplex<8>(uint32_t mask) {
257
+ mask = mask & 0xF;
258
+ int complex_mask = 0;
259
+ if (mask & 1)
260
+ complex_mask |= 3;
261
+ if (mask & 2)
262
+ complex_mask |= (3 << 2);
263
+ if (mask & 4)
264
+ complex_mask |= (3 << 4);
265
+ if (mask & 8)
266
+ complex_mask |= (3 << 6);
267
+ return complex_mask;
268
+ }
269
+
270
+ template <>
271
+ constexpr int maskForComplex<16>(uint32_t mask) {
272
+ mask = mask & 0x3;
273
+ int complex_mask = 0;
274
+ if (mask & 1)
275
+ complex_mask |= 3;
276
+ if (mask & 2)
277
+ complex_mask |= (3 << 2);
278
+ return complex_mask;
279
+ }
280
+
281
+ template <typename T = c10::complex<float>>
282
+ constexpr int blend_choice() {
283
+ return 0xAA;
284
+ }
285
+
286
+ template <>
287
+ constexpr int blend_choice<c10::complex<double>>() {
288
+ return 0x0A;
289
+ }
290
+
291
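+ // allbitset(x) returns an integer with the low x bits set,
+ // e.g. allbitset(3) == 0b111; set() below uses it to build the blend mask for a
+ // partial lane count.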
+ constexpr int64_t allbitset(int16_t x) {
292
+ int64_t onex = 1;
293
+ return (onex << x) - onex;
294
+ }
295
+
296
+ namespace { /* unnamed namespace */
297
+
298
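+ // vec_mergee / vec_mergeo gather the even- / odd-indexed elements of two
+ // vectors (for float the results are {x0, y0, x2, y2} and {x1, y1, x3, y3});
+ // for double each vector holds only two lanes, so they reduce to
+ // vec_mergeh / vec_mergel.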
+ ZSimdVect<float> vec_mergee(ZSimdVect<float> x, ZSimdVect<float> y) {
299
+ constexpr ZSimdVectBinary<uint8_t> mergee_mask{
300
+ 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27};
301
+ return vec_perm(x, y, mergee_mask);
302
+ }
303
+
304
+ ZSimdVect<double> vec_mergee(ZSimdVect<double> x, ZSimdVect<double> y) {
305
+ return vec_mergeh(x, y);
306
+ }
307
+
308
+ ZSimdVect<float> vec_mergeo(ZSimdVect<float> x, ZSimdVect<float> y) {
309
+ constexpr ZSimdVectBinary<uint8_t> mergeo_mask{
310
+ 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31};
311
+ return vec_perm(x, y, mergeo_mask);
312
+ }
313
+
314
+ ZSimdVect<double> vec_mergeo(ZSimdVect<double> x, ZSimdVect<double> y) {
315
+ return vec_mergel(x, y);
316
+ }
317
+
318
+ } /* unnamed namespace */
319
+
320
+ //
321
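+ // Bit-permute masks for zero_mask(): the trailing entries pick the most
+ // significant bit of each comparison lane (bit offsets 0/32/64/96 for float,
+ // 0/64 for double), while entries of 128 select a constant zero bit under
+ // vec_bperm_u128 semantics.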
+ template <typename T>
322
+ constexpr auto GetBpermZeroMask() {
323
+ return ZSimdVectBinary<uint8_t>{
324
+ 128,
325
+ 128,
326
+ 128,
327
+ 128,
328
+ 128,
329
+ 128,
330
+ 128,
331
+ 128,
332
+ 128,
333
+ 128,
334
+ 128,
335
+ 128,
336
+ 96,
337
+ 64,
338
+ 32,
339
+ 0};
340
+ }
341
+
342
+ template <>
343
+ constexpr auto GetBpermZeroMask<double>() {
344
+ return ZSimdVectBinary<uint8_t>{
345
+ 128,
346
+ 128,
347
+ 128,
348
+ 128,
349
+ 128,
350
+ 128,
351
+ 128,
352
+ 128,
353
+ 128,
354
+ 128,
355
+ 128,
356
+ 128,
357
+ 128,
358
+ 128,
359
+ 64,
360
+ 0};
361
+ }
362
+
363
+ constexpr auto GetSwapMaskFloat() {
364
+ return ZSimdVectBinary<uint8_t>{
365
+ 4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11};
366
+ }
367
+
368
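+ // Generic z/Architecture Vectorized<T>: every element type is represented by a
+ // pair of 16-byte hardware vectors (_vec0, _vec1), so
+ // size() == VECTOR_WIDTH / sizeof(ElementType) covers both halves.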
+ template <typename T>
369
+ struct Vectorized<T, std::enable_if_t<is_zarch_implemented<T>()>> {
370
+ public:
371
+ using value_type = T;
372
+ using vtype = ZSimdVect<T>;
373
+ using vmaskType = ZSimdVectBinary<T>;
374
+ using size_type = int;
375
+ // because of a gcc inconsistency for int64_t we are obliged to use this instead of
376
+ // value_type
377
+ using ElementType = ZSimdVectElement<T>;
378
+ using vinner_data = std::pair<vtype, vtype>;
379
+
380
+ private:
381
+ vtype _vec0;
382
+ vtype _vec1;
383
+
384
+ public:
385
+ static constexpr size_type size() {
386
+ return VECTOR_WIDTH / sizeof(ElementType);
387
+ }
388
+ Vectorized() {}
389
+
390
+ C10_ALWAYS_INLINE Vectorized(vtype v) : _vec0{v}, _vec1{v} {}
391
+ C10_ALWAYS_INLINE Vectorized(const vinner_data &v) : _vec0{v.first}, _vec1{v.second} {}
392
+ C10_ALWAYS_INLINE Vectorized(vtype v1, vtype v2) : _vec0{v1}, _vec1{v2} {}
393
+ C10_ALWAYS_INLINE Vectorized(T s)
394
+ : _vec0{vec_splats((ElementType)s)}, _vec1{vec_splats((ElementType)s)} {}
395
+
396
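+ // loadu reads the two 16-byte halves with vec_xl at offset0/offset16; partial
+ // loads (count < size()) are staged through an aligned temporary so that no
+ // bytes beyond `count` elements are read from `ptr`.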
+ static Vectorized<value_type> C10_ALWAYS_INLINE
397
+ loadu(const void* ptr, int count = size()) {
398
+ if (count == size()) {
399
+ return {
400
+ vec_xl(offset0, reinterpret_cast<const ElementType*>(ptr)),
401
+ vec_xl(offset16, reinterpret_cast<const ElementType*>(ptr))};
402
+ }
403
+
404
+ __at_align__ ElementType tmp_values[size()] = {};
405
+ std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(ElementType));
406
+
407
+ return {
408
+ vec_xl(offset0, reinterpret_cast<const ElementType*>(tmp_values)),
409
+ vec_xl(offset16, reinterpret_cast<const ElementType*>(tmp_values))};
410
+ }
411
+
412
+ static Vectorized<value_type> C10_ALWAYS_INLINE
413
+ loadu_one_fourth(const void* ptr) {
414
+ // load only first 8 bytes
415
+ // only intended to be used with uint8_t
416
+ return loadu(ptr, 8 / sizeof(ElementType));
417
+ }
418
+
419
+ void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
420
+ if (count == size()) {
421
+ vec_xst(_vec0, offset0, reinterpret_cast<ElementType*>(ptr));
422
+ vec_xst(_vec1, offset16, reinterpret_cast<ElementType*>(ptr));
423
+ } else if (count > 0) {
424
+ __at_align__ ElementType tmp_values[size()];
425
+ vec_xst(_vec0, offset0, reinterpret_cast<ElementType*>(tmp_values));
426
+ vec_xst(_vec1, offset16, reinterpret_cast<ElementType*>(tmp_values));
427
+ std::memcpy(
428
+ ptr, tmp_values, std::min(count, size()) * sizeof(ElementType));
429
+ }
430
+ }
431
+
432
+ C10_ALWAYS_INLINE const vtype& vec0() const {
433
+ return _vec0;
434
+ }
435
+
436
+ C10_ALWAYS_INLINE const vtype& vec1() const {
437
+ return _vec1;
438
+ }
439
+
440
+ C10_ALWAYS_INLINE vinner_data data() const {
441
+ return std::make_pair<>(_vec0, _vec1);
442
+ }
443
+
444
+ C10_ALWAYS_INLINE operator vinner_data() const {
445
+ return data();
446
+ }
447
+
448
+ C10_ALWAYS_INLINE const vmaskType vecb0() const {
449
+ return (vmaskType)_vec0;
450
+ }
451
+ C10_ALWAYS_INLINE const vmaskType vecb1() const {
452
+ return (vmaskType)_vec1;
453
+ }
454
+
455
+ static Vectorized<T> C10_ALWAYS_INLINE blendv(
456
+ const Vectorized<T>& a,
457
+ const Vectorized<T>& b,
458
+ const Vectorized<T>& mask) {
459
+ return {
460
+ vec_sel(a._vec0, b._vec0, mask.vecb0()),
461
+ vec_sel(a._vec1, b._vec1, mask.vecb1())};
462
+ }
463
+
464
+ template <typename U = T, std::enable_if_t<(sizeof(U) == 8), int> = 0>
465
+ C10_ALWAYS_INLINE Vectorized(T s1, T s2, T s3, T s4)
466
+ : _vec0{s1, s2}, _vec1{s3, s4} {}
467
+
468
+ template <typename U = T, std::enable_if_t<(sizeof(U) == 4), int> = 0>
469
+ C10_ALWAYS_INLINE Vectorized(T s1, T s2, T s3, T s4, T s5, T s6, T s7, T s8)
470
+ : _vec0{s1, s2, s3, s4}, _vec1{s5, s6, s7, s8} {}
471
+
472
+ template <typename U = T, std::enable_if_t<(sizeof(U) == 2), int> = 0>
473
+ C10_ALWAYS_INLINE Vectorized(
474
+ T s1,
475
+ T s2,
476
+ T s3,
477
+ T s4,
478
+ T s5,
479
+ T s6,
480
+ T s7,
481
+ T s8,
482
+ T s9,
483
+ T s10,
484
+ T s11,
485
+ T s12,
486
+ T s13,
487
+ T s14,
488
+ T s15,
489
+ T s16)
490
+ : _vec0{s1, s2, s3, s4, s5, s6, s7, s8},
491
+ _vec1{s9, s10, s11, s12, s13, s14, s15, s16} {}
492
+
493
+ template <typename U = T, std::enable_if_t<(sizeof(U) == 1), int> = 0>
494
+ C10_ALWAYS_INLINE Vectorized(
495
+ T s1,
496
+ T s2,
497
+ T s3,
498
+ T s4,
499
+ T s5,
500
+ T s6,
501
+ T s7,
502
+ T s8,
503
+ T s9,
504
+ T s10,
505
+ T s11,
506
+ T s12,
507
+ T s13,
508
+ T s14,
509
+ T s15,
510
+ T s16,
511
+ T s17,
512
+ T s18,
513
+ T s19,
514
+ T s20,
515
+ T s21,
516
+ T s22,
517
+ T s23,
518
+ T s24,
519
+ T s25,
520
+ T s26,
521
+ T s27,
522
+ T s28,
523
+ T s29,
524
+ T s30,
525
+ T s31,
526
+ T s32)
527
+ : _vec0{s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, s16},
528
+ _vec1{
529
+ s17,
530
+ s18,
531
+ s19,
532
+ s20,
533
+ s21,
534
+ s22,
535
+ s23,
536
+ s24,
537
+ s25,
538
+ s26,
539
+ s27,
540
+ s28,
541
+ s29,
542
+ s30,
543
+ s31,
544
+ s32} {}
545
+
546
+ template <typename step_t, typename U = T>
547
+ static std::enable_if_t<sizeof(U) == 8, Vectorized<T>> arange(
548
+ T base = 0,
549
+ step_t step = static_cast<step_t>(1)) {
550
+ return Vectorized<T>(base, base + step, base + 2 * step, base + 3 * step);
551
+ }
552
+
553
+ template <typename step_t, typename U = T>
554
+ static std::enable_if_t<sizeof(U) == 4, Vectorized<T>> arange(
555
+ T base = 0,
556
+ step_t step = static_cast<step_t>(1)) {
557
+ return Vectorized<T>(
558
+ base,
559
+ base + step,
560
+ base + 2 * step,
561
+ base + 3 * step,
562
+ base + 4 * step,
563
+ base + 5 * step,
564
+ base + 6 * step,
565
+ base + 7 * step);
566
+ }
567
+
568
+ template <typename step_t, typename U = T>
569
+ static std::enable_if_t<sizeof(U) == 2, Vectorized<T>> arange(
570
+ T base = 0,
571
+ step_t step = static_cast<step_t>(1)) {
572
+ return Vectorized<T>(
573
+ base,
574
+ base + step,
575
+ base + 2 * step,
576
+ base + 3 * step,
577
+ base + 4 * step,
578
+ base + 5 * step,
579
+ base + 6 * step,
580
+ base + 7 * step,
581
+ base + 8 * step,
582
+ base + 9 * step,
583
+ base + 10 * step,
584
+ base + 11 * step,
585
+ base + 12 * step,
586
+ base + 13 * step,
587
+ base + 14 * step,
588
+ base + 15 * step);
589
+ }
590
+
591
+ template <typename step_t, typename U = T>
592
+ static std::enable_if_t<sizeof(U) == 1, Vectorized<T>> arange(
593
+ T base = 0,
594
+ step_t step = static_cast<step_t>(1)) {
595
+ return Vectorized<T>(
596
+ base,
597
+ base + step,
598
+ base + 2 * step,
599
+ base + 3 * step,
600
+ base + 4 * step,
601
+ base + 5 * step,
602
+ base + 6 * step,
603
+ base + 7 * step,
604
+ base + 8 * step,
605
+ base + 9 * step,
606
+ base + 10 * step,
607
+ base + 11 * step,
608
+ base + 12 * step,
609
+ base + 13 * step,
610
+ base + 14 * step,
611
+ base + 15 * step,
612
+ base + 16 * step,
613
+ base + 17 * step,
614
+ base + 18 * step,
615
+ base + 19 * step,
616
+ base + 20 * step,
617
+ base + 21 * step,
618
+ base + 22 * step,
619
+ base + 23 * step,
620
+ base + 24 * step,
621
+ base + 25 * step,
622
+ base + 26 * step,
623
+ base + 27 * step,
624
+ base + 28 * step,
625
+ base + 29 * step,
626
+ base + 30 * step,
627
+ base + 31 * step);
628
+ }
629
+
630
+ // blend section
631
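+ // blendChoice<sizeof(T)>(mask) selects one of nine compile-time cases: 0/1
+ // return a or b unchanged, 2/3 swap whole 16-byte halves, and 4..8 apply
+ // vec_sel to one or both halves using masks from GetMask1/GetMask2.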
+ template <int64_t mask>
632
+ static std::enable_if_t<blendChoice<sizeof(T)>(mask) == 0, Vectorized<T>>
633
+ C10_ALWAYS_INLINE blend(const Vectorized<T>& a, const Vectorized<T>& b) {
634
+ return a;
635
+ }
636
+
637
+ template <int64_t mask>
638
+ static std::enable_if_t<blendChoice<sizeof(T)>(mask) == 1, Vectorized<T>>
639
+ C10_ALWAYS_INLINE blend(const Vectorized<T>& a, const Vectorized<T>& b) {
640
+ return b;
641
+ }
642
+
643
+ template <int64_t mask>
644
+ static std::enable_if_t<blendChoice<sizeof(T)>(mask) == 2, Vectorized<T>>
645
+ C10_ALWAYS_INLINE blend(const Vectorized<T>& a, const Vectorized<T>& b) {
646
+ return {b._vec0, a._vec1};
647
+ }
648
+
649
+ template <int64_t mask>
650
+ static std::enable_if_t<blendChoice<sizeof(T)>(mask) == 3, Vectorized<T>>
651
+ C10_ALWAYS_INLINE blend(const Vectorized<T>& a, const Vectorized<T>& b) {
652
+ return {a._vec0, b._vec1};
653
+ }
654
+
655
+ template <int64_t mask>
656
+ static std::enable_if_t<blendChoice<sizeof(T)>(mask) == 4, Vectorized<T>>
657
+ C10_ALWAYS_INLINE blend(const Vectorized<T>& a, const Vectorized<T>& b) {
658
+ const vmaskType mask_1st = GetMask1<sizeof(T)>(mask);
659
+ return {(vtype)vec_sel(a._vec0, b._vec0, mask_1st), a._vec1};
660
+ }
661
+
662
+ template <int64_t mask>
663
+ static std::enable_if_t<blendChoice<sizeof(T)>(mask) == 5, Vectorized<T>>
664
+ C10_ALWAYS_INLINE blend(const Vectorized<T>& a, const Vectorized<T>& b) {
665
+ const vmaskType mask_1st = GetMask1<sizeof(T)>(mask);
666
+ return {(vtype)vec_sel(a._vec0, b._vec0, mask_1st), b._vec1};
667
+ }
668
+
669
+ template <int64_t mask>
670
+ static std::enable_if_t<blendChoice<sizeof(T)>(mask) == 6, Vectorized<T>>
671
+ C10_ALWAYS_INLINE blend(const Vectorized<T>& a, const Vectorized<T>& b) {
672
+ const vmaskType mask_2nd = GetMask2<sizeof(T)>(mask);
673
+ // generated masks
674
+ return {a._vec0, (vtype)vec_sel(a._vec1, b._vec1, mask_2nd)};
675
+ }
676
+
677
+ template <int64_t mask>
678
+ static std::enable_if_t<blendChoice<sizeof(T)>(mask) == 7, Vectorized<T>>
679
+ C10_ALWAYS_INLINE blend(const Vectorized<T>& a, const Vectorized<T>& b) {
680
+ const vmaskType mask_2nd = GetMask2<sizeof(T)>(mask);
681
+ // generated masks
682
+ return {b._vec0, (vtype)vec_sel(a._vec1, b._vec1, mask_2nd)};
683
+ }
684
+
685
+ template <int64_t mask>
686
+ static std::enable_if_t<blendChoice<sizeof(T)>(mask) == 8, Vectorized<T>>
687
+ C10_ALWAYS_INLINE blend(const Vectorized<T>& a, const Vectorized<T>& b) {
688
+ const vmaskType mask_1st = GetMask1<sizeof(T)>(mask);
689
+ const vmaskType mask_2nd = GetMask2<sizeof(T)>(mask);
690
+ return {
691
+ (vtype)vec_sel(a._vec0, b._vec0, mask_1st),
692
+ (vtype)vec_sel(a._vec1, b._vec1, mask_2nd)};
693
+ }
694
+
695
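+ // set(a, b, count) returns a vector whose first `count` lanes come from b and
+ // the rest from a; set_inner walks the possible counts at compile time and
+ // dispatches to blend<allbitset(Z)> when Z matches the runtime count.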
+ template <int16_t Z, int16_t C>
696
+ static inline std::enable_if_t<(Z >= C), Vectorized<T>> set_inner(
697
+ const Vectorized<T>& a,
698
+ const Vectorized<T>& b,
699
+ size_t count) {
700
+ return b;
701
+ }
702
+
703
+ template <int16_t Z, int16_t C>
704
+ static inline std::enable_if_t<(Z < C), Vectorized<T>> set_inner(
705
+ const Vectorized<T>& a,
706
+ const Vectorized<T>& b,
707
+ size_t count) {
708
+ if (count == Z)
709
+ return blend<allbitset(Z)>(a, b);
710
+ else
711
+ return set_inner<Z + 1, C>(a, b, count);
712
+ }
713
+
714
+ static Vectorized<T> set(
715
+ const Vectorized<T>& a,
716
+ const Vectorized<T>& b,
717
+ size_t count = size()) {
718
+ if (count == 0)
719
+ return a;
720
+ return set_inner<1, size()>(a, b, count);
721
+ }
722
+
723
+ const ElementType& operator[](int idx) const = delete;
724
+ ElementType& operator[](int idx) = delete;
725
+
726
+ Vectorized<T> C10_ALWAYS_INLINE operator+(const Vectorized<T>& other) const {
727
+ return Vectorized<T>{_vec0 + other._vec0, _vec1 + other._vec1};
728
+ }
729
+
730
+ Vectorized<T> C10_ALWAYS_INLINE operator-(const Vectorized<T>& other) const {
731
+ return Vectorized<T>{_vec0 - other._vec0, _vec1 - other._vec1};
732
+ }
733
+
734
+ Vectorized<T> C10_ALWAYS_INLINE operator*(const Vectorized<T>& other) const {
735
+ return Vectorized<T>{_vec0 * other._vec0, _vec1 * other._vec1};
736
+ }
737
+
738
+ Vectorized<T> C10_ALWAYS_INLINE operator/(const Vectorized<T>& other) const {
739
+ return Vectorized<T>{_vec0 / other._vec0, _vec1 / other._vec1};
740
+ }
741
+
742
+ Vectorized<T> C10_ALWAYS_INLINE operator&(const Vectorized<T>& other) const {
743
+ return Vectorized<T>{
744
+ (vtype)(vecb0() & other.vecb0()), (vtype)(vecb1() & other.vecb1())};
745
+ }
746
+
747
+ Vectorized<T> C10_ALWAYS_INLINE operator|(const Vectorized<T>& other) const {
748
+ return Vectorized<T>{
749
+ (vtype)(vecb0() | other.vecb0()), (vtype)(vecb1() | other.vecb1())};
750
+ }
751
+
752
+ Vectorized<T> C10_ALWAYS_INLINE operator^(const Vectorized<T>& other) const {
753
+ return Vectorized<T>{
754
+ (vtype)(vecb0() ^ other.vecb0()), (vtype)(vecb1() ^ other.vecb1())};
755
+ }
756
+
757
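+ // Shifts are emulated element-wise through stack arrays. Out-of-range shift
+ // amounts yield 0 for operator<<, while operator>> falls back to max_shift,
+ // which keeps only the sign for signed types.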
+ Vectorized<T> C10_ALWAYS_INLINE operator<<(const Vectorized<T> &other) const {
758
+ constexpr ElementType max_shift = sizeof(ElementType) * CHAR_BIT;
759
+
760
+ ElementType a_array[Vectorized<T>::size()];
761
+ ElementType b_array[Vectorized<T>::size()];
762
+ ElementType c_array[Vectorized<T>::size()];
763
+
764
+ store(a_array);
765
+ other.store(b_array);
766
+
767
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
768
+ T shift = b_array[i];
769
+ if ((static_cast<std::make_signed_t<T>>(shift) < 0) || (shift >= max_shift)) {
770
+ c_array[i] = 0;
771
+ } else {
772
+ c_array[i] = static_cast<std::make_unsigned_t<T>>(a_array[i]) << shift;
773
+ }
774
+ }
775
+
776
+ return loadu(c_array);
777
+ }
778
+
779
+ Vectorized<T> C10_ALWAYS_INLINE operator>>(const Vectorized<T> &other) const {
780
+ // maximum right-shift amount: retains only the sign bit for signed types and no bits for unsigned
781
+ constexpr ElementType max_shift = sizeof(T) * CHAR_BIT - std::is_signed_v<T>;
782
+
783
+ ElementType a_array[Vectorized<T>::size()];
784
+ ElementType b_array[Vectorized<T>::size()];
785
+ ElementType c_array[Vectorized<T>::size()];
786
+
787
+ store(a_array);
788
+ other.store(b_array);
789
+
790
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
791
+ T shift = b_array[i];
792
+ if ((static_cast<std::make_signed_t<T>>(shift) < 0) || (shift >= max_shift)) {
793
+ c_array[i] = a_array[i] >> max_shift;
794
+ } else {
795
+ c_array[i] = a_array[i] >> shift;
796
+ }
797
+ }
798
+
799
+ return loadu(c_array);
800
+ }
801
+
802
+ Vectorized<T> _not() const {
803
+ return {(vtype)vec_nor(vecb0(), vecb0()), (vtype)vec_nor(vecb1(), vecb1())};
804
+ }
805
+
806
+ Vectorized<T> C10_ALWAYS_INLINE operator==(const Vectorized<T>& other) const {
807
+ return Vectorized<T>{
808
+ vec_cmpeq(_vec0, other._vec0), vec_cmpeq(_vec1, other._vec1)};
809
+ }
810
+
811
+ Vectorized<T> C10_ALWAYS_INLINE operator!=(const Vectorized<T>& other) const {
812
+ return Vectorized<T>{
813
+ vec_cmpeq(_vec0, other._vec0), vec_cmpeq(_vec1, other._vec1)}
814
+ ._not();
815
+ }
816
+ Vectorized<T> C10_ALWAYS_INLINE operator>(const Vectorized<T>& other) const {
817
+ return Vectorized<T>{
818
+ vec_cmpgt(_vec0, other._vec0), vec_cmpgt(_vec1, other._vec1)};
819
+ }
820
+ Vectorized<T> C10_ALWAYS_INLINE operator>=(const Vectorized<T>& other) const {
821
+ return Vectorized<T>{
822
+ vec_cmpge(_vec0, other._vec0), vec_cmpge(_vec1, other._vec1)};
823
+ }
824
+
825
+ Vectorized<T> C10_ALWAYS_INLINE operator<(const Vectorized<T>& other) const {
826
+ return Vectorized<T>{
827
+ vec_cmplt(_vec0, other._vec0), vec_cmplt(_vec1, other._vec1)};
828
+ }
829
+
830
+ Vectorized<T> C10_ALWAYS_INLINE operator<=(const Vectorized<T>& other) const {
831
+ return Vectorized<T>{
832
+ vec_cmple(_vec0, other._vec0), vec_cmple(_vec1, other._vec1)};
833
+ }
834
+
835
+ Vectorized<T> C10_ALWAYS_INLINE eq(const Vectorized<T>& other) const {
836
+ return (*this == other) & Vectorized<T>((T)1.0);
837
+ }
838
+ Vectorized<T> C10_ALWAYS_INLINE ne(const Vectorized<T>& other) const {
839
+ return (*this != other) & Vectorized<T>((T)1.0);
840
+ }
841
+ Vectorized<T> C10_ALWAYS_INLINE gt(const Vectorized<T>& other) const {
842
+ return (*this > other) & Vectorized<T>((T)1.0);
843
+ }
844
+ Vectorized<T> C10_ALWAYS_INLINE ge(const Vectorized<T>& other) const {
845
+ return (*this >= other) & Vectorized<T>((T)1.0);
846
+ }
847
+ Vectorized<T> C10_ALWAYS_INLINE lt(const Vectorized<T>& other) const {
848
+ return (*this < other) & Vectorized<T>((T)1.0);
849
+ }
850
+ Vectorized<T> C10_ALWAYS_INLINE le(const Vectorized<T>& other) const {
851
+ return (*this <= other) & Vectorized<T>((T)1.0);
852
+ }
853
+
854
+ template <
855
+ typename U = T,
856
+ std::enable_if_t<!std::is_unsigned<U>::value, int> = 0>
857
+ Vectorized<U> C10_ALWAYS_INLINE abs() const {
858
+ return {vec_abs(_vec0), vec_abs(_vec1)};
859
+ }
860
+
861
+ template <
862
+ typename U = T,
863
+ std::enable_if_t<std::is_unsigned<U>::value, int> = 0>
864
+ Vectorized<U> C10_ALWAYS_INLINE abs() const {
865
+ return {_vec0, _vec1};
866
+ }
867
+
868
+ Vectorized<T> C10_ALWAYS_INLINE neg() const {
869
+ return {-_vec0, -_vec1};
870
+ }
871
+
872
+ Vectorized<T> isnan() const {
873
+ auto x = *this;
874
+ auto ret = (x == x);
875
+ return ret._not();
876
+ }
877
+
878
+ bool has_inf_nan() const {
879
+ for (const auto i : c10::irange(size()/2)) {
880
+ if(_isnan(_vec0[i]) || _isinf(_vec0[i])) {
881
+ return true;
882
+ }
883
+ }
884
+ for (const auto i : c10::irange(size()/2)) {
885
+ if(_isnan(_vec1[i]) || _isinf(_vec1[i])) {
886
+ return true;
887
+ }
888
+ }
889
+ return false;
890
+ }
891
+
892
+ template <
893
+ typename U = T,
894
+ std::enable_if_t<std::is_floating_point<U>::value, int> = 0>
895
+ Vectorized<U> angle() const {
896
+ auto tmp = blendv(
897
+ Vectorized<U>(0), Vectorized<U>(c10::pi<U>), *this < Vectorized<U>(0));
898
+ return blendv(tmp, *this, isnan());
899
+ }
900
+
901
+ template <
902
+ typename U = T,
903
+ std::enable_if_t<!std::is_floating_point<U>::value, int> = 0>
904
+ Vectorized<U> angle() const {
905
+ return blendv(
906
+ Vectorized<U>(0), Vectorized<U>(c10::pi<U>), *this < Vectorized<U>(0));
907
+ }
908
+
909
+ Vectorized<T> real() const {
910
+ return *this;
911
+ }
912
+ Vectorized<T> imag() const {
913
+ return Vectorized<T>{0};
914
+ }
915
+ Vectorized<T> conj() const {
916
+ return *this;
917
+ }
918
+
919
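+ // zero_mask(): compares against zero and packs one bit per lane into an int,
+ // with _vec0's lanes in the low bits and _vec1's lanes shifted up by size()/2.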
+ template <
920
+ typename U = T,
921
+ std::enable_if_t<std::is_floating_point<U>::value, int> = 0>
922
+ int zero_mask() const {
923
+ auto cmp = (*this == Vectorized<U>(0));
924
+ constexpr auto mask_zero_bits = GetBpermZeroMask<U>();
925
+ ZSimdVectBinary<uint64_t> result0 =
926
+ vec_bperm_u128((ZSimdVectBinary<uint8_t>)cmp.vecb0(), mask_zero_bits);
927
+ ZSimdVectBinary<uint64_t> result1 =
928
+ vec_bperm_u128((ZSimdVectBinary<uint8_t>)cmp.vecb1(), mask_zero_bits);
929
+ return (result0[0] | (result1[0] << (size() / 2)));
930
+ }
931
+
932
+ Vectorized<T> C10_ALWAYS_INLINE floor() const {
933
+ return {vec_floor(_vec0), vec_floor(_vec1)};
934
+ }
935
+
936
+ Vectorized<T> C10_ALWAYS_INLINE ceil() const {
937
+ return {vec_ceil(_vec0), vec_ceil(_vec1)};
938
+ }
939
+
940
+ Vectorized<T> C10_ALWAYS_INLINE round() const {
941
+ return {vec_round(_vec0), vec_round(_vec1)};
942
+ }
943
+
944
+ Vectorized<T> C10_ALWAYS_INLINE rint() const {
945
+ return {vec_rint(_vec0), vec_rint(_vec1)};
946
+ }
947
+
948
+ Vectorized<T> C10_ALWAYS_INLINE trunc() const {
949
+ return {vec_trunc(_vec0), vec_trunc(_vec1)};
950
+ }
951
+
952
+ Vectorized<T> C10_ALWAYS_INLINE frac() const {
953
+ return *this - trunc();
954
+ }
955
+
956
+ Vectorized<T> C10_ALWAYS_INLINE sqrt() const {
957
+ return {vec_sqrt(_vec0), vec_sqrt(_vec1)};
958
+ }
959
+ Vectorized<T> C10_ALWAYS_INLINE reciprocal() const {
960
+ return Vectorized<T>((T)1) / (*this);
961
+ }
962
+ Vectorized<T> C10_ALWAYS_INLINE rsqrt() const {
963
+ return sqrt().reciprocal();
964
+ }
965
+
966
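+ // mapOrdinary applies a scalar function lane by lane (slow path), while
+ // mapSleef picks the SLEEF single- or double-precision vector kernel based on
+ // the element type; the transcendental functions below are defined once in
+ // terms of these helpers.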
+ template <
967
+ typename U = T,
968
+ std::enable_if_t<std::is_same<U, float>::value, int> = 0>
969
+ inline Vectorized<T> mapOrdinary(float (*const f)(float)) const {
970
+ float a00 = f(_vec0[0]);
971
+ float a01 = f(_vec0[1]);
972
+ float a02 = f(_vec0[2]);
973
+ float a03 = f(_vec0[3]);
974
+ float a10 = f(_vec1[0]);
975
+ float a11 = f(_vec1[1]);
976
+ float a12 = f(_vec1[2]);
977
+ float a13 = f(_vec1[3]);
978
+ return Vectorized<T>{a00, a01, a02, a03, a10, a11, a12, a13};
979
+ }
980
+
981
+ template <
982
+ typename U = T,
983
+ std::enable_if_t<std::is_same<U, double>::value, int> = 0>
984
+ inline Vectorized<T> mapOrdinary(double (*const f)(double)) const {
985
+ return Vectorized<T>(f(_vec0[0]), f(_vec0[1]), f(_vec1[0]), f(_vec1[1]));
986
+ }
987
+
988
+ template <
989
+ typename U = T,
990
+ std::enable_if_t<std::is_same<U, float>::value, int> = 0>
991
+ inline Vectorized<T> mapOrdinary(
992
+ float (*const f)(float, float),
993
+ const Vectorized<T>& b) const {
994
+ float a00 = f(_vec0[0], b._vec0[0]);
995
+ float a01 = f(_vec0[1], b._vec0[1]);
996
+ float a02 = f(_vec0[2], b._vec0[2]);
997
+ float a03 = f(_vec0[3], b._vec0[3]);
998
+ float a10 = f(_vec1[0], b._vec1[0]);
999
+ float a11 = f(_vec1[1], b._vec1[1]);
1000
+ float a12 = f(_vec1[2], b._vec1[2]);
1001
+ float a13 = f(_vec1[3], b._vec1[3]);
1002
+ return Vectorized<T>{a00, a01, a02, a03, a10, a11, a12, a13};
1003
+ }
1004
+
1005
+ template <
1006
+ typename U = T,
1007
+ std::enable_if_t<std::is_same<U, double>::value, int> = 0>
1008
+ inline Vectorized<T> mapOrdinary(
1009
+ double (*const f)(double, double),
1010
+ const Vectorized<T>& b) const {
1011
+ return Vectorized<T>(
1012
+ f(_vec0[0], b._vec0[0]),
1013
+ f(_vec0[1], b._vec0[1]),
1014
+ f(_vec1[0], b._vec1[0]),
1015
+ f(_vec1[1], b._vec1[1]));
1016
+ }
1017
+
1018
+ template <
1019
+ typename FloatOp,
1020
+ typename DoubleOp,
1021
+ typename U = T,
1022
+ std::enable_if_t<std::is_same<U, float>::value, int> = 0>
1023
+ inline Vectorized<T> mapSleef(FloatOp f, DoubleOp d) const {
1024
+ vtype a0 = f(_vec0);
1025
+ vtype a1 = f(_vec1);
1026
+ return Vectorized<T>{a0, a1};
1027
+ }
1028
+
1029
+ template <
1030
+ typename FloatOp,
1031
+ typename DoubleOp,
1032
+ typename U = T,
1033
+ std::enable_if_t<std::is_same<U, double>::value, int> = 0>
1034
+ inline Vectorized<T> mapSleef(FloatOp f, DoubleOp d) const {
1035
+ return Vectorized<T>(d(_vec0), d(_vec1));
1036
+ }
1037
+
1038
+ template <
1039
+ typename FloatOp,
1040
+ typename DoubleOp,
1041
+ typename U = T,
1042
+ std::enable_if_t<std::is_same<U, float>::value, int> = 0>
1043
+ inline Vectorized<T> mapSleef(FloatOp f, DoubleOp d, const Vectorized<T>& b)
1044
+ const {
1045
+ vtype a0 = f(_vec0, b._vec0);
1046
+ vtype a1 = f(_vec1, b._vec1);
1047
+ return Vectorized<T>{a0, a1};
1048
+ }
1049
+
1050
+ template <
1051
+ typename FloatOp,
1052
+ typename DoubleOp,
1053
+ typename U = T,
1054
+ std::enable_if_t<std::is_same<U, double>::value, int> = 0>
1055
+ inline Vectorized<T> mapSleef(FloatOp f, DoubleOp d, const Vectorized<T>& b)
1056
+ const {
1057
+ return Vectorized<T>(d(_vec0, b._vec0), d(_vec1, b._vec1));
1058
+ }
1059
+
1060
+ Vectorized<T> acos() const {
1061
+ return mapSleef(Sleef_acosf4_u10, Sleef_acosd2_u10);
1062
+ }
1063
+ Vectorized<T> asin() const {
1064
+ return mapSleef(Sleef_asinf4_u10, Sleef_asind2_u10);
1065
+ }
1066
+ Vectorized<T> atan() const {
1067
+ return mapSleef(Sleef_atanf4_u10, Sleef_atand2_u10);
1068
+ }
1069
+ Vectorized<T> atanh() const {
1070
+ return mapSleef(Sleef_atanhf4_u10, Sleef_atanhd2_u10);
1071
+ }
1072
+
1073
+ Vectorized<T> erf() const {
1074
+ return mapSleef(Sleef_erff4_u10, Sleef_erfd2_u10);
1075
+ }
1076
+ Vectorized<T> erfc() const {
1077
+ return mapSleef(Sleef_erfcf4_u15, Sleef_erfcd2_u15);
1078
+ }
1079
+
1080
+ Vectorized<T> exp() const {
1081
+ return mapSleef(Sleef_expf4_u10, Sleef_expd2_u10);
1082
+ }
1083
+ Vectorized<T> exp2() const {
1084
+ return mapSleef(Sleef_exp2f4_u10, Sleef_exp2d2_u10);
1085
+ }
1086
+ Vectorized<T> expm1() const {
1087
+ return mapSleef(Sleef_expm1f4_u10, Sleef_expm1d2_u10);
1088
+ }
1089
+ Vectorized<T> exp_u20() const {
1090
+ return exp();
1091
+ }
1092
+
1093
+ Vectorized<T> log() const {
1094
+ return mapSleef(Sleef_logf4_u10, Sleef_logd2_u10);
1095
+ }
1096
+ Vectorized<T> log2() const {
1097
+ return mapSleef(Sleef_log2f4_u10, Sleef_log2d2_u10);
1098
+ }
1099
+ Vectorized<T> log10() const {
1100
+ return mapSleef(Sleef_log10f4_u10, Sleef_log10d2_u10);
1101
+ }
1102
+ Vectorized<T> log1p() const {
1103
+ return mapSleef(Sleef_log1pf4_u10, Sleef_log1pd2_u10);
1104
+ }
1105
+
1106
+ Vectorized<T> sin() const {
1107
+ #ifndef SLEEF_MEMORY_WORKAROUND
1108
+ return mapSleef(Sleef_sinf4_u10, Sleef_sind2_u10);
1109
+ #else
1110
+ return mapOrdinary(std::sin);
1111
+ #endif
1112
+ }
1113
+ Vectorized<T> sinh() const {
1114
+ return mapSleef(Sleef_sinhf4_u10, Sleef_sinhd2_u10);
1115
+ }
1116
+ Vectorized<T> cos() const {
1117
+ #ifndef SLEEF_MEMORY_WORKAROUND
1118
+ return mapSleef(Sleef_cosf4_u10, Sleef_cosd2_u10);
1119
+ #else
1120
+ return mapOrdinary(std::cos);
1121
+ #endif
1122
+ }
1123
+ Vectorized<T> cosh() const {
1124
+ return mapSleef(Sleef_coshf4_u10, Sleef_coshd2_u10);
1125
+ }
1126
+
1127
+ Vectorized<T> tan() const {
1128
+ #ifndef SLEEF_MEMORY_WORKAROUND
1129
+ return mapSleef(Sleef_tanf4_u10, Sleef_tand2_u10);
1130
+ #else
1131
+ return mapOrdinary(std::tan);
1132
+ #endif
1133
+ }
1134
+ Vectorized<T> tanh() const {
1135
+ return mapSleef(Sleef_tanhf4_u10, Sleef_tanhd2_u10);
1136
+ }
1137
+
1138
+ Vectorized<T> lgamma() const {
1139
+ return mapSleef(Sleef_lgammaf4_u10, Sleef_lgammad2_u10);
1140
+ }
1141
+
1142
+ Vectorized<T> atan2(const Vectorized<T>& b) const {
1143
+ return mapSleef(Sleef_atan2f4_u10, Sleef_atan2d2_u10, b);
1144
+ }
1145
+ Vectorized<T> copysign(const Vectorized<T>& sign) const {
1146
+ return mapSleef(Sleef_copysignf4, Sleef_copysignd2, sign);
1147
+ }
1148
+ Vectorized<T> fmod(const Vectorized<T>& q) const {
1149
+ return mapSleef(Sleef_fmodf4, Sleef_fmodd2, q);
1150
+ }
1151
+
1152
+ Vectorized<T> hypot(const Vectorized<T>& b) const {
1153
+ return mapSleef(Sleef_hypotf4_u05, Sleef_hypotd2_u05, b);
1154
+ }
1155
+
1156
+ Vectorized<T> pow(const Vectorized<T>& b) const {
1157
+ return mapSleef(Sleef_powf4_u10, Sleef_powd2_u10, b);
1158
+ }
1159
+
1160
+ Vectorized<T> nextafter(const Vectorized<T>& b) const {
1161
+ return mapSleef(Sleef_nextafterf4, Sleef_nextafterd2, b);
1162
+ }
1163
+
1164
+ Vectorized<T> erfinv() const {
1165
+ return mapOrdinary(calc_erfinv);
1166
+ }
1167
+
1168
+ Vectorized<T> digamma() const {
1169
+ return mapOrdinary(calc_digamma);
1170
+ }
1171
+
1172
+ Vectorized<T> igamma(const Vectorized<T>& x) const {
1173
+ return mapOrdinary(calc_igamma, x);
1174
+ }
1175
+
1176
+ Vectorized<T> igammac(const Vectorized<T>& x) const {
1177
+ return mapOrdinary(calc_igammac, x);
1178
+ }
1179
+
1180
+ Vectorized<T> i0() const {
1181
+ return mapOrdinary(calc_i0);
1182
+ }
1183
+
1184
+ Vectorized<T> i0e() const {
1185
+ return mapOrdinary(calc_i0e);
1186
+ }
1187
+
1188
+ template <
1189
+ typename U = T,
1190
+ std::enable_if_t<!std::is_floating_point<U>::value, int> = 0>
1191
+ Vectorized<T> minimum(const Vectorized<T>& other) const {
1192
+ return {vec_min(_vec0, other._vec0), vec_min(_vec1, other._vec1)};
1193
+ }
1194
+
1195
+ /* Propagates NaN if either input is a NaN. */
1196
+ template <
1197
+ typename U = T,
1198
+ std::enable_if_t<std::is_floating_point<U>::value, int> = 0>
1199
+ Vectorized<T> minimum(const Vectorized<T>& other) const {
1200
+ Vectorized<T> tmp = {vec_min(_vec0, other._vec0), vec_min(_vec1, other._vec1)};
1201
+ tmp = blendv(tmp, *this, isnan());
1202
+ return blendv(tmp, other, other.isnan());
1203
+ }
1204
+
1205
+ template <
1206
+ typename U = T,
1207
+ std::enable_if_t<!std::is_floating_point<U>::value, int> = 0>
1208
+ Vectorized<T> maximum(const Vectorized<T>& other) const {
1209
+ return {vec_max(_vec0, other._vec0), vec_max(_vec1, other._vec1)};
1210
+ }
1211
+
1212
+ /* Propagates NaN if either input is a NaN. */
1213
+ template <
1214
+ typename U = T,
1215
+ std::enable_if_t<std::is_floating_point<U>::value, int> = 0>
1216
+ Vectorized<T> maximum(const Vectorized<T>& other) const {
1217
+ Vectorized<T> tmp = {vec_max(_vec0, other._vec0), vec_max(_vec1, other._vec1)};
1218
+ tmp = blendv(tmp, *this, isnan());
1219
+ return blendv(tmp, other, other.isnan());
1220
+ }
1221
+
1222
+ template <
1223
+ typename U = T,
1224
+ std::enable_if_t<!std::is_floating_point<U>::value, int> = 0>
1225
+ Vectorized<T> clamp_min(const Vectorized<T>& min) const {
1226
+ return {vec_max(_vec0, min._vec0), vec_max(_vec1, min._vec1)};
1227
+ }
1228
+
1229
+ /* Keeps NaN if actual value is NaN */
1230
+ template <
1231
+ typename U = T,
1232
+ std::enable_if_t<std::is_floating_point<U>::value, int> = 0>
1233
+ Vectorized<T> clamp_min(const Vectorized<T>& min) const {
1234
+ Vectorized<T> tmp = {vec_max(_vec0, min._vec0), vec_max(_vec1, min._vec1)};
1235
+ return blendv(tmp, *this, isnan());
1236
+ }
1237
+
1238
+ template <
1239
+ typename U = T,
1240
+ std::enable_if_t<!std::is_floating_point<U>::value, int> = 0>
1241
+ Vectorized<T> clamp_max(const Vectorized<T>& max) const {
1242
+ return {vec_min(_vec0, max._vec0), vec_min(_vec1, max._vec1)};
1243
+ }
1244
+
1245
+ /* Keeps NaN if actual value is NaN */
1246
+ template <
1247
+ typename U = T,
1248
+ std::enable_if_t<std::is_floating_point<U>::value, int> = 0>
1249
+ Vectorized<T> clamp_max(const Vectorized<T>& max) const {
1250
+ Vectorized<T> tmp = {vec_min(_vec0, max._vec0), vec_min(_vec1, max._vec1)};
1251
+ return blendv(tmp, *this, isnan());
1252
+ }
1253
+
1254
+ template <
1255
+ typename U = T,
1256
+ std::enable_if_t<std::is_same<U, float>::value, int> = 0>
1257
+ Vectorized<T> swapped() const {
1258
+ auto swap_mask = GetSwapMaskFloat();
1259
+ vtype v0 = vec_perm(_vec0, _vec0, swap_mask);
1260
+ vtype v1 = vec_perm(_vec1, _vec1, swap_mask);
1261
+ return {v0, v1};
1262
+ }
1263
+
1264
+ template <
1265
+ typename U = T,
1266
+ std::enable_if_t<std::is_same<U, double>::value, int> = 0>
1267
+ Vectorized<T> swapped() const {
1268
+ vtype v0 = vec_permi(_vec0, _vec0, 2);
1269
+ vtype v1 = vec_permi(_vec1, _vec1, 2);
1270
+ return {v0, v1};
1271
+ }
1272
+
1273
+ template <
1274
+ typename U = T,
1275
+ std::enable_if_t<std::is_floating_point<U>::value, int> = 0>
1276
+ static Vectorized<T> mergee(Vectorized<T>& first, Vectorized<T>& second) {
1277
+ return {
1278
+ vec_mergee(first._vec0, second._vec0),
1279
+ vec_mergee(first._vec1, second._vec1)};
1280
+ }
1281
+
1282
+ template <
1283
+ typename U = T,
1284
+ std::enable_if_t<std::is_floating_point<U>::value, int> = 0>
1285
+ static Vectorized<T> mergeo(Vectorized<T>& first, Vectorized<T>& second) {
1286
+ return {
1287
+ vec_mergeo(first._vec0, second._vec0),
1288
+ vec_mergeo(first._vec1, second._vec1)};
1289
+ }
1290
+
1291
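+ // pairwise horizontal add across two inputs; for float the result lanes are
+ // {a0+a1, b0+b1, a2+a3, b2+b3, a4+a5, b4+b5, a6+a7, b6+b7}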
+ static Vectorized<T> horizontal_add_perm(
1292
+ Vectorized<T>& first,
1293
+ Vectorized<T>& second) {
1294
+ // emulate the horizontal add with 6 instructions in total
1295
+ // permute each input so that adjacent lanes line up for the horizontal sums
1296
+ auto first_perm = first.swapped(); // 2perm
1297
+ auto second_perm = second.swapped(); // 2perm
1298
+ // sum
1299
+ auto first_ret = first + first_perm; // 2add
1300
+ auto second_ret = second + second_perm; // 2 add
1301
+ // now pick the even lanes
1302
+ return mergee(first_ret, second_ret); // 2 mergee's
1303
+ }
1304
+
1305
+ static Vectorized<T> horizontal_sub_perm(
1306
+ Vectorized<T>& first,
1307
+ Vectorized<T>& second) {
1308
+ // emulate the horizontal subtract with 6 instructions in total
1309
+ // permute each input so that adjacent lanes line up for the horizontal differences
1310
+ auto first_perm = first.swapped(); // 2perm
1311
+ auto second_perm = second.swapped(); // 2perm
1312
+ // subtract
1313
+ auto first_ret = first - first_perm; // 2sub
1314
+ auto second_ret = second - second_perm; // 2 sub
1315
+ // now pick the even lanes
1316
+ return mergee(first_ret, second_ret); // 2 mergee's
1317
+ }
1318
+
1319
+ template <
1320
+ typename U = T,
1321
+ std::enable_if_t<std::is_floating_point<U>::value, int> = 0>
1322
+ Vectorized<T> mergee() const {
1323
+ return {vec_mergee(_vec0, _vec0), vec_mergee(_vec1, _vec1)};
1324
+ }
1325
+
1326
+ template <
1327
+ typename U = T,
1328
+ std::enable_if_t<std::is_floating_point<U>::value, int> = 0>
1329
+ Vectorized<T> mergeo() const {
1330
+ return {vec_mergeo(_vec0, _vec0), vec_mergeo(_vec1, _vec1)};
1331
+ }
1332
+
1333
+ template <
1334
+ typename U = T,
1335
+ std::enable_if_t<std::is_same<U, uint8_t>::value, int> = 0>
1336
+ Vectorized<int32_t> to_vec_float_helper() const {
1337
+ int32_t values[8] = {
1338
+ _vec0[0],
1339
+ _vec0[1],
1340
+ _vec0[2],
1341
+ _vec0[3],
1342
+ _vec0[4],
1343
+ _vec0[5],
1344
+ _vec0[6],
1345
+ _vec0[7],
1346
+ };
1347
+
1348
+ return Vectorized<int32_t>{
1349
+ values[0], values[1], values[2], values[3],
1350
+ values[4], values[5], values[6], values[7]
1351
+ };
1352
+ }
1353
+
1354
+ template <
1355
+ typename U = T,
1356
+ std::enable_if_t<std::is_same<U, int32_t>::value, int> = 0>
1357
+ Vectorized<uint8_t> to_vec_uint8_helper() const {
1358
+ // helper for converting int32_t lanes to uint8_t (used on the float -> uint8_t path)
1359
+ uint8_t values[8] = {
1360
+ static_cast<uint8_t>(_vec0[0]),
1361
+ static_cast<uint8_t>(_vec0[1]),
1362
+ static_cast<uint8_t>(_vec0[2]),
1363
+ static_cast<uint8_t>(_vec0[3]),
1364
+ static_cast<uint8_t>(_vec1[0]),
1365
+ static_cast<uint8_t>(_vec1[1]),
1366
+ static_cast<uint8_t>(_vec1[2]),
1367
+ static_cast<uint8_t>(_vec1[3]),
1368
+ };
1369
+
1370
+ return Vectorized<uint8_t>{
1371
+ values[0], values[1], values[2], values[3],
1372
+ values[4], values[5], values[6], values[7],
1373
+ 0, 0, 0, 0,
1374
+ 0, 0, 0, 0,
1375
+ 0, 0, 0, 0,
1376
+ 0, 0, 0, 0,
1377
+ 0, 0, 0, 0,
1378
+ 0, 0, 0, 0,
1379
+ };
1380
+ }
1381
+ };
1382
+
1383
+ template <>
1384
+ inline Vectorized<int64_t> operator~(const Vectorized<int64_t>& a) {
1385
+ return a._not();
1386
+ }
1387
+
1388
+ template <>
1389
+ inline Vectorized<int32_t> operator~(const Vectorized<int32_t>& a) {
1390
+ return a._not();
1391
+ }
1392
+
1393
+ template <>
1394
+ inline Vectorized<int16_t> operator~(const Vectorized<int16_t>& a) {
1395
+ return a._not();
1396
+ }
1397
+
1398
+ template <>
1399
+ inline Vectorized<int8_t> operator~(const Vectorized<int8_t>& a) {
1400
+ return a._not();
1401
+ }
1402
+
1403
+ template <>
1404
+ inline Vectorized<uint8_t> operator~(const Vectorized<uint8_t>& a) {
1405
+ return a._not();
1406
+ }
1407
+
1408
+ #define DEFINE_MAXMIN_FUNCS(operand_type) \
1409
+ template <> \
1410
+ Vectorized<operand_type> inline maximum( \
1411
+ const Vectorized<operand_type>& a, const Vectorized<operand_type>& b) { \
1412
+ return a.maximum(b); \
1413
+ } \
1414
+ template <> \
1415
+ Vectorized<operand_type> inline minimum( \
1416
+ const Vectorized<operand_type>& a, const Vectorized<operand_type>& b) { \
1417
+ return a.minimum(b); \
1418
+ }
1419
+
1420
+ #define DEFINE_CLAMP_MAXMIN_FUNCS(typex) \
1421
+ DEFINE_MAXMIN_FUNCS(typex) \
1422
+ template <> \
1423
+ Vectorized<typex> C10_ALWAYS_INLINE clamp_min( \
1424
+ const Vectorized<typex>& a, const Vectorized<typex>& min) { \
1425
+ return a.clamp_min(min); \
1426
+ } \
1427
+ template <> \
1428
+ Vectorized<typex> C10_ALWAYS_INLINE clamp_max( \
1429
+ const Vectorized<typex>& a, const Vectorized<typex>& max) { \
1430
+ return a.clamp_max(max); \
1431
+ } \
1432
+ template <> \
1433
+ Vectorized<typex> C10_ALWAYS_INLINE clamp( \
1434
+ const Vectorized<typex>& a, \
1435
+ const Vectorized<typex>& min, \
1436
+ const Vectorized<typex>& max) { \
1437
+ return clamp_max(clamp_min(a, min), max); \
1438
+ }
1439
+
1440
+ DEFINE_CLAMP_MAXMIN_FUNCS(int8_t)
1441
+ DEFINE_CLAMP_MAXMIN_FUNCS(uint8_t)
1442
+ DEFINE_CLAMP_MAXMIN_FUNCS(int16_t)
1443
+ DEFINE_CLAMP_MAXMIN_FUNCS(int32_t)
1444
+ DEFINE_CLAMP_MAXMIN_FUNCS(int64_t)
1445
+ DEFINE_CLAMP_MAXMIN_FUNCS(float)
1446
+ DEFINE_CLAMP_MAXMIN_FUNCS(double)
1447
+
1448
+ namespace { /* unnamed namespace */
1449
+
1450
+ #if !defined(vec_float) || __ARCH__ < 13
1451
+ #warning \
1452
+ "float->int and int->float conversion is simulated. compile for z15 for improved performance"
1453
+ inline ZSimdVect<float> vec_int_flt(const ZSimdVect<int> x) {
1454
+ return ZSimdVect<float>{float(x[0]), float(x[1]), float(x[2]), float(x[3])};
1455
+ }
1456
+ inline ZSimdVect<int> vec_flt_int(const ZSimdVect<float> x) {
1457
+ return ZSimdVect<int>{int(x[0]), int(x[1]), int(x[2]), int(x[3])};
1458
+ }
1459
+ #else
1460
+ #define vec_int_flt vec_float
1461
+ #define vec_flt_int vec_signed
1462
+ #endif
1463
+
1464
+ Vectorized<float> convert_to_float(const Vectorized<int32_t>& x) {
1465
+ return {vec_int_flt(x.vec0()), vec_int_flt(x.vec1())};
1466
+ }
1467
+
1468
+ Vectorized<int32_t> convert_to_int(const Vectorized<float>& x) {
1469
+ return {vec_flt_int(x.vec0()), vec_flt_int(x.vec1())};
1470
+ }
1471
+
1472
+ Vectorized<double> convert_to_float(const Vectorized<int64_t>& x) {
1473
+ return {vec_double(x.vec0()), vec_double(x.vec1())};
1474
+ }
1475
+
1476
+ Vectorized<int64_t> convert_to_int(const Vectorized<double>& x) {
1477
+ return {vec_signed(x.vec0()), vec_signed(x.vec1())};
1478
+ }
1479
+
1480
+ } /* unnamed namespace */
1481
+
1482
+ template <typename T, typename V>
1483
+ Vectorized<V> cast_zvector(const Vectorized<T>& x) {
1484
+ using cast_type = typename Vectorized<V>::vtype;
1485
+ return Vectorized<V>{(cast_type)x.vec0(), (cast_type)x.vec1()};
1486
+ }
1487
+
1488
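+ // fmadd: float and double map to the s390 fused multiply-add builtins; the
+ // integer overloads below are simply a multiply followed by an add.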
+ template <>
1489
+ Vectorized<float> C10_ALWAYS_INLINE fmadd(
1490
+ const Vectorized<float>& a,
1491
+ const Vectorized<float>& b,
1492
+ const Vectorized<float>& c) {
1493
+ return Vectorized<float>{
1494
+ __builtin_s390_vfmasb(a.vec0(), b.vec0(), c.vec0()),
1495
+ __builtin_s390_vfmasb(a.vec1(), b.vec1(), c.vec1())};
1496
+ }
1497
+ template <>
1498
+ Vectorized<double> C10_ALWAYS_INLINE fmadd(
1499
+ const Vectorized<double>& a,
1500
+ const Vectorized<double>& b,
1501
+ const Vectorized<double>& c) {
1502
+ return Vectorized<double>{
1503
+ __builtin_s390_vfmadb(a.vec0(), b.vec0(), c.vec0()),
1504
+ __builtin_s390_vfmadb(a.vec1(), b.vec1(), c.vec1())};
1505
+ }
1506
+ template <>
1507
+ Vectorized<int16_t> C10_ALWAYS_INLINE fmadd(
1508
+ const Vectorized<int16_t>& a,
1509
+ const Vectorized<int16_t>& b,
1510
+ const Vectorized<int16_t>& c) {
1511
+ return Vectorized<int16_t>{
1512
+ a.vec0() * b.vec0() + c.vec0(), a.vec1() * b.vec1() + c.vec1()};
1513
+ }
1514
+ template <>
1515
+ Vectorized<int32_t> C10_ALWAYS_INLINE fmadd(
1516
+ const Vectorized<int32_t>& a,
1517
+ const Vectorized<int32_t>& b,
1518
+ const Vectorized<int32_t>& c) {
1519
+ return Vectorized<int32_t>{
1520
+ a.vec0() * b.vec0() + c.vec0(), a.vec1() * b.vec1() + c.vec1()};
1521
+ }
1522
+ template <>
1523
+ Vectorized<int64_t> C10_ALWAYS_INLINE fmadd(
1524
+ const Vectorized<int64_t>& a,
1525
+ const Vectorized<int64_t>& b,
1526
+ const Vectorized<int64_t>& c) {
1527
+ return Vectorized<int64_t>{
1528
+ a.vec0() * b.vec0() + c.vec0(), a.vec1() * b.vec1() + c.vec1()};
1529
+ }
1530
+
1531
+ template <>
1532
+ Vectorized<int64_t> C10_ALWAYS_INLINE
1533
+ convert_to_int_of_same_size<double>(const Vectorized<double>& src) {
1534
+ return convert_to_int(src);
1535
+ }
1536
+
1537
+ template <>
1538
+ Vectorized<int32_t> C10_ALWAYS_INLINE
1539
+ convert_to_int_of_same_size<float>(const Vectorized<float>& src) {
1540
+ return convert_to_int(src);
1541
+ }
1542
+
1543
+ template <>
1544
+ inline void convert(const int32_t* src, float* dst, int64_t n) {
1545
+ // int32_t and float have the same size
1546
+ int64_t i;
1547
+ for (i = 0; i <= (n - Vectorized<float>::size());
1548
+ i += Vectorized<float>::size()) {
1549
+ const int32_t* src_a = src + i;
1550
+ float* dst_a = dst + i;
1551
+ auto input_vec = Vectorized<int32_t>::loadu(src_a);
1552
+ auto output_vec = convert_to_float(input_vec);
1553
+ output_vec.store(dst_a);
1554
+ }
1555
+
1556
+ for (; i < n; i++) {
1557
+ dst[i] = static_cast<float>(src[i]);
1558
+ }
1559
+ }
1560
+
1561
+ template <>
1562
+ inline void convert(const int64_t* src, double* dst, int64_t n) {
1563
+ int64_t i;
1564
+ for (i = 0; i <= (n - Vectorized<double>::size());
1565
+ i += Vectorized<double>::size()) {
1566
+ const int64_t* src_a = src + i;
1567
+ double* dst_a = dst + i;
1568
+ auto input_vec = Vectorized<int64_t>::loadu(src_a);
1569
+ auto output_vec = convert_to_float(input_vec);
1570
+ output_vec.store(dst_a);
1571
+ }
1572
+ for (; i < n; i++) {
1573
+ dst[i] = static_cast<double>(src[i]);
1574
+ }
1575
+ }
1576
+
1577
+ #define DEFINE_REINTERPRET_CAST_FUNCS(Fst, Cst) \
1578
+ template <> \
1579
+ C10_ALWAYS_INLINE Vectorized<Cst> cast<Cst, Fst>( \
1580
+ const Vectorized<Fst>& src) { \
1581
+ return cast_zvector<Fst, Cst>(src); \
1582
+ }
1583
+
1584
+ #define DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(Fst) \
1585
+ DEFINE_REINTERPRET_CAST_FUNCS(Fst, double) \
1586
+ DEFINE_REINTERPRET_CAST_FUNCS(Fst, float) \
1587
+ DEFINE_REINTERPRET_CAST_FUNCS(Fst, int64_t) \
1588
+ DEFINE_REINTERPRET_CAST_FUNCS(Fst, int32_t) \
1589
+ DEFINE_REINTERPRET_CAST_FUNCS(Fst, int16_t)
1590
+
1591
+ DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(float)
1592
+ DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(double)
1593
+ DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(int64_t)
1594
+ DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(int32_t)
1595
+ DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(int16_t)
1596
+
1597
+ #undef DEFINE_REINTERPRET_CAST_FUNCS
1598
+
1599
+ template <typename T>
1600
+ struct unpack_type {
1601
+ using type = T;
1602
+ };
1603
+ template <>
1604
+ struct unpack_type<int8_t> {
1605
+ using type = int16_t;
1606
+ };
1607
+ template <>
1608
+ struct unpack_type<uint8_t> {
1609
+ using type = int16_t;
1610
+ };
1611
+ template <>
1612
+ struct unpack_type<int16_t> {
1613
+ using type = int32_t;
1614
+ };
1615
+
1616
+ template <typename T>
1617
+ struct pack_type {
1618
+ using type = T;
1619
+ };
1620
+ template <>
1621
+ struct pack_type<int16_t> {
1622
+ using type = int8_t;
1623
+ };
1624
+ template <>
1625
+ struct pack_type<int32_t> {
1626
+ using type = int16_t;
1627
+ };
1628
+
1629
+ namespace { /* unnamed namespace */
1630
+
1631
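+ // unpack widens every element to the next larger integer type via
+ // vec_unpackh/vec_unpackl (the uint8_t specialization widens through uint16_t
+ // so the values are treated as unsigned), and pack narrows back with
+ // saturation (vec_packs, or vec_packsu for the signed -> uint8_t case).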
+ template <typename T, typename V = typename unpack_type<T>::type>
1632
+ std::pair<Vectorized<V>, Vectorized<V>> unpack(const Vectorized<T>& x) {
1633
+ auto vec0 = vec_unpackh(x.vec0());
1634
+ auto vec1 = vec_unpackl(x.vec0());
1635
+ auto vec2 = vec_unpackh(x.vec1());
1636
+ auto vec3 = vec_unpackl(x.vec1());
1637
+ return {Vectorized<V>{vec0, vec1}, Vectorized<V>{vec2, vec3}};
1638
+ }
1639
+
1640
+ template <>
1641
+ std::pair<Vectorized<int16_t>, Vectorized<int16_t>> unpack<uint8_t, int16_t>(
1642
+ const Vectorized<uint8_t>& x) {
1643
+ using typeX = typename Vectorized<uint16_t>::vtype;
1644
+ typeX vec0 = vec_unpackh(x.vec0());
1645
+ typeX vec1 = vec_unpackl(x.vec0());
1646
+ typeX vec2 = vec_unpackh(x.vec1());
1647
+ typeX vec3 = vec_unpackl(x.vec1());
1648
+ // auto mask = Vectorized<uint16_t>(0xFF);
1649
+ // vec0 = vec0 & mask;
1650
+ // vec1 = vec1 & mask;
1651
+ // vec2 = vec2 & mask;
1652
+ // vec3 = vec3 & mask;
1653
+ return {
1654
+ cast_zvector<uint16_t, int16_t>(Vectorized<uint16_t>{vec0, vec1}),
1655
+ cast_zvector<uint16_t, int16_t>(Vectorized<uint16_t>{vec2, vec3})};
1656
+ }
1657
+
1658
+ template <typename T, typename V = typename pack_type<T>::type>
1659
+ Vectorized<V> pack(const Vectorized<T>& first, const Vectorized<T>& second) {
1660
+ auto vec0 = vec_packs(first.vec0(), first.vec1());
1661
+ auto vec1 = vec_packs(second.vec0(), second.vec1());
1662
+ return Vectorized<V>{vec0, vec1};
1663
+ }
1664
+
1665
+ template <>
1666
+ Vectorized<uint8_t> pack(
1667
+ const Vectorized<int16_t>& first,
1668
+ const Vectorized<int16_t>& second) {
1669
+ auto vec0 = vec_packsu(first.vec0(), first.vec1());
1670
+ auto vec1 = vec_packsu(second.vec0(), second.vec1());
1671
+ return Vectorized<uint8_t>{vec0, vec1};
1672
+ }
1673
+
1674
+ } /* unnamed namespace */
1675
+
1676
+ //////////////////////////////////QUANT///////////////////////////////////////////
1677
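+ // Vectorized for the quantized types wraps a Vectorized<underlying> value.
+ // float_num_vecs() is 1 for c10::qint32 and 4 for the 8-bit types, which is
+ // why the dequantize/quantize/widening paths below come in two variants.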
+ template <typename T>
1678
+ struct Vectorized<T, std::enable_if_t<is_zarch_implemented_quant<T>()>> {
1679
+ public:
1680
+ using value_type = typename T::underlying;
1681
+ using vtype = ZSimdVect<value_type>;
1682
+ using vmaskType = ZSimdVectBinary<value_type>;
1683
+ using vinner_type = Vectorized<value_type>;
1684
+ using size_type = int;
1685
+
1686
+ static constexpr size_type size() {
1687
+ return VECTOR_WIDTH / sizeof(value_type);
1688
+ }
1689
+
1690
+ static constexpr size_t float_num_vecs() {
1691
+ return size() / Vectorized<float>::size();
1692
+ }
1693
+ static constexpr int int_num_vecs() {
1694
+ return float_num_vecs();
1695
+ }
1696
+ using float_vec_return_type = std::array<Vectorized<float>, float_num_vecs()>;
1697
+ using int_vec_return_type =
1698
+ std::array<Vectorized<c10::qint32>, int_num_vecs()>;
1699
+
1700
+ private:
1701
+ vinner_type _vec;
1702
+
1703
+ public:
1704
+ Vectorized() {}
1705
+
1706
+ explicit C10_ALWAYS_INLINE Vectorized(vinner_type v) : _vec{v} {}
1707
+ Vectorized(const T& val) : _vec(val.val_) {}
1708
+
1709
+ C10_ALWAYS_INLINE const vinner_type& vec() const {
1710
+ return _vec;
1711
+ }
1712
+
1713
+ static Vectorized<T> C10_ALWAYS_INLINE
1714
+ loadu(const void* ptr, int count = size()) {
1715
+ return Vectorized<T>{vinner_type::loadu(ptr, count)};
1716
+ }
1717
+
1718
+ void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
1719
+ _vec.store(ptr, count);
1720
+ }
1721
+
1722
+ Vectorized<T> relu(Vectorized<T> zero_point) const {
1723
+ return Vectorized<T>{_vec.maximum(zero_point._vec)};
1724
+ }
1725
+
1726
+ Vectorized<T> relu6(Vectorized<T> zero_point, Vectorized<T> q_six) const {
1727
+ auto ret_max = _vec.maximum(zero_point._vec);
1728
+ auto ret_min = ret_max.minimum(q_six._vec);
1729
+ return Vectorized<T>{ret_min};
1730
+ }
1731
+
1732
+ template <
1733
+ typename U = T,
1734
+ std::enable_if_t<Vectorized<U>::float_num_vecs() == 1, int> = 0>
1735
+ int_vec_return_type widening_subtract(Vectorized<T> b) const {
1736
+ return {*this - b};
1737
+ }
1738
+
1739
+ template <
1740
+ typename U = T,
1741
+ std::enable_if_t<Vectorized<U>::float_num_vecs() == 1, int> = 0>
1742
+ float_vec_return_type dequantize(
1743
+ Vectorized<float> scale,
1744
+ Vectorized<float> zero_point,
1745
+ Vectorized<float> scale_zp_premul) const {
1746
+ auto float_val = convert_to_float(_vec);
1747
+ return {fmadd(scale, float_val, scale_zp_premul)};
1748
+ }
1749
+
1750
+ template <
1751
+ typename U = T,
1752
+ std::enable_if_t<Vectorized<U>::float_num_vecs() == 1, int> = 0>
1753
+ float_vec_return_type dequantize(
1754
+ Vectorized<float> scale,
1755
+ Vectorized<float> zero_point) const {
1756
+ auto float_val = convert_to_float(_vec);
1757
+ return {(float_val - zero_point) * scale};
1758
+ }
1759
+
1760
+ template <
1761
+ typename U = T,
1762
+ std::enable_if_t<Vectorized<U>::float_num_vecs() == 1, int> = 0>
1763
+ static Vectorized<T> quantize(
1764
+ const float_vec_return_type& rhs,
1765
+ float scale,
1766
+ int32_t zero_point,
1767
+ float inverse_scale) {
1768
+ Vectorized<float> vecf = rhs[0];
1769
+ vecf = vecf * Vectorized<float>(inverse_scale);
1770
+ vecf = vecf.rint() + Vectorized<float>((float)(zero_point));
1771
+ auto veci = convert_to_int(vecf);
1772
+
1773
+ return Vectorized<T>{veci};
1774
+ }
1775
+
1776
+ template <
1777
+ typename U = T,
1778
+ std::enable_if_t<Vectorized<U>::int_num_vecs() == 1, int> = 0>
1779
+ static Vectorized<T> requantize_from_int(
1780
+ const int_vec_return_type& inp,
1781
+ float multiplier,
1782
+ int32_t zero_point) {
1783
+ Vectorized<T> vi = inp[0];
1784
+ auto vecf = convert_to_float(vi.vec());
1785
+ vecf = vecf * Vectorized<float>(multiplier);
1786
+ vecf = vecf.rint();
1787
+ auto veci = convert_to_int(vecf) + Vectorized<int>(zero_point);
1788
+
1789
+ return Vectorized<T>{veci};
1790
+ }
1791
+
1792
+ template <
1793
+ typename U = T,
1794
+ std::enable_if_t<Vectorized<U>::int_num_vecs() == 4, int> = 0>
1795
+ int_vec_return_type widening_subtract(Vectorized<U> b) const {
1796
+ auto ret16 = unpack(_vec);
1797
+ auto ret16B = unpack(b.vec());
1798
+ auto ret32_0 = unpack(ret16.first);
1799
+ auto ret32_1 = unpack(ret16.second);
1800
+ auto ret32B_0 = unpack(ret16B.first);
1801
+ auto ret32B_1 = unpack(ret16B.second);
1802
+
1803
+ return {
1804
+ Vectorized<c10::qint32>(ret32_0.first - ret32B_0.first),
1805
+ Vectorized<c10::qint32>(ret32_0.second - ret32B_0.second),
1806
+ Vectorized<c10::qint32>(ret32_1.first - ret32B_1.first),
1807
+ Vectorized<c10::qint32>(ret32_1.second - ret32B_1.second)};
1808
+ }
1809
+
1810
+ template <
1811
+ typename U = T,
1812
+ std::enable_if_t<Vectorized<U>::float_num_vecs() == 4, int> = 0>
1813
+ float_vec_return_type C10_ALWAYS_INLINE dequantize(
1814
+ Vectorized<float> scale,
1815
+ Vectorized<float> zero_point,
1816
+ Vectorized<float> scale_zp_premul) const {
1817
+ // unpacking unsigned as signed
1818
+ auto ret16 = unpack(_vec);
1819
+ auto ret32_0 = unpack(ret16.first);
1820
+ auto ret32_1 = unpack(ret16.second);
1821
+
1822
+ auto vecf_0 = convert_to_float(ret32_0.first);
1823
+ auto vecf_1 = convert_to_float(ret32_0.second);
1824
+
1825
+ auto vecf_2 = convert_to_float(ret32_1.first);
1826
+ auto vecf_3 = convert_to_float(ret32_1.second);
1827
+ return {
1828
+ fmadd(scale, vecf_0, scale_zp_premul),
1829
+ fmadd(scale, vecf_1, scale_zp_premul),
1830
+ fmadd(scale, vecf_2, scale_zp_premul),
1831
+ fmadd(scale, vecf_3, scale_zp_premul)};
1832
+ }
1833
+
1834
+ template <
1835
+ typename U = T,
1836
+ std::enable_if_t<Vectorized<U>::float_num_vecs() == 4, int> = 0>
1837
+ float_vec_return_type dequantize(
1838
+ Vectorized<float> scale,
1839
+ Vectorized<float> zero_point) const {
1840
+ // unpacking unsigned as signed
1841
+ auto ret16 = unpack(_vec);
1842
+ auto ret32_0 = unpack(ret16.first);
1843
+ auto ret32_1 = unpack(ret16.second);
1844
+
1845
+ auto vecf_0 = convert_to_float(ret32_0.first);
1846
+ auto vecf_1 = convert_to_float(ret32_0.second);
1847
+
1848
+ auto vecf_2 = convert_to_float(ret32_1.first);
1849
+ auto vecf_3 = convert_to_float(ret32_1.second);
1850
+
1851
+ return {
1852
+ (vecf_0 - zero_point) * scale,
1853
+ (vecf_1 - zero_point) * scale,
1854
+ (vecf_2 - zero_point) * scale,
1855
+ (vecf_3 - zero_point) * scale };
1856
+ }
1857
+
1858
+ template <
1859
+ typename U = T,
1860
+ std::enable_if_t<Vectorized<U>::float_num_vecs() == 4, int> = 0>
1861
+ static Vectorized<T> quantize(
1862
+ const float_vec_return_type& rhs,
1863
+ float scale,
1864
+ int32_t zero_point,
1865
+ float inverse_scale) {
1866
+ auto vec_inverse = Vectorized<float>(inverse_scale);
1867
+ auto vec_zero_point = Vectorized<float>((float)zero_point);
1868
+
1869
+ auto vecf0 = rhs[0];
1870
+ auto vecf2 = rhs[1];
1871
+ auto vecf4 = rhs[2];
1872
+ auto vecf6 = rhs[3];
1873
+
1874
+ vecf0 = vecf0 * vec_inverse;
1875
+ vecf2 = vecf2 * vec_inverse;
1876
+ vecf4 = vecf4 * vec_inverse;
1877
+ vecf6 = vecf6 * vec_inverse;
1878
+
1879
+ vecf0 = vecf0.rint() + vec_zero_point;
1880
+ vecf2 = vecf2.rint() + vec_zero_point;
1881
+ vecf4 = vecf4.rint() + vec_zero_point;
1882
+ vecf6 = vecf6.rint() + vec_zero_point;
1883
+
1884
+ auto veci0 = convert_to_int(vecf0);
1885
+ auto veci2 = convert_to_int(vecf2);
1886
+ auto veci4 = convert_to_int(vecf4);
1887
+ auto veci6 = convert_to_int(vecf6);
1888
+
1889
+ auto vecshi0 = pack(veci0, veci2);
1890
+ auto vecshi2 = pack(veci4, veci6);
1891
+ auto ret = pack<int16_t, typename U::underlying>(vecshi0, vecshi2);
1892
+
1893
+ return Vectorized<T>{ret};
1894
+ }
1895
+
1896
+ template <
1897
+ typename U = T,
1898
+ std::enable_if_t<Vectorized<U>::int_num_vecs() == 4, int> = 0>
1899
+ static Vectorized<U> requantize_from_int(
1900
+ const int_vec_return_type& inp,
1901
+ float multiplier,
1902
+ int32_t zero_point) {
1903
+ Vectorized<float> vec_multiplier = Vectorized<float>(multiplier);
1904
+ Vectorized<int32_t> vec_zero_point = Vectorized<int32_t>(zero_point);
1905
+
1906
+ Vectorized<c10::qint32> vi0 = inp[0];
1907
+ Vectorized<c10::qint32> vi1 = inp[1];
1908
+ Vectorized<c10::qint32> vi2 = inp[2];
1909
+ Vectorized<c10::qint32> vi3 = inp[3];
1910
+
1911
+ auto vecf0 = convert_to_float(vi0.vec());
1912
+ auto vecf2 = convert_to_float(vi1.vec());
1913
+
1914
+ auto vecf4 = convert_to_float(vi2.vec());
1915
+ auto vecf6 = convert_to_float(vi3.vec());
1916
+
1917
+ vecf0 = vecf0 * vec_multiplier;
1918
+ vecf2 = vecf2 * vec_multiplier;
1919
+
1920
+ vecf4 = vecf4 * vec_multiplier;
1921
+ vecf6 = vecf6 * vec_multiplier;
1922
+
1923
+ vecf0 = vecf0.rint();
1924
+ vecf2 = vecf2.rint();
1925
+ vecf4 = vecf4.rint();
1926
+ vecf6 = vecf6.rint();
1927
+
1928
+ auto veci0 = convert_to_int(vecf0);
1929
+ auto veci2 = convert_to_int(vecf2);
1930
+ auto veci4 = convert_to_int(vecf4);
1931
+ auto veci6 = convert_to_int(vecf6);
1932
+
1933
+ veci0 = veci0 + vec_zero_point;
1934
+ veci2 = veci2 + vec_zero_point;
1935
+
1936
+ veci4 = veci4 + vec_zero_point;
1937
+ veci6 = veci6 + vec_zero_point;
1938
+
1939
+ auto vecshi0 = pack<int32_t, int16_t>(veci0, veci2);
1940
+ auto vecshi2 = pack<int32_t, int16_t>(veci4, veci6);
1941
+
1942
+ auto ret = pack<int16_t, typename U::underlying>(vecshi0, vecshi2);
1943
+
1944
+ return Vectorized<U>{ret};
1945
+ }
1946
+
1947
+ Vectorized<T> C10_ALWAYS_INLINE operator+(const Vectorized<T>& other) const {
1948
+ return Vectorized<T>{_vec + other._vec};
1949
+ }
1950
+
1951
+ Vectorized<T> C10_ALWAYS_INLINE operator-(const Vectorized<T>& other) const {
1952
+ return Vectorized<T>{_vec - other._vec};
1953
+ }
1954
+
1955
+ Vectorized<T> C10_ALWAYS_INLINE operator*(const Vectorized<T>& other) const {
1956
+ return Vectorized<T>{_vec * other._vec};
1957
+ }
1958
+
1959
+ Vectorized<T> C10_ALWAYS_INLINE operator/(const Vectorized<T>& other) const {
1960
+ return Vectorized<T>{_vec / other._vec};
1961
+ }
1962
+
1963
+ Vectorized<T> C10_ALWAYS_INLINE operator&(const Vectorized<T>& other) const {
1964
+ return Vectorized<T>{_vec & other._vec};
1965
+ }
1966
+
1967
+ Vectorized<T> C10_ALWAYS_INLINE operator|(const Vectorized<T>& other) const {
1968
+ return Vectorized<T>{_vec | other._vec};
1969
+ }
1970
+
1971
+ Vectorized<T> C10_ALWAYS_INLINE operator^(const Vectorized<T>& other) const {
1972
+ return Vectorized<T>{_vec ^ other._vec};
1973
+ }
1974
+ Vectorized<T> C10_ALWAYS_INLINE operator==(const Vectorized<T>& other) const {
1975
+ return Vectorized<T>{_vec == other._vec};
1976
+ }
1977
+
1978
+ Vectorized<T> C10_ALWAYS_INLINE operator!=(const Vectorized<T>& other) const {
1979
+ return Vectorized<T>{_vec != other._vec};
1980
+ }
1981
+ Vectorized<T> C10_ALWAYS_INLINE operator>(const Vectorized<T>& other) const {
1982
+ return Vectorized<T>{_vec > other._vec};
1983
+ }
1984
+ Vectorized<T> C10_ALWAYS_INLINE operator>=(const Vectorized<T>& other) const {
1985
+ return Vectorized<T>{_vec >= other._vec};
1986
+ }
1987
+
1988
+ Vectorized<T> C10_ALWAYS_INLINE operator<(const Vectorized<T>& other) const {
1989
+ return Vectorized<T>{_vec < other._vec};
1990
+ }
1991
+
1992
+ Vectorized<T> C10_ALWAYS_INLINE operator<=(const Vectorized<T>& other) const {
1993
+ return Vectorized<T>{_vec <= other._vec};
1994
+ }
1995
+
1996
+ Vectorized<T> C10_ALWAYS_INLINE eq(const Vectorized<T>& other) const {
1997
+ return Vectorized<T>{_vec.eq(other._vec)};
1998
+ }
1999
+ Vectorized<T> C10_ALWAYS_INLINE ne(const Vectorized<T>& other) const {
2000
+ return Vectorized<T>{_vec.ne(other._vec)};
2001
+ }
2002
+ Vectorized<T> C10_ALWAYS_INLINE gt(const Vectorized<T>& other) const {
2003
+ return Vectorized<T>{_vec.gt(other._vec)};
2004
+ }
2005
+ Vectorized<T> C10_ALWAYS_INLINE ge(const Vectorized<T>& other) const {
2006
+ return Vectorized<T>{_vec.ge(other._vec)};
2007
+ }
2008
+ Vectorized<T> C10_ALWAYS_INLINE lt(const Vectorized<T>& other) const {
2009
+ return Vectorized<T>{_vec.lt(other._vec)};
2010
+ }
2011
+ Vectorized<T> C10_ALWAYS_INLINE le(const Vectorized<T>& other) const {
2012
+ return Vectorized<T>{_vec.le(other._vec)};
2013
+ }
2014
+
2015
+ Vectorized<T> clamp_min(const Vectorized<T>& min) const {
2016
+ return Vectorized<T>{_vec.clamp_min(min._vec)};
2017
+ }
2018
+
2019
+ Vectorized<T> clamp_max(const Vectorized<T>& max) const {
2020
+ return Vectorized<T>{_vec.clamp_max(max._vec)};
2021
+ }
2022
+
2023
+ Vectorized<T> minimum(const Vectorized<T>& other) const {
2024
+ return Vectorized<T>{_vec.minimum(other._vec)};
2025
+ }
2026
+
2027
+ Vectorized<T> maximum(const Vectorized<T>& other) const {
2028
+ return Vectorized<T>{_vec.maximum(other._vec)};
2029
+ }
2030
+ };
2031
+
2032
+ DEFINE_CLAMP_MAXMIN_FUNCS(c10::quint8)
2033
+ DEFINE_CLAMP_MAXMIN_FUNCS(c10::qint8)
2034
+ DEFINE_CLAMP_MAXMIN_FUNCS(c10::qint32)
2035
+
2036
+ template <typename U = float>
2037
+ constexpr auto real_mask() {
2038
+ return (ZSimdVect<U>)ZSimdVectBinary<float>{0xFFFFFFFF, 0, 0xFFFFFFFF, 0};
2039
+ }
2040
+
2041
+ template <>
2042
+ constexpr auto real_mask<double>() {
2043
+ return (ZSimdVect<double>)ZSimdVectBinary<double>{0xFFFFFFFFFFFFFFFF, 0};
2044
+ }
2045
+
2046
+ template <typename U = float>
2047
+ constexpr auto image_mask() {
2048
+ return (ZSimdVect<U>)ZSimdVectBinary<U>{0, 0xFFFFFFFF, 0, 0xFFFFFFFF};
2049
+ }
2050
+
2051
+ template <>
2052
+ constexpr auto image_mask<double>() {
2053
+ return (ZSimdVect<double>)ZSimdVectBinary<double>{0, 0xFFFFFFFFFFFFFFFF};
2054
+ }
2055
+
2056
+ template <typename U = float>
2057
+ constexpr auto rsign_mask() {
2058
+ return ZSimdVect<U>{-0.f, 0.f, -0.f, 0.f};
2059
+ }
2060
+
2061
+ template <>
2062
+ constexpr auto rsign_mask<double>() {
2063
+ return ZSimdVect<double>{-0.0, 0.f};
2064
+ }
2065
+
2066
+ template <typename U = float>
2067
+ constexpr auto isign_mask() {
2068
+ return ZSimdVect<U>{0.0, -0.f, 0.0, -0.f};
2069
+ }
2070
+
2071
+ template <>
2072
+ constexpr auto isign_mask<double>() {
2073
+ return ZSimdVect<double>{0.0, -0.0};
2074
+ }
2075
+
2076
+ template <typename U = float>
2077
+ constexpr auto image_one() {
2078
+ return ZSimdVect<U>{0, 1.f, 0, 1.f};
2079
+ }
2080
+
2081
+ template <>
2082
+ constexpr auto image_one<double>() {
2083
+ return ZSimdVect<double>{0.0, 1.0};
2084
+ }
2085
+
2086
+ template <typename U = float>
2087
+ constexpr auto pi_half() {
2088
+ return ZSimdVect<U>{(float)(M_PI / 2.0), 0.f, (float)(M_PI / 2.0), 0.f};
2089
+ }
2090
+
2091
+ template <>
2092
+ constexpr auto pi_half<double>() {
2093
+ return ZSimdVect<double>{M_PI / 2.0, 0.0};
2094
+ }
2095
+
2096
+ template <typename U = float>
2097
+ constexpr auto image_half() {
2098
+ return ZSimdVect<U>{0, 0.5f, 0, 0.5f};
2099
+ }
2100
+
2101
+ template <>
2102
+ constexpr auto image_half<double>() {
2103
+ return ZSimdVect<double>{0.0, 0.5};
2104
+ }
2105
+
2106
+ template <typename U>
2107
+ constexpr U log2e_inv() {
2108
+ return static_cast<U>(1.4426950408889634);
2109
+ }
2110
+
2111
+ template <typename U>
2112
+ constexpr U log10e_inv() {
2113
+ return static_cast<U>(0.43429448190325176);
2114
+ }
2115
+
2116
+ template <typename T>
2117
+ struct Vectorized<T, std::enable_if_t<is_zarch_implemented_complex<T>()>> {
2118
+ public:
2119
+ using underline_type = decltype(std::declval<T>().imag());
2120
+ using value_type = T;
2121
+ using vtype = ZSimdVect<underline_type>;
2122
+ using vmaskType = ZSimdVectBinary<underline_type>;
2123
+ using vinner_type = Vectorized<underline_type>;
2124
+ using size_type = int;
2125
+ using vinner_data = typename Vectorized<underline_type>::vinner_data;
2126
+
2127
+ static constexpr size_type size() {
2128
+ return VECTOR_WIDTH / sizeof(value_type);
2129
+ }
2130
+
2131
+ private:
2132
+ vinner_type _vec;
2133
+
2134
+ public:
2135
+ Vectorized() {}
2136
+
2137
+ C10_ALWAYS_INLINE Vectorized(const vinner_data &v) : _vec{v.first, v.second} {}
2138
+
2139
+ template <typename U = T, std::enable_if_t<(sizeof(U) == 16), int> = 0>
2140
+ C10_ALWAYS_INLINE Vectorized(T s1, T s2)
2141
+ : _vec{s1.real(), s1.imag(), s2.real(), s2.imag()} {}
2142
+
2143
+ template <typename U = T, std::enable_if_t<(sizeof(U) == 8), int> = 0>
2144
+ C10_ALWAYS_INLINE Vectorized(T s1, T s2, T s3, T s4)
2145
+ : _vec{
2146
+ s1.real(),
2147
+ s1.imag(),
2148
+ s2.real(),
2149
+ s2.imag(),
2150
+ s3.real(),
2151
+ s3.imag(),
2152
+ s4.real(),
2153
+ s4.imag()} {}
2154
+
2155
+ template <typename U = T, std::enable_if_t<(sizeof(U) == 16), int> = 0>
2156
+ C10_ALWAYS_INLINE Vectorized(T s) : Vectorized<T>(s, s) {}
2157
+
2158
+ template <typename U = T, std::enable_if_t<(sizeof(U) == 8), int> = 0>
2159
+ C10_ALWAYS_INLINE Vectorized(T s) : Vectorized<T>(s, s, s, s) {}
2160
+
2161
+ C10_ALWAYS_INLINE operator vinner_type() const {
2162
+ return _vec;
2163
+ }
2164
+
2165
+ C10_ALWAYS_INLINE const vinner_type& vec() const {
2166
+ return _vec;
2167
+ }
2168
+
2169
+ C10_ALWAYS_INLINE operator vinner_data() const {
2170
+ return _vec.data();
2171
+ }
2172
+
2173
+ C10_ALWAYS_INLINE vinner_data data() const {
2174
+ return _vec.data();
2175
+ }
2176
+
2177
+ static Vectorized<T> C10_ALWAYS_INLINE
2178
+ loadu(const void* ptr, int count = size()) {
2179
+ return Vectorized<T>{vinner_type::loadu(ptr, 2 * count)};
2180
+ }
2181
+
2182
+ void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
2183
+ return _vec.store(ptr, 2 * count);
2184
+ }
2185
+
2186
+ static Vectorized<T> blendv(
2187
+ const Vectorized<T>& a,
2188
+ const Vectorized<T>& b,
2189
+ const Vectorized<T>& mask) {
2190
+ // convert std::complex<V> index mask to V index mask: xy -> xxyy
2191
+ vinner_type vmask = mask.vec();
2192
+ auto mask_complex = vinner_type(
2193
+ vec_mergeh(vmask.vec0(), vmask.vec0()),
2194
+ vec_mergeh(vmask.vec1(), vmask.vec1()));
2195
+ return Vectorized<T>{vinner_type::blendv(a.vec(), b.vec(), mask_complex)};
2196
+ }
2197
+
2198
+ template <int64_t mask>
2199
+ static auto C10_ALWAYS_INLINE
2200
+ blend(const Vectorized<T>& a, const Vectorized<T>& b) {
2201
+ constexpr int mask_complex = maskForComplex<sizeof(T)>(mask);
2202
+ return Vectorized<T>{
2203
+ vinner_type::template blend<mask_complex>(a.vec(), b.vec())};
2204
+ }
2205
+
2206
+ template <typename step_t, typename U = T>
2207
+ static std::enable_if_t<sizeof(U) == 16, Vectorized<T>> arange(
2208
+ T base = 0,
2209
+ step_t step = static_cast<step_t>(1)) {
2210
+ return Vectorized<T>(base, base + step);
2211
+ }
2212
+
2213
+ template <typename step_t, typename U = T>
2214
+ static std::enable_if_t<sizeof(U) == 8, Vectorized<T>> arange(
2215
+ T base = 0,
2216
+ step_t step = static_cast<step_t>(1)) {
2217
+ return Vectorized<T>(
2218
+ base,
2219
+ base + step,
2220
+ base + value_type(2) * step,
2221
+ base + value_type(3) * step);
2222
+ }
2223
+
2224
+ template <int16_t Z, int16_t C>
2225
+ static inline std::enable_if_t<(Z >= C), Vectorized<T>> set_inner(
2226
+ const Vectorized<T>& a,
2227
+ const Vectorized<T>& b,
2228
+ size_t count) {
2229
+ return b;
2230
+ }
2231
+
2232
+ template <int16_t Z, int16_t C>
2233
+ static inline std::enable_if_t<(Z < C), Vectorized<T>> set_inner(
2234
+ const Vectorized<T>& a,
2235
+ const Vectorized<T>& b,
2236
+ size_t count) {
2237
+ if (count == Z)
2238
+ return blend<allbitset(Z)>(a, b);
2239
+ else
2240
+ return set_inner<Z + 1, C>(a, b, count);
2241
+ }
2242
+
2243
+ static Vectorized<T> set(
2244
+ const Vectorized<T>& a,
2245
+ const Vectorized<T>& b,
2246
+ size_t count = size()) {
2247
+ if (count == 0)
2248
+ return a;
2249
+ return set_inner<1, size()>(a, b, count);
2250
+ }
2251
+
2252
+ const T& operator[](int idx) const = delete;
2253
+ T& operator[](int idx) = delete;
2254
+
2255
+ template <
2256
+ typename U = T,
2257
+ std::enable_if_t<std::is_same<U, c10::complex<float>>::value, int> = 0>
2258
+ Vectorized<T> mapOrdinary(T (*const f)(const T&)) const {
2259
+ auto v0 = _vec.vec0();
2260
+ auto v1 = _vec.vec1();
2261
+ return Vectorized<T>{
2262
+ f(T(v0[0], v0[1])),
2263
+ f(T(v0[2], v0[3])),
2264
+ f(T(v1[0], v1[1])),
2265
+ f(T(v1[2], v1[3]))};
2266
+ }
2267
+
2268
+ template <
2269
+ typename U = T,
2270
+ std::enable_if_t<std::is_same<U, c10::complex<double>>::value, int> = 0>
2271
+ Vectorized<U> mapOrdinary(T (*const f)(const T&)) const {
2272
+ auto v0 = _vec.vec0();
2273
+ auto v1 = _vec.vec1();
2274
+ return Vectorized<T>{f(T(v0[0], v0[1])), f(T(v1[0], v1[1]))};
2275
+ }
2276
+
2277
+ template <
2278
+ typename U = T,
2279
+ std::enable_if_t<std::is_same<U, c10::complex<float>>::value, int> = 0>
2280
+ Vectorized<T> mapOrdinary(T (*const f)(T)) const {
2281
+ auto v0 = _vec.vec0();
2282
+ auto v1 = _vec.vec1();
2283
+ return Vectorized<T>{
2284
+ f(T(v0[0], v0[1])),
2285
+ f(T(v0[2], v0[3])),
2286
+ f(T(v1[0], v1[1])),
2287
+ f(T(v1[2], v1[3]))};
2288
+ }
2289
+
2290
+ template <
2291
+ typename U = T,
2292
+ std::enable_if_t<std::is_same<U, c10::complex<double>>::value, int> = 0>
2293
+ Vectorized<T> mapOrdinary(T (*const f)(T)) const {
2294
+ auto v0 = _vec.vec0();
2295
+ auto v1 = _vec.vec1();
2296
+ return Vectorized<T>{f(T(v0[0], v0[1])), f(T(v1[0], v1[1]))};
2297
+ }
2298
+
2299
+ template <
2300
+ typename U = T,
2301
+ std::enable_if_t<std::is_same<U, c10::complex<float>>::value, int> = 0>
2302
+ inline Vectorized<T> mapOrdinary(
2303
+ T (*const f)(const T&, const T&),
2304
+ const Vectorized<T>& b) const {
2305
+ auto v0 = _vec.vec0();
2306
+ auto v1 = _vec.vec1();
2307
+ auto bvec = b.vec();
2308
+ auto b0 = bvec.vec0();
2309
+ auto b1 = bvec.vec1();
2310
+ T a00 = f(T(v0[0], v0[1]), T(b0[0], b0[1]));
2311
+ T a01 = f(T(v0[2], v0[3]), T(b0[2], b0[3]));
2312
+ T a02 = f(T(v1[0], v1[1]), T(b1[0], b1[1]));
2313
+ T a03 = f(T(v1[2], v1[3]), T(b1[2], b1[3]));
2314
+ return Vectorized<T>{a00, a01, a02, a03};
2315
+ }
2316
+
2317
+ template <
2318
+ typename U = T,
2319
+ std::enable_if_t<std::is_same<U, c10::complex<double>>::value, int> = 0>
2320
+ inline Vectorized<T> mapOrdinary(
2321
+ T (*const f)(const T&, const T&),
2322
+ const Vectorized<T>& b) const {
2323
+ auto v0 = _vec.vec0();
2324
+ auto v1 = _vec.vec1();
2325
+ auto bvec = b.vec();
2326
+ auto b0 = bvec.vec0();
2327
+ auto b1 = bvec.vec1();
2328
+ U a00 = f(U(v0[0], v0[1]), U(b0[0], b0[1]));
2329
+ U a01 = f(U(v1[0], v1[1]), U(b1[0], b1[1]));
2330
+ return Vectorized<T>{a00, a01};
2331
+ }
2332
+
2333
+ Vectorized<T> C10_ALWAYS_INLINE operator+(const Vectorized<T>& other) const {
2334
+ return Vectorized<T>{_vec + other._vec};
2335
+ }
2336
+
2337
+ Vectorized<T> C10_ALWAYS_INLINE operator-(const Vectorized<T>& other) const {
2338
+ return Vectorized<T>{_vec - other._vec};
2339
+ }
2340
+
2341
+ Vectorized<T> inline operator*(const Vectorized<T>& b) const {
2342
+ //(a + bi) * (c + di) = (ac - bd) + (ad + bc)i
2343
+ vinner_type bv = b.vec();
2344
+ #if !defined(ZVECTOR_SIMULATE_X86_MULT)
2345
+ // this is more z arch friendly than simulating horizontal from x86
2346
+ vinner_type vi = bv.mergeo();
2347
+ vinner_type vr = bv.mergee();
2348
+ vi = vi ^ rsign_mask<underline_type>();
2349
+ vinner_type ret = _vec * vr;
2350
+ vinner_type vx_swapped = _vec.swapped();
2351
+ ret = fmadd(vx_swapped, vi, ret);
2352
+ #else
2353
+ vinner_type ac_bd = _vec * b;
2354
+ vinner_type d_c = bv.swapped();
2355
+ d_c = d_c ^ isign_mask<underline_type>();
2356
+ vinner_type ad_bc = _vec * d_c;
2357
+ vinner_type ret = vinner_type::horizontal_sub_perm(ac_bd, ad_bc);
2358
+ #endif
2359
+ return Vectorized<T>{ret};
2360
+ }
2361
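The block above implements the textbook complex product; a scalar sketch of the same identity, using std::complex purely for illustration:

#include <complex>

// (a + bi) * (c + di) = (ac - bd) + (ad + bc)i
inline std::complex<float> complex_mul(std::complex<float> x, std::complex<float> y) {
  float a = x.real(), b = x.imag();
  float c = y.real(), d = y.imag();
  return {a * c - b * d, a * d + b * c};
}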
+
2362
+ template <
2363
+ typename U = T,
2364
+ std::enable_if_t<std::is_same<U, c10::complex<float>>::value, int> = 0>
2365
+ static typename Vectorized<T>::vinner_type real_neg(const typename Vectorized<T>::vinner_type &a)
2366
+ {
2367
+ const auto swap_mask = ZSimdVectBinary<uint8_t>{
2368
+ 0, 1, 2, 3, 20, 21, 22, 23, 8, 9, 10, 11, 28, 29, 30, 31};
2369
+
2370
+ auto a_neg = a.neg();
2371
+ vtype v0 = vec_perm(a_neg.vec0(), a.vec0(), swap_mask);
2372
+ vtype v1 = vec_perm(a_neg.vec1(), a.vec1(), swap_mask);
2373
+ return {v0, v1};
2374
+ }
2375
+
2376
+ template <
2377
+ typename U = T,
2378
+ std::enable_if_t<std::is_same<U, c10::complex<double>>::value, int> = 0>
2379
+ static typename Vectorized<T>::vinner_type real_neg(const typename Vectorized<T>::vinner_type &a)
2380
+ {
2381
+ auto a_neg = a.neg();
2382
+ auto v0 = vec_permi(a_neg.vec0(), a.vec0(), 1);
2383
+ auto v1 = vec_permi(a_neg.vec1(), a.vec1(), 1);
2384
+ return { v0, v1 };
2385
+ }
2386
+
2387
+ Vectorized<T> inline operator/(const Vectorized<T>& b) const {
2388
+ // Unfortunately, this breaks some tests
2389
+ // Implement it like it's done for avx2
2390
+ auto fabs_cd = b.vec().abs(); // |c| |d|
2391
+ auto fabs_dc = fabs_cd.swapped(); // |d| |c|
2392
+ auto scale = vinner_type {1.0} / maximum(fabs_cd, fabs_dc); // 1/sc 1/sc
2393
+ auto a2 = vec() * scale; // a/sc b/sc
2394
+ auto b2 = b.vec() * scale; // c/sc d/sc
2395
+ auto acbd2 = a2 * b2; // ac/sc^2 bd/sc^2
2396
+
2397
+ auto dc2 = b2.swapped(); // d/sc c/sc
2398
+ dc2 = Vectorized<T>::real_neg(dc2); // -d/|c,d| c/sc
2399
+ auto adbc2 = a2 * dc2; // -ad/sc^2 bc/sc^2
2400
+ auto sum1 = acbd2 + acbd2.swapped(); // (ac+bd)/sc^2 (ac+bd)/sc^2
2401
+ auto sum2 = adbc2 + adbc2.swapped(); // (bc-ad)/sc^2 (bc-ad)/sc^2
2402
+ auto res2 = vinner_type::mergee(sum1, sum2); // (ac+bd)/sc^2 (bc-ad)/sc^2
2403
+
2404
+ // get the denominator
2405
+ auto denom2 = Vectorized<T>{b2}.abs_2_(); // (c^2+d^2)/sc^2 (c^2+d^2)/sc^2
2406
+ res2 = res2 / denom2;
2407
+ return Vectorized<T>{ res2 };
2408
+ }
2409
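The division above scales both operands by 1/max(|c|, |d|) before forming the usual quotient, which keeps c^2 + d^2 from overflowing or underflowing. A scalar sketch of the same scheme (illustrative only; it does not handle a zero divisor):

#include <algorithm>
#include <cmath>
#include <complex>

// (a + bi) / (c + di) = ((ac + bd) + (bc - ad)i) / (c^2 + d^2), computed on scaled values.
inline std::complex<float> complex_div(std::complex<float> x, std::complex<float> y) {
  float a = x.real(), b = x.imag();
  float c = y.real(), d = y.imag();
  float s = 1.0f / std::max(std::fabs(c), std::fabs(d));   // 1/sc
  float a2 = a * s, b2 = b * s, c2 = c * s, d2 = d * s;
  float denom = c2 * c2 + d2 * d2;                          // (c^2 + d^2)/sc^2
  return {(a2 * c2 + b2 * d2) / denom, (b2 * c2 - a2 * d2) / denom};
}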
+
2410
+ Vectorized<T> angle2_() const {
2411
+ auto b_a = _vec.swapped(); // b a
2412
+ return Vectorized<T>{_vec.atan2(b_a).swapped()};
2413
+ }
2414
+
2415
+ Vectorized<T> angle() const {
2416
+ return angle2_().real();
2417
+ }
2418
+
2419
+ Vectorized<T> atan() const {
2420
+ // atan(x) = i/2 * ln((i + z)/(i - z))
2421
+ auto ione = Vectorized<T>{vinner_type(image_one<underline_type>())};
2422
+ auto sum = ione + *this;
2423
+ auto sub = ione - *this;
2424
+ auto ln = (sum / sub).log(); // ln((i + z)/(i - z))
2425
+ return ln *
2426
+ Vectorized<T>{vinner_type(image_half<underline_type>())}; // i/2*ln()
2427
+ }
2428
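A scalar check of the identity used by atan() above, written with std::complex for illustration (the vectorized code multiplies by i/2 via image_half):

#include <complex>

// atan(z) = (i/2) * ln((i + z) / (i - z))
inline std::complex<float> atan_via_log(std::complex<float> z) {
  const std::complex<float> i(0.0f, 1.0f);
  return (i * 0.5f) * std::log((i + z) / (i - z));
}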
+
2429
+ Vectorized<T> atanh() const {
2430
+ return mapOrdinary(std::atanh);
2431
+ }
2432
+
2433
+ Vectorized<T> asin() const {
2434
+ // asin(x)
2435
+ // = -i*ln(iz + sqrt(1 -z^2))
2436
+ // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi)))
2437
+ // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi))
2438
+ #if 1
2439
+ vinner_type cnj = conj().vec();
2440
+ vinner_type b_a = cnj.swapped();
2441
+ vinner_type ab = cnj * b_a;
2442
+ vinner_type im = ab + ab;
2443
+ vinner_type val_2 = _vec * _vec;
2444
+ vinner_type val_2_swapped = val_2.swapped();
2445
+ vinner_type re = vinner_type::horizontal_sub_perm(val_2, val_2_swapped);
2446
+ re = vinner_type(static_cast<underline_type>(1)) - re;
2447
+ constexpr int blend_mask =
2448
+ blend_choice<T>(); // 0x0A for complex<double> , 0xAA for complex<float>
2449
+ vinner_type blendx = vinner_type::template blend<blend_mask>(re, im);
2450
+ auto root = Vectorized<T>(blendx).sqrt();
2451
+ auto ln = Vectorized<T>(Vectorized<T>(b_a) + root).log();
2452
+ return Vectorized<T>(ln.vec().swapped()).conj();
2453
+ #else
2454
+ return mapOrdinary(std::asin);
2455
+ #endif
2456
+ }
2457
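Likewise, the vectorized asin() follows the log form sketched in its comment; a scalar version of the same identity for reference (illustrative only):

#include <complex>

// asin(z) = -i * ln(i*z + sqrt(1 - z^2))
inline std::complex<float> asin_via_log(std::complex<float> z) {
  const std::complex<float> i(0.0f, 1.0f);
  const std::complex<float> one(1.0f, 0.0f);
  return -i * std::log(i * z + std::sqrt(one - z * z));
}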
+
2458
+ Vectorized<T> acos() const {
2459
+ // acos(x) = pi/2 - asin(x)
2460
+ return Vectorized<T>(vinner_type(pi_half<underline_type>())) - asin();
2461
+ }
2462
+
2463
+ Vectorized<T> sin() const {
2464
+ return mapOrdinary(std::sin);
2465
+ }
2466
+ Vectorized<T> sinh() const {
2467
+ return mapOrdinary(std::sinh);
2468
+ }
2469
+ Vectorized<T> cos() const {
2470
+ return mapOrdinary(std::cos);
2471
+ }
2472
+ Vectorized<T> cosh() const {
2473
+ return mapOrdinary(std::cosh);
2474
+ }
2475
+ Vectorized<T> ceil() const {
2476
+ return Vectorized<T>{_vec.ceil()};
2477
+ }
2478
+ Vectorized<T> floor() const {
2479
+ return Vectorized<T>{_vec.floor()};
2480
+ }
2481
+ Vectorized<T> neg() const {
2482
+ return Vectorized<T>(_vec.neg());
2483
+ }
2484
+ Vectorized<T> round() const {
2485
+ return Vectorized<T>{_vec.round()};
2486
+ }
2487
+ Vectorized<T> tan() const {
2488
+ return mapOrdinary(std::tan);
2489
+ }
2490
+ Vectorized<T> tanh() const {
2491
+ return mapOrdinary(std::tanh);
2492
+ }
2493
+ Vectorized<T> trunc() const {
2494
+ return Vectorized<T>{_vec.trunc()};
2495
+ }
2496
+
2497
+ Vectorized<T> C10_ALWAYS_INLINE operator&(const Vectorized<T>& other) const {
2498
+ return Vectorized<T>{_vec & other._vec};
2499
+ }
2500
+
2501
+ Vectorized<T> C10_ALWAYS_INLINE operator|(const Vectorized<T>& other) const {
2502
+ return Vectorized<T>{_vec | other._vec};
2503
+ }
2504
+
2505
+ Vectorized<T> C10_ALWAYS_INLINE operator^(const Vectorized<T>& other) const {
2506
+ return Vectorized<T>{_vec ^ other._vec};
2507
+ }
2508
+ Vectorized<T> C10_ALWAYS_INLINE operator==(const Vectorized<T>& other) const {
2509
+ return Vectorized<T>{_vec == other._vec};
2510
+ }
2511
+
2512
+ Vectorized<T> C10_ALWAYS_INLINE operator!=(const Vectorized<T>& other) const {
2513
+ return Vectorized<T>{_vec != other._vec};
2514
+ }
2515
+
2516
+ Vectorized<T> C10_ALWAYS_INLINE eq(const Vectorized<T>& other) const {
2517
+ auto eq = _vec.eq(other._vec); // compares real and imag individually
2518
+ // If both real numbers and imag numbers are equal, then the complex numbers are equal
2519
+ auto real = eq & vinner_type(real_mask<underline_type>());
2520
+ auto imag = (eq & vinner_type(image_mask<underline_type>())).swapped();
2521
+ return Vectorized<T>{real & imag};
2522
+ }
2523
+ Vectorized<T> C10_ALWAYS_INLINE ne(const Vectorized<T>& other) const {
2524
+ auto ne = _vec.ne(other._vec); // compares real and imag individually
2525
+ // If either real numbers or imag numbers are not equal, then the complex numbers are not equal
2526
+ auto real = ne & vinner_type(real_mask<underline_type>());
2527
+ auto imag = (ne & vinner_type(image_mask<underline_type>())).swapped();
2528
+ return Vectorized<T>{real | imag};
2529
+ }
2530
+
2531
+ Vectorized<T> real() const {
2532
+ return Vectorized<T>(_vec & vinner_type(real_mask<underline_type>()));
2533
+ }
2534
+ Vectorized<T> imag_() const {
2535
+ return Vectorized<T>(_vec & vinner_type(image_mask<underline_type>()));
2536
+ }
2537
+ Vectorized<T> imag() const {
2538
+ return Vectorized<T>{
2539
+ (_vec & vinner_type(image_mask<underline_type>())).swapped()};
2540
+ }
2541
+
2542
+ Vectorized<T> conj() const {
2543
+ return Vectorized<T>(_vec ^ vinner_type(isign_mask<underline_type>()));
2544
+ }
2545
+
2546
+ vinner_data abs_2_() const {
2547
+ auto a = _vec * _vec;
2548
+ a = a + a.swapped();
2549
+ return a.mergee().data();
2550
+ }
2551
+
2552
+ static T abs_helper(const T &value)
2553
+ {
2554
+ return T(std::abs(value));
2555
+ }
2556
+
2557
+ Vectorized<T> abs() const {
2558
+ return mapOrdinary(abs_helper);
2559
+ }
2560
+
2561
+ Vectorized<T> exp() const {
2562
+ return mapOrdinary(std::exp);
2563
+ }
2564
+
2565
+ Vectorized<T> exp2() const {
2566
+ return mapOrdinary(exp2_impl);
2567
+ }
2568
+
2569
+ Vectorized<T> expm1() const {
2570
+ return mapOrdinary(std::expm1);
2571
+ }
2572
+
2573
+ Vectorized<T> log() const {
2574
+ return mapOrdinary(std::log);
2575
+ }
2576
+
2577
+ Vectorized<T> log2() const {
2578
+ // log2eB_inv
2579
+ auto ret = log();
2580
+ return Vectorized<T>{ret._vec * vinner_type(log2e_inv<underline_type>())};
2581
+ }
2582
+
2583
+ Vectorized<T> log10() const {
2584
+ auto ret = log();
2585
+ return Vectorized<T>{ret._vec * vinner_type(log10e_inv<underline_type>())};
2586
+ }
2587
+
2588
+ Vectorized<T> log1p() const {
2589
+ return mapOrdinary(std::log1p);
2590
+ }
2591
+
2592
+ Vectorized<T> sgn() const {
2593
+ return mapOrdinary(at::native::sgn_impl);
2594
+ }
2595
+
2596
+ Vectorized<T> pow(const Vectorized<T>& exp) const {
2597
+ return mapOrdinary(std::pow, exp);
2598
+ }
2599
+
2600
+ Vectorized<T> sqrt() const {
2601
+ return mapOrdinary(std::sqrt);
2602
+ }
2603
+
2604
+ Vectorized<T> reciprocal() const {
2605
+ // re + im*i = (a + bi) / (c + di)
2606
+ // re = (ac + bd)/abs_2() = c/abs_2()
2607
+ // im = (bc - ad)/abs_2() = d/abs_2()
2608
+ vinner_type c_d = _vec ^ vinner_type(isign_mask<underline_type>());
2609
+ vinner_type abs = abs_2_();
2610
+ return Vectorized<T>{c_d / abs};
2611
+ }
2612
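A scalar model of reciprocal(): conjugate the value (the sign flip done via isign_mask) and divide by the squared magnitude. The helper name is hypothetical, for illustration only:

#include <complex>

// 1 / (c + di) = (c - di) / (c^2 + d^2)
inline std::complex<float> reciprocal_scalar(std::complex<float> z) {
  float c = z.real(), d = z.imag();
  float denom = c * c + d * d;
  return {c / denom, -d / denom};
}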
+
2613
+ Vectorized<T> rsqrt() const {
2614
+ return sqrt().reciprocal();
2615
+ }
2616
+
2617
+ Vectorized<T> operator<(const Vectorized<T>& other) const {
2618
+ TORCH_CHECK(false, "not supported for complex numbers");
2619
+ }
2620
+
2621
+ Vectorized<T> operator<=(const Vectorized<T>& other) const {
2622
+ TORCH_CHECK(false, "not supported for complex numbers");
2623
+ }
2624
+
2625
+ Vectorized<T> operator>(const Vectorized<T>& other) const {
2626
+ TORCH_CHECK(false, "not supported for complex numbers");
2627
+ }
2628
+
2629
+ Vectorized<T> operator>=(const Vectorized<T>& other) const {
2630
+ TORCH_CHECK(false, "not supported for complex numbers");
2631
+ }
2632
+
2633
+ Vectorized<T> lt(const Vectorized<T>& other) const {
2634
+ TORCH_CHECK(false, "not supported for complex numbers");
2635
+ }
2636
+
2637
+ Vectorized<T> le(const Vectorized<T>& other) const {
2638
+ TORCH_CHECK(false, "not supported for complex numbers");
2639
+ }
2640
+
2641
+ Vectorized<T> gt(const Vectorized<T>& other) const {
2642
+ TORCH_CHECK(false, "not supported for complex numbers");
2643
+ }
2644
+
2645
+ Vectorized<T> ge(const Vectorized<T>& other) const {
2646
+ TORCH_CHECK(false, "not supported for complex numbers");
2647
+ }
2648
+ };
2649
+
2650
+ template <typename T, std::enable_if_t<(sizeof(T) == 8), int> = 0>
2651
+ std::pair<Vectorized<T>, Vectorized<T>> inline inner_interleave2(
2652
+ const Vectorized<T>& a,
2653
+ const Vectorized<T>& b) {
2654
+ // inputs:
2655
+ // a = {a0, a1, a2, a3}
2656
+ // b = {b0, b1, b2, b3}
2657
+ using vtype = typename Vectorized<T>::vtype;
2658
+ vtype ab00 = vec_permi(a.vec0(), b.vec0(), 0);
2659
+ vtype ab11 = vec_permi(a.vec0(), b.vec0(), 3);
2660
+ vtype ab2_00 = vec_permi(a.vec1(), b.vec1(), 0);
2661
+ vtype ab2_11 = vec_permi(a.vec1(), b.vec1(), 3);
2662
+ // return {a0, b0, a1, b1}
2663
+ // {a2, b2, a3, b3}
2664
+ return std::make_pair(
2665
+ Vectorized<T>{ab00, ab11}, Vectorized<T>{ab2_00, ab2_11});
2666
+ }
2667
+
2668
+ template <typename T, std::enable_if_t<(sizeof(T) == 8), int> = 0>
2669
+ std::pair<Vectorized<T>, Vectorized<T>> inline inner_deinterleave2(
2670
+ const Vectorized<T>& a,
2671
+ const Vectorized<T>& b) {
2672
+ // inputs:
2673
+ // a = {a0, b0, a1, b1}
2674
+ // b = {a2, b2, a3, b3}
2675
+ using vtype = typename Vectorized<T>::vtype;
2676
+ vtype aa01 = vec_permi(a.vec0(), a.vec1(), 0);
2677
+ vtype aa23 = vec_permi(b.vec0(), b.vec1(), 0);
2678
+
2679
+ vtype bb_01 = vec_permi(a.vec0(), a.vec1(), 3);
2680
+ vtype bb_23 = vec_permi(b.vec0(), b.vec1(), 3);
2681
+
2682
+ // swap lanes:
2683
+ // return {a0, a1, a2, a3}
2684
+ // {b0, b1, b2, b3}
2685
+ return std::make_pair(Vectorized<T>{aa01, aa23}, Vectorized<T>{bb_01, bb_23});
2686
+ }
2687
+
2688
+ template <typename T, std::enable_if_t<(sizeof(T) == 4), int> = 0>
2689
+ std::pair<Vectorized<T>, Vectorized<T>> inline inner_interleave2(
2690
+ const Vectorized<T>& a,
2691
+ const Vectorized<T>& b) {
2692
+ // inputs:
2693
+ // a = {a0, a1, a2, a3,, a4, a5, a6, a7}
2694
+ // b = {b0, b1, b2, b3,, b4, b5, b6, b7}
2695
+ using vtype = typename Vectorized<T>::vtype;
2696
+ vtype ab0011 = vec_mergeh(a.vec0(), b.vec0());
2697
+ vtype ab2233 = vec_mergel(a.vec0(), b.vec0());
2698
+
2699
+ vtype ab2_0011 = vec_mergeh(a.vec1(), b.vec1());
2700
+ vtype ab2_2233 = vec_mergel(a.vec1(), b.vec1());
2701
+ // group cols crossing lanes:
2702
+ // return {a0, b0, a1, b1,, a2, b2, a3, b3}
2703
+ // {a4, b4, a5, b5,, a6, b6, a7, b7}
2704
+
2705
+ return std::make_pair(
2706
+ Vectorized<T>{ab0011, ab2233}, Vectorized<T>{ab2_0011, ab2_2233});
2707
+ }
2708
+
2709
+ template <typename T, std::enable_if_t<(sizeof(T) == 4), int> = 0>
2710
+ std::pair<Vectorized<T>, Vectorized<T>> inline inner_deinterleave2(
2711
+ const Vectorized<T>& a,
2712
+ const Vectorized<T>& b) {
2713
+ // inputs:
2714
+ // a = {a0, b0, a1, b1,, a2, b2, a3, b3}
2715
+ // b = {a4, b4, a5, b5,, a6, b6, a7, b7}
2716
+ using vtype = typename Vectorized<T>::vtype;
2717
+ // {a0,a2,b0,b2} {a1,a3,b1,b3}
2718
+ vtype a0a2b0b2 = vec_mergeh(a.vec0(), a.vec1());
2719
+ vtype a1a3b1b3 = vec_mergel(a.vec0(), a.vec1());
2720
+
2721
+ vtype aa0123 = vec_mergeh(a0a2b0b2, a1a3b1b3);
2722
+ vtype bb0123 = vec_mergel(a0a2b0b2, a1a3b1b3);
2723
+
2724
+ vtype a0a2b0b2_2 = vec_mergeh(b.vec0(), b.vec1());
2725
+ vtype a1a3b1b3_2 = vec_mergel(b.vec0(), b.vec1());
2726
+
2727
+ vtype aa0123_2 = vec_mergeh(a0a2b0b2_2, a1a3b1b3_2);
2728
+ vtype bb0123_2 = vec_mergel(a0a2b0b2_2, a1a3b1b3_2);
2729
+
2730
+ // it could be done with vec_perm ,too
2731
+ // swap lanes:
2732
+ // return {a0, a1, a2, a3,, a4, a5, a6, a7}
2733
+ // {b0, b1, b2, b3,, b4, b5, b6, b7}
2734
+
2735
+ return std::make_pair(
2736
+ Vectorized<T>{aa0123, aa0123_2}, Vectorized<T>{bb0123, bb0123_2});
2737
+ }
2738
+
2739
+ template <>
2740
+ std::pair<Vectorized<float>, Vectorized<float>> inline interleave2<float>(
2741
+ const Vectorized<float>& a,
2742
+ const Vectorized<float>& b) {
2743
+ return inner_interleave2<float>(a, b);
2744
+ }
2745
+
2746
+ template <>
2747
+ std::pair<Vectorized<int32_t>, Vectorized<int32_t>> inline interleave2<int32_t>(
2748
+ const Vectorized<int32_t>& a,
2749
+ const Vectorized<int32_t>& b) {
2750
+ return inner_interleave2<int32_t>(a, b);
2751
+ }
2752
+
2753
+ template <>
2754
+ std::pair<Vectorized<double>, Vectorized<double>> inline interleave2<double>(
2755
+ const Vectorized<double>& a,
2756
+ const Vectorized<double>& b) {
2757
+ return inner_interleave2<double>(a, b);
2758
+ }
2759
+
2760
+ template <>
2761
+ std::pair<Vectorized<int64_t>, Vectorized<int64_t>> inline interleave2<int64_t>(
2762
+ const Vectorized<int64_t>& a,
2763
+ const Vectorized<int64_t>& b) {
2764
+ return inner_interleave2<int64_t>(a, b);
2765
+ }
2766
+
2767
+ template <>
2768
+ std::pair<Vectorized<float>, Vectorized<float>> inline deinterleave2<float>(
2769
+ const Vectorized<float>& a,
2770
+ const Vectorized<float>& b) {
2771
+ return inner_deinterleave2<float>(a, b);
2772
+ }
2773
+
2774
+ template <>
2775
+ std::pair<Vectorized<int32_t>, Vectorized<int32_t>> inline deinterleave2<
2776
+ int32_t>(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
2777
+ return inner_deinterleave2<int32_t>(a, b);
2778
+ }
2779
+
2780
+ template <>
2781
+ std::pair<Vectorized<double>, Vectorized<double>> inline deinterleave2<double>(
2782
+ const Vectorized<double>& a,
2783
+ const Vectorized<double>& b) {
2784
+ return inner_deinterleave2<double>(a, b);
2785
+ }
2786
+
2787
+ template <>
2788
+ std::pair<Vectorized<int64_t>, Vectorized<int64_t>> inline deinterleave2<
2789
+ int64_t>(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
2790
+ return inner_deinterleave2<int64_t>(a, b);
2791
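As a sanity check on the interleave2/deinterleave2 semantics specialized above, a scalar model over plain arrays (the real code permutes Vectorized<T> registers; this helper is an assumption for illustration):

#include <array>
#include <cstddef>
#include <utility>

// interleave2: {a0..a(N-1)}, {b0..b(N-1)} -> {a0, b0, a1, b1, ...}, {a(N/2), b(N/2), ...};
// deinterleave2 is the exact inverse of this permutation.
template <typename T, std::size_t N>
std::pair<std::array<T, N>, std::array<T, N>> interleave2_scalar(
    const std::array<T, N>& a, const std::array<T, N>& b) {
  std::array<T, N> lo{}, hi{};
  for (std::size_t i = 0; i < N / 2; ++i) {
    lo[2 * i] = a[i];
    lo[2 * i + 1] = b[i];
    hi[2 * i] = a[N / 2 + i];
    hi[2 * i + 1] = b[N / 2 + i];
  }
  return {lo, hi};
}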
+ }
2792
+
2793
+ template <typename T>
2794
+ typename std::enable_if<std::is_same<T, uint8_t>::value, at::vec::Vectorized<float>>::type
2795
+ inline convert_int8_to_float(const Vectorized<T> &src) {
2796
+ // Note: this function only converts a number of input elements equal to at::vec::Vectorized<float>::size()
2797
+ // Only handle first 64 bits
2798
+ auto vec_int = src.to_vec_float_helper();
2799
+
2800
+ return convert_to_float(vec_int);
2801
+ }
2802
+
2803
+ template <typename T>
2804
+ typename std::enable_if<std::is_same<T, uint8_t>::value, at::vec::Vectorized<T>>::type
2805
+ inline convert_float_to_int8(const Vectorized<float> &src) {
2806
+ constexpr auto min_val = std::numeric_limits<T>::min();
2807
+ constexpr auto max_val = std::numeric_limits<T>::max();
2808
+
2809
+ auto vec_int = clamp(convert_to_int(src), Vectorized<int32_t>(min_val), Vectorized<int32_t>(max_val));
2810
+
2811
+ return vec_int.to_vec_uint8_helper();
2812
+ }
2813
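convert_float_to_int8 above clamps after converting to int32 and only then narrows to 8 bits; a scalar sketch of that order of operations, assuming the underlying convert truncates toward zero (names are illustrative):

#include <algorithm>
#include <cstdint>

// Scalar model for the uint8_t case: convert, clamp to [0, 255], narrow.
inline uint8_t float_to_uint8(float f) {
  int32_t v = static_cast<int32_t>(f);  // truncation toward zero assumed
  v = std::min<int32_t>(255, std::max<int32_t>(0, v));
  return static_cast<uint8_t>(v);
}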
+
2814
+ #undef DEFINE_CLAMP_MAXMIN_FUNCS
2815
+ #undef DEFINE_MAXMIN_FUNCS
2816
+ } // namespace
2817
+ } // namespace vec
2818
+ } // namespace at
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512.h ADDED
@@ -0,0 +1,275 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+
8
+ #include <ATen/cpu/vec/vec_base.h>
9
+ #include <ATen/cpu/vec/vec512/vec512_float.h>
10
+ #include <ATen/cpu/vec/vec512/vec512_bfloat16.h>
11
+ #include <ATen/cpu/vec/vec512/vec512_double.h>
12
+ #include <ATen/cpu/vec/vec512/vec512_int.h>
13
+ #include <ATen/cpu/vec/vec512/vec512_qint.h>
14
+ #include <ATen/cpu/vec/vec512/vec512_complex_float.h>
15
+ #include <ATen/cpu/vec/vec512/vec512_complex_double.h>
16
+
17
+ #include <algorithm>
18
+ #include <cstddef>
19
+ #include <cstdint>
20
+ #include <cstring>
21
+ #include <ostream>
22
+
23
+ namespace at {
24
+ namespace vec {
25
+
26
+ // See Note [CPU_CAPABILITY namespace]
27
+ inline namespace CPU_CAPABILITY {
28
+
29
+ inline std::ostream& operator<<(std::ostream& stream, const c10::qint32& val) {
30
+ stream << val.val_;
31
+ return stream;
32
+ }
33
+ inline std::ostream& operator<<(std::ostream& stream, const c10::qint8& val) {
34
+ stream << static_cast<int>(val.val_);
35
+ return stream;
36
+ }
37
+ inline std::ostream& operator<<(std::ostream& stream, const c10::quint8& val) {
38
+ stream << static_cast<unsigned int>(val.val_);
39
+ return stream;
40
+ }
41
+
42
+ template <typename T>
43
+ std::ostream& operator<<(std::ostream& stream, const Vectorized<T>& vec) {
44
+ T buf[Vectorized<T>::size()];
45
+ vec.store(buf);
46
+ stream << "vec[";
47
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
48
+ if (i != 0) {
49
+ stream << ", ";
50
+ }
51
+ stream << buf[i];
52
+ }
53
+ stream << "]";
54
+ return stream;
55
+ }
56
+
57
+
58
+ #if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
59
+
60
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CAST (AVX512) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
61
+
62
+ template<>
63
+ inline Vectorized<float> cast<float, double>(const Vectorized<double>& src) {
64
+ return _mm512_castpd_ps(src);
65
+ }
66
+
67
+ template<>
68
+ inline Vectorized<double> cast<double, float>(const Vectorized<float>& src) {
69
+ return _mm512_castps_pd(src);
70
+ }
71
+
72
+ template<>
73
+ inline Vectorized<float> cast<float, int32_t>(const Vectorized<int32_t>& src) {
74
+ return _mm512_castsi512_ps(src);
75
+ }
76
+
77
+ template<>
78
+ inline Vectorized<double> cast<double, int64_t>(const Vectorized<int64_t>& src) {
79
+ return _mm512_castsi512_pd(src);
80
+ }
81
+
82
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GATHER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
83
+
84
+ template<int64_t scale = 1>
85
+ std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<double>>
86
+ inline gather(const double* base_addr, const Vectorized<int64_t>& vindex) {
87
+ return _mm512_i64gather_pd(vindex, base_addr, scale);
88
+ }
89
+
90
+ template<int64_t scale = 1>
91
+ std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<float>>
92
+ inline gather(const float* base_addr, const Vectorized<int32_t>& vindex) {
93
+ return _mm512_i32gather_ps(vindex, base_addr, scale);
94
+ }
95
+
96
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MASK GATHER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
97
+
98
+ template<int64_t scale = 1>
99
+ std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<double>>
100
+ inline mask_gather(const Vectorized<double>& src, const double* base_addr,
101
+ const Vectorized<int64_t>& vindex, Vectorized<double>& mask) {
102
+ auto all_ones = _mm512_castsi512_pd(_mm512_set1_epi64(0xFFFFFFFFFFFFFFFF));
103
+ auto mask_ = _mm512_cmp_pd_mask(all_ones, mask.values, _CMP_EQ_OQ);
104
+ return _mm512_mask_i64gather_pd(src, mask_, vindex, base_addr, scale);
105
+ }
106
+
107
+ template<int64_t scale = 1>
108
+ std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<float>>
109
+ inline mask_gather(const Vectorized<float>& src, const float* base_addr,
110
+ const Vectorized<int32_t>& vindex, Vectorized<float>& mask) {
111
+ auto all_ones = _mm512_castsi512_ps(_mm512_set1_epi32(0xFFFFFFFF));
112
+ auto mask_ = _mm512_cmp_ps_mask(all_ones, mask.values, _CMP_EQ_OQ);
113
+ return _mm512_mask_i32gather_ps(src, mask_, vindex, base_addr, scale);
114
+ }
115
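A scalar model of what the masked gathers above compute: each output lane takes base_addr[vindex[i]] where the corresponding mask lane is all ones, and keeps src[i] otherwise (the intrinsic's byte-granular scale is ignored here; the helper is an assumption for illustration):

#include <cstdint>

// Per-lane: out[i] = (mask[i] all-ones) ? base_addr[vindex[i]] : src[i]
inline void mask_gather_scalar(float* out, const float* src, const float* base_addr,
                               const int32_t* vindex, const uint32_t* mask, int lanes) {
  for (int i = 0; i < lanes; ++i) {
    out[i] = (mask[i] == 0xFFFFFFFFu) ? base_addr[vindex[i]] : src[i];
  }
}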
+
116
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CONVERT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
117
+
118
+ template<>
119
+ Vectorized<int64_t>
120
+ inline convert_to_int_of_same_size<double>(const Vectorized<double> &src) {
121
+ return _mm512_cvtpd_epi64(src);
122
+ }
123
+
124
+ template<>
125
+ Vectorized<int32_t>
126
+ inline convert_to_int_of_same_size<float>(const Vectorized<float> &src) {
127
+ return _mm512_cvttps_epi32(src);
128
+ }
129
+
130
+ template<>
131
+ Vectorized<double>
132
+ inline convert_to_fp_of_same_size<double>(const Vectorized<int64_t> &src) {
133
+ return _mm512_cvtepi64_pd(src);
134
+ }
135
+
136
+ template<>
137
+ Vectorized<float>
138
+ inline convert_to_fp_of_same_size<float>(const Vectorized<int32_t> &src) {
139
+ return _mm512_cvtepi32_ps(src);
140
+ }
141
+
142
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ INTERLEAVE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
143
+
144
+ template <>
145
+ std::pair<Vectorized<double>, Vectorized<double>>
146
+ inline interleave2<double>(const Vectorized<double>& a, const Vectorized<double>& b) {
147
+ // inputs:
148
+ // a = {a0, a1, a3, a3, a4, a5, a6, a7}
149
+ // b = {b0, b1, b2, b3, b4, b5, b6, b7}
150
+ // group cols crossing lanes:
151
+ // return {a0, b0, a1, b1, a2, b2, a3, b3}
152
+ // {a4, b4, a5, b5, a6, b6, a7, b7}
153
+ __m512i idx1 = _mm512_set_epi64(11, 3, 10, 2, 9, 1, 8, 0);
154
+ __m512i idx2 = _mm512_set_epi64(15, 7, 14, 6, 13, 5, 12, 4);
155
+ return std::make_pair(_mm512_mask_permutex2var_pd(a, 0xff, idx1, b),
156
+ _mm512_mask_permutex2var_pd(a, 0xff, idx2, b));
157
+ }
158
+
159
+ template <>
160
+ std::pair<Vectorized<float>, Vectorized<float>>
161
+ inline interleave2<float>(const Vectorized<float>& a, const Vectorized<float>& b) {
162
+ // inputs:
163
+ // a = {a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15}
164
+ // b = {b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15}
165
+ //
166
+ // return:
167
+ // {a0, b0, a1, b1, a2, b2, a3, b3, a4, b4, a5, b5, a6, b6, a7, b7}
168
+ // {a8, b8, a9, b9, a10, b10, a11, b11, a12, b12, a13, b13, a14, b14, a15, b15}
169
+ __m512i idx1 = _mm512_set_epi32(23, 7, 22, 6, 21, 5, 20, 4,
170
+ 19, 3, 18, 2, 17, 1, 16, 0);
171
+ __m512i idx2 = _mm512_set_epi32(31, 15, 30, 14, 29, 13, 28, 12,
172
+ 27, 11, 26, 10, 25, 9, 24, 8);
173
+ return std::make_pair(_mm512_mask_permutex2var_ps(a, 0xffff, idx1, b),
174
+ _mm512_mask_permutex2var_ps(a, 0xffff, idx2, b));
175
+ }
176
+
177
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DEINTERLEAVE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
178
+
179
+ template <>
180
+ std::pair<Vectorized<double>, Vectorized<double>>
181
+ inline deinterleave2<double>(const Vectorized<double>& a, const Vectorized<double>& b) {
182
+ // inputs:
183
+ // a = {a0, b0, a1, b1, a2, b2, a3, b3}
184
+ // b = {a4, b4, a5, b5, a6, b6, a7, b7}
185
+ // output:
186
+ // return {a0, a1, a2, a3, a4, a5, a6, a7}
187
+ // {b0, b1, b2, b3, b4, b5, b6, b7}
188
+ // The indices pick the even-position elements for the first output and the odd-position elements for the second
189
+ __m512i idx1 = _mm512_set_epi64(14, 12, 10, 8, 6, 4, 2, 0);
190
+ __m512i idx2 = _mm512_set_epi64(15, 13, 11, 9, 7, 5, 3, 1);
191
+
192
+ return std::make_pair(_mm512_mask_permutex2var_pd(a, 0xff, idx1, b),
193
+ _mm512_mask_permutex2var_pd(a, 0xff, idx2, b));
194
+ }
195
+
196
+ template <>
197
+ std::pair<Vectorized<float>, Vectorized<float>>
198
+ inline deinterleave2<float>(const Vectorized<float>& a, const Vectorized<float>& b) {
199
+ // inputs:
200
+ // a = {a0, b0, a1, b1, a2, b2, a3, b3, a4, b4, a5, b5, a6, b6, a7, b7}
201
+ // b = {a8, b8, a9, b9, a10, b10, a11, b11, a12, b12, a13, b13, a14, b14, a15, b15}
202
+ // output:
203
+ // return {a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15}
204
+ // {b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15}
205
+ __m512i idx1 = _mm512_set_epi32(30, 28, 26, 24, 22, 20, 18, 16,
206
+ 14, 12, 10, 8, 6, 4, 2, 0);
207
+ __m512i idx2 = _mm512_set_epi32(31, 29, 27, 25, 23, 21, 19, 17,
208
+ 15, 13, 11, 9, 7, 5, 3, 1);
209
+
210
+ return std::make_pair(_mm512_mask_permutex2var_ps(a, 0xffff, idx1, b),
211
+ _mm512_mask_permutex2var_ps(a, 0xffff, idx2, b));
212
+ }
213
+
214
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FLIP ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
215
+
216
+ template<>
217
+ inline Vectorized<float> flip(const Vectorized<float> & v) {
218
+ const __m512i mask = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7,
219
+ 8, 9, 10, 11, 12, 13, 14, 15);
220
+ return _mm512_permutexvar_ps(mask, v);
221
+ }
222
+
223
+ template<>
224
+ inline Vectorized<double> flip(const Vectorized<double> & v) {
225
+ const __m512i mask = _mm512_set_epi64(0, 1, 2, 3, 4, 5, 6, 7);
226
+ return _mm512_permutexvar_pd(mask, v);
227
+ }
228
+
229
+ template<>
230
+ inline Vectorized<int64_t> flip(const Vectorized<int64_t> & v) {
231
+ const __m512i mask = _mm512_set_epi64(0, 1, 2, 3, 4, 5, 6, 7);
232
+ return _mm512_permutexvar_epi64(mask, v);
233
+ }
234
+
235
+ template<>
236
+ inline Vectorized<int32_t> flip(const Vectorized<int32_t> & v) {
237
+ const __m512i mask = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7,
238
+ 8, 9, 10, 11, 12, 13, 14, 15);
239
+ return _mm512_permutexvar_epi32(mask, v);
240
+ }
241
+
242
+ template<>
243
+ inline Vectorized<int16_t> flip(const Vectorized<int16_t> & v) {
244
+ const __m512i mask = _mm512_set_epi16(
245
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
246
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
247
+ );
248
+ return _mm512_permutexvar_epi16(mask, v);
249
+ }
250
+
251
+ inline __m512i flip8(const __m512i & v) {
252
+ const __m512i mask1 = _mm512_set_epi8(
253
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
254
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
255
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
256
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
257
+ );
258
+ const __m512i mask2 = _mm512_set_epi64(1, 0, 3, 2, 5, 4, 7, 6);
259
+ auto reversed_vec = _mm512_shuffle_epi8(v, mask1);
260
+ return _mm512_permutexvar_epi64(mask2, reversed_vec);
261
+ }
262
+
263
+ template<>
264
+ inline Vectorized<int8_t> flip(const Vectorized<int8_t> & v) {
265
+ return flip8(v);
266
+ }
267
+
268
+ template<>
269
+ inline Vectorized<uint8_t> flip(const Vectorized<uint8_t> & v) {
270
+ return flip8(v);
271
+ }
272
+
273
+ #endif // defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
274
+
275
+ }}}
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_bfloat16.h ADDED
@@ -0,0 +1,1644 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+ #include <ATen/cpu/vec/vec_base.h>
8
+ #include <c10/util/irange.h>
9
+
10
+ #if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
11
+ #include <sleef.h>
12
+ #endif
13
+
14
+ namespace at {
15
+ namespace vec {
16
+ // See Note [CPU_CAPABILITY namespace]
17
+ inline namespace CPU_CAPABILITY {
18
+
19
+ #if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
20
+
21
+ // bfloat16 conversion
22
+ static inline void cvtbf16_fp32(const __m256i& a, __m512& o) {
23
+ o = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_cvtepu16_epi32(a), 16));
24
+ }
25
+
26
+ static inline void cvtbf16_fp32(const __m512i& a, __m512& o1, __m512& o2) {
27
+ __m256i lo = _mm512_extracti32x8_epi32(a, 0);
28
+ __m256i hi = _mm512_extracti32x8_epi32(a, 1);
29
+ cvtbf16_fp32(lo, o1);
30
+ cvtbf16_fp32(hi, o2);
31
+ }
32
+
33
+ static inline __m256i cvtfp32_bf16(const __m512& src) {
34
+ __m512i value = _mm512_castps_si512(src);
35
+ __m512i nan = _mm512_set1_epi32(0xffff);
36
+ auto mask_value = _mm512_cmp_ps_mask(src, src, _CMP_ORD_Q);
37
+ __m512i ones = _mm512_set1_epi32(0x1);
38
+ __m512i vec_bias = _mm512_set1_epi32(0x7fff);
39
+ // uint32_t lsb = (input >> 16) & 1;
40
+ auto t_value = _mm512_and_si512(_mm512_srli_epi32(value, 16), ones);
41
+ // uint32_t rounding_bias = 0x7fff + lsb;
42
+ t_value = _mm512_add_epi32(t_value, vec_bias);
43
+ // input += rounding_bias;
44
+ t_value = _mm512_add_epi32(t_value, value);
45
+ // input = input >> 16;
46
+ t_value = _mm512_srli_epi32(t_value, 16);
47
+ // Check NaN before converting back to bf16
48
+ t_value = _mm512_mask_blend_epi32(mask_value, nan, t_value);
49
+ return _mm512_cvtusepi32_epi16(t_value);
50
+ }
51
+
52
+ static inline __m512i cvtfp32_bf16(const __m512& a, const __m512& b) {
53
+ __m512i lo = _mm512_castps_si512(a);
54
+ __m512i hi = _mm512_castps_si512(b);
55
+ __m512i nan = _mm512_set1_epi32(0xffff);
56
+ auto mask_lo = _mm512_cmp_ps_mask(a, a, _CMP_ORD_Q);
57
+ auto mask_hi = _mm512_cmp_ps_mask(b, b, _CMP_ORD_Q);
58
+ __m512i ones = _mm512_set1_epi32(0x1);
59
+ __m512i vec_bias = _mm512_set1_epi32(0x7fff);
60
+ // uint32_t lsb = (input >> 16) & 1;
61
+ auto t_lo = _mm512_and_si512(_mm512_srli_epi32(lo, 16), ones);
62
+ auto t_hi = _mm512_and_si512(_mm512_srli_epi32(hi, 16), ones);
63
+ // uint32_t rounding_bias = 0x7fff + lsb;
64
+ t_lo = _mm512_add_epi32(t_lo, vec_bias);
65
+ t_hi = _mm512_add_epi32(t_hi, vec_bias);
66
+ // input += rounding_bias;
67
+ t_lo = _mm512_add_epi32(t_lo, lo);
68
+ t_hi = _mm512_add_epi32(t_hi, hi);
69
+ // input = input >> 16;
70
+ t_lo = _mm512_srli_epi32(t_lo, 16);
71
+ t_hi = _mm512_srli_epi32(t_hi, 16);
72
+ // Check NaN before converting back to bf16
73
+ t_lo = _mm512_mask_blend_epi32(mask_lo, nan, t_lo);
74
+ t_hi = _mm512_mask_blend_epi32(mask_hi, nan, t_hi);
75
+
76
+ t_lo = _mm512_packus_epi32(t_lo, t_hi); // t_hi[4-7] t_lo[4-7] t_hi[0-4] t_lo[0-4]
77
+ __m512i idx = _mm512_set_epi64(7, 5, 3, 1, 6, 4, 2, 0);
78
+ return _mm512_permutexvar_epi64(idx, t_lo);
79
+ }
80
+
81
+ static inline __m512i merge_compare_result(const __m512& a, const __m512& b) {
82
+ __m512i lo = _mm512_castps_si512(a);
83
+ __m512i hi = _mm512_castps_si512(b);
84
+ lo = _mm512_srli_epi32(lo, 16);
85
+ hi = _mm512_srli_epi32(hi, 16);
86
+ auto out = _mm512_packus_epi32(lo, hi);
87
+ __m512i idx = _mm512_set_epi64(7, 5, 3, 1, 6, 4, 2, 0);
88
+ return _mm512_permutexvar_epi64(idx, out);
89
+ }
90
+
91
+ // float16 conversion
92
+ static inline void cvtfp16_fp32(const __m256i& a, __m512& o) {
93
+ o = _mm512_cvtph_ps(a);
94
+ }
95
+
96
+ static inline void cvtfp16_fp32(const __m512i& a, __m512& o1, __m512& o2) {
97
+ __m256i lo = _mm512_extracti32x8_epi32(a, 0);
98
+ __m256i hi = _mm512_extracti32x8_epi32(a, 1);
99
+ cvtfp16_fp32(lo, o1);
100
+ cvtfp16_fp32(hi, o2);
101
+ }
102
+
103
+ static inline __m512i cvtfp32_fp16(const __m512& a, const __m512& b) {
104
+ __m256i lo = _mm512_cvtps_ph(
105
+ a, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
106
+ __m256i hi = _mm512_cvtps_ph(
107
+ b, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
108
+ __m512 t_lo = _mm512_castsi512_ps(_mm512_castsi256_si512(lo));
109
+ __m256 t_hi = _mm256_castsi256_ps(hi);
110
+ return _mm512_castps_si512(_mm512_insertf32x8(t_lo, t_hi, 1));
111
+ }
112
+
113
+ // dtype conversion between float16/bfloat16 and float32
114
+ template <typename T, typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
115
+ inline void cvt_to_fp32(const __m256i& a, __m512& o);
116
+ template <> inline void cvt_to_fp32<BFloat16>(const __m256i& a, __m512& o) {
117
+ cvtbf16_fp32(a, o);
118
+ }
119
+ template <> inline void cvt_to_fp32<Half>(const __m256i& a, __m512& o) {
120
+ cvtfp16_fp32(a, o);
121
+ }
122
+
123
+ template <typename T, typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
124
+ inline void cvt_to_fp32(const __m512i& a, __m512& o1, __m512& o2);
125
+ template <> inline void cvt_to_fp32<BFloat16>(const __m512i& a, __m512& o1, __m512& o2) {
126
+ cvtbf16_fp32(a, o1, o2);
127
+ }
128
+ template <> inline void cvt_to_fp32<Half>(const __m512i& a, __m512& o1, __m512& o2) {
129
+ cvtfp16_fp32(a, o1, o2);
130
+ }
131
+
132
+ template <typename T, bool is_compare_op = false,
133
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
134
+ inline __m512i cvt_from_fp32(const __m512& a, const __m512& b);
135
+ template <> inline __m512i cvt_from_fp32<BFloat16, false>(const __m512& a, const __m512& b) {
136
+ return cvtfp32_bf16(a, b);
137
+ }
138
+ template <> inline __m512i cvt_from_fp32<BFloat16, true>(const __m512& a, const __m512& b) {
139
+ return merge_compare_result(a, b);
140
+ }
141
+ template <> inline __m512i cvt_from_fp32<Half, false>(const __m512& a, const __m512& b) {
142
+ return cvtfp32_fp16(a, b);
143
+ }
144
+ template <> inline __m512i cvt_from_fp32<Half, true>(const __m512& a, const __m512& b) {
145
+ return cvtfp32_fp16(a, b);
146
+ }
147
+
148
+ template <typename T>
149
+ class Vectorized16 {
150
+ static_assert(
151
+ is_reduced_floating_point_v<T>,
152
+ "Support only float16 and bfloat16.");
153
+ private:
154
+ __m512i values;
155
+ public:
156
+ using value_type = uint16_t;
157
+ using size_type = int;
158
+ static constexpr size_type size() {
159
+ return 32;
160
+ }
161
+ Vectorized16() {}
162
+ Vectorized16(__m512i v) : values(v) {}
163
+ Vectorized16(T val) {
164
+ value_type uw = val.x;
165
+ values = _mm512_set1_epi16(uw);
166
+ }
167
+ Vectorized16(T val1, T val2, T val3, T val4,
168
+ T val5, T val6, T val7, T val8,
169
+ T val9, T val10, T val11, T val12,
170
+ T val13, T val14, T val15, T val16,
171
+ T val17, T val18, T val19, T val20,
172
+ T val21, T val22, T val23, T val24,
173
+ T val25, T val26, T val27, T val28,
174
+ T val29, T val30, T val31, T val32) {
175
+ values = _mm512_set_epi16(
176
+ val32.x, val31.x, val30.x, val29.x, val28.x, val27.x, val26.x, val25.x,
177
+ val24.x, val23.x, val22.x, val21.x, val20.x, val19.x, val18.x, val17.x,
178
+ val16.x, val15.x, val14.x, val13.x, val12.x, val11.x, val10.x, val9.x,
179
+ val8.x, val7.x, val6.x, val5.x, val4.x, val3.x, val2.x, val1.x);
180
+ }
181
+ operator __m512i() const {
182
+ return values;
183
+ }
184
+ T& operator[](int idx) = delete;
185
+ const T& operator[](int idx) const = delete;
186
+ int zero_mask() const {
187
+ // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit
188
+ return _mm512_cmpeq_epi16_mask(values, _mm512_set1_epi16(0));
189
+ }
190
+ static Vectorized<T> loadu(const void* ptr, int16_t count = size()) {
191
+ if (count == size())
192
+ return _mm512_loadu_si512(reinterpret_cast<const __m512i*>(ptr));
193
+
194
+ __mmask32 mask = (1ULL << count) - 1;
195
+ return _mm512_maskz_loadu_epi16(mask, ptr);
196
+ }
197
+ void store(void* ptr, int count = size()) const {
198
+ if (count == size()) {
199
+ _mm512_storeu_si512(reinterpret_cast<__m512i*>(ptr), values);
200
+ } else if (count > 0) {
201
+ __mmask32 mask = (1ULL << count) - 1;
202
+ _mm512_mask_storeu_epi16(ptr, mask, values);
203
+ }
204
+ }
205
+ template <int64_t mask>
206
+ static Vectorized<T> blend(const Vectorized<T>& a, const Vectorized<T>& b) {
207
+ __at_align__ int16_t tmp_values[size()];
208
+ a.store(tmp_values);
209
+ if (mask & 0x01)
210
+ tmp_values[0] = b.values[31];
211
+ if (mask & 0x02)
212
+ tmp_values[1] = b.values[30];
213
+ if (mask & 0x04)
214
+ tmp_values[2] = b.values[29];
215
+ if (mask & 0x08)
216
+ tmp_values[3] = b.values[28];
217
+ if (mask & 0x10)
218
+ tmp_values[4] = b.values[27];
219
+ if (mask & 0x20)
220
+ tmp_values[5] = b.values[26];
221
+ if (mask & 0x40)
222
+ tmp_values[6] = b.values[25];
223
+ if (mask & 0x80)
224
+ tmp_values[7] = b.values[24];
225
+ if (mask & 0x100)
226
+ tmp_values[8] = b.values[23];
227
+ if (mask & 0x200)
228
+ tmp_values[9] = b.values[22];
229
+ if (mask & 0x400)
230
+ tmp_values[10] = b.values[21];
231
+ if (mask & 0x800)
232
+ tmp_values[11] = b.values[20];
233
+ if (mask & 0x1000)
234
+ tmp_values[12] = b.values[19];
235
+ if (mask & 0x2000)
236
+ tmp_values[13] = b.values[18];
237
+ if (mask & 0x4000)
238
+ tmp_values[14] = b.values[17];
239
+ if (mask & 0x8000)
240
+ tmp_values[15] = b.values[16];
241
+ if (mask & 0x10000)
242
+ tmp_values[16] = b.values[15];
243
+ if (mask & 0x20000)
244
+ tmp_values[17] = b.values[14];
245
+ if (mask & 0x40000)
246
+ tmp_values[18] = b.values[13];
247
+ if (mask & 0x80000)
248
+ tmp_values[19] = b.values[12];
249
+ if (mask & 0x100000)
250
+ tmp_values[20] = b.values[11];
251
+ if (mask & 0x200000)
252
+ tmp_values[21] = b.values[10];
253
+ if (mask & 0x400000)
254
+ tmp_values[22] = b.values[9];
255
+ if (mask & 0x800000)
256
+ tmp_values[23] = b.values[8];
257
+ if (mask & 0x1000000)
258
+ tmp_values[24] = b.values[7];
259
+ if (mask & 0x2000000)
260
+ tmp_values[25] = b.values[6];
261
+ if (mask & 0x4000000)
262
+ tmp_values[26] = b.values[5];
263
+ if (mask & 0x8000000)
264
+ tmp_values[27] = b.values[4];
265
+ if (mask & 0x10000000)
266
+ tmp_values[28] = b.values[3];
267
+ if (mask & 0x20000000)
268
+ tmp_values[29] = b.values[2];
269
+ if (mask & 0x40000000)
270
+ tmp_values[30] = b.values[1];
271
+ if (mask & 0x80000000)
272
+ tmp_values[31] = b.values[0];
273
+ return loadu(tmp_values);
274
+ }
275
+ static Vectorized<T> blendv(const Vectorized<T>& a,
276
+ const Vectorized<T>& b, const Vectorized<T>& mask) {
277
+ auto all_ones = _mm512_set1_epi16(0xFFFF);
278
+ auto mask_ = _mm512_cmp_epi16_mask(mask, all_ones, _MM_CMPINT_EQ);
279
+ return _mm512_mask_blend_epi16(mask_, a.values, b.values);
280
+ }
281
+ template<typename step_t>
282
+ static Vectorized<T> arange(T base = 0.f, step_t step = static_cast<step_t>(1)) {
283
+ return Vectorized<T>(
284
+ base, base + step, base + 2 * step, base + 3 * step,
285
+ base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step,
286
+ base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step,
287
+ base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step,
288
+ base + 16 * step, base + 17 * step, base + 18 * step, base + 19 * step,
289
+ base + 20 * step, base + 21 * step, base + 22 * step, base + 23 * step,
290
+ base + 24 * step, base + 25 * step, base + 26 * step, base + 27 * step,
291
+ base + 28 * step, base + 29 * step, base + 30 * step, base + 31 * step);
292
+ }
293
+ static Vectorized<T> set(const Vectorized<T>& a,
294
+ const Vectorized<T>& b, int64_t count = size()) {
295
+ switch (count) {
296
+ case 0:
297
+ return a;
298
+ case 1:
299
+ return blend<1>(a, b);
300
+ case 2:
301
+ return blend<3>(a, b);
302
+ case 3:
303
+ return blend<7>(a, b);
304
+ case 4:
305
+ return blend<15>(a, b);
306
+ case 5:
307
+ return blend<31>(a, b);
308
+ case 6:
309
+ return blend<63>(a, b);
310
+ case 7:
311
+ return blend<127>(a, b);
312
+ case 8:
313
+ return blend<255>(a, b);
314
+ case 9:
315
+ return blend<511>(a, b);
316
+ case 10:
317
+ return blend<1023>(a, b);
318
+ case 11:
319
+ return blend<2047>(a, b);
320
+ case 12:
321
+ return blend<4095>(a, b);
322
+ case 13:
323
+ return blend<8191>(a, b);
324
+ case 14:
325
+ return blend<16383>(a, b);
326
+ case 15:
327
+ return blend<32767>(a, b);
328
+ case 16:
329
+ return blend<65535>(a, b);
330
+ case 17:
331
+ return blend<131071>(a, b);
332
+ case 18:
333
+ return blend<262143>(a, b);
334
+ case 19:
335
+ return blend<524287>(a, b);
336
+ case 20:
337
+ return blend<1048575>(a, b);
338
+ case 21:
339
+ return blend<2097151>(a, b);
340
+ case 22:
341
+ return blend<4194303>(a, b);
342
+ case 23:
343
+ return blend<8388607>(a, b);
344
+ case 24:
345
+ return blend<16777215>(a, b);
346
+ case 25:
347
+ return blend<33554431>(a, b);
348
+ case 26:
349
+ return blend<67108863>(a, b);
350
+ case 27:
351
+ return blend<134217727>(a, b);
352
+ case 28:
353
+ return blend<268435455>(a, b);
354
+ case 29:
355
+ return blend<536870911>(a, b);
356
+ case 30:
357
+ return blend<1073741823>(a, b);
358
+ case 31:
359
+ return blend<2147483647>(a, b);
360
+ }
361
+ return b;
362
+ }
363
+ #pragma clang diagnostic push
364
+ #pragma clang diagnostic ignored "-Wignored-qualifiers"
365
+ Vectorized<T> map(const __m512 (*const vop)(__m512)) const {
366
+ __m512 lo, hi;
367
+ cvt_to_fp32<T>(values, lo, hi);
368
+ const auto o1 = vop(lo);
369
+ const auto o2 = vop(hi);
370
+ return cvt_from_fp32<T>(o1, o2);
371
+ }
372
+ Vectorized<T> isnan() const {
373
+ __m512 lo, hi;
374
+ cvt_to_fp32<T>(values, lo, hi);
375
+ __mmask16 lo_mask, hi_mask;
376
+ __m512 zero = _mm512_set1_ps(0.0);
377
+ __m512i zeroi = _mm512_castps_si512(zero);
378
+ lo_mask = _mm512_cmp_ps_mask(lo, zero, _CMP_UNORD_Q);
379
+ lo = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zeroi, lo_mask, 0xFFFF'FFFF));
380
+ hi_mask = _mm512_cmp_ps_mask(hi, zero, _CMP_UNORD_Q);
381
+ hi = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zeroi, hi_mask, 0xFFFF'FFFF));
382
+ return merge_compare_result(lo, hi);
383
+ }
384
+ #pragma clang diagnostic pop
385
+ Vectorized<T> abs() const {
386
+ return _mm512_andnot_si512(_mm512_set1_epi16(0x8000), values);
387
+ }
388
+ Vectorized<T> angle() const {
389
+ __m512 lo, hi;
390
+ cvt_to_fp32<T>(values, lo, hi);
391
+ auto angle_lambda = [](__m512 values) {
392
+ const auto zero_vec = _mm512_set1_ps(0.f);
393
+ const auto nan_vec = _mm512_set1_ps(NAN);
394
+ const auto not_nan_mask = _mm512_cmp_ps_mask(values, values, _CMP_EQ_OQ);
395
+ const auto non_nan_mask_vec = _mm512_mask_set1_epi32(_mm512_castps_si512(zero_vec),
396
+ not_nan_mask, 0xFFFFFFFF);
397
+ const auto nan_mask = _mm512_cmp_ps_mask(_mm512_castsi512_ps(non_nan_mask_vec),
398
+ zero_vec, _CMP_EQ_OQ);
399
+ const auto pi = _mm512_set1_ps(c10::pi<float>);
400
+
401
+ const auto neg_mask = _mm512_cmp_ps_mask(values, zero_vec, _CMP_LT_OQ);
402
+ auto angle = _mm512_mask_blend_ps(neg_mask, zero_vec, pi);
403
+ angle = _mm512_mask_blend_ps(nan_mask, angle, nan_vec);
404
+ return angle;
405
+ };
406
+ auto o1 = angle_lambda(lo);
407
+ auto o2 = angle_lambda(hi);
408
+ return cvt_from_fp32<T>(o1, o2);
409
+ }
410
+ Vectorized<T> real() const {
411
+ return *this;
412
+ }
413
+ Vectorized<T> imag() const {
414
+ return _mm512_set1_epi16(0);
415
+ }
416
+ Vectorized<T> conj() const {
417
+ return *this;
418
+ }
419
+ Vectorized<T> acos() const {
420
+ return map(Sleef_acosf16_u10);
421
+ }
422
+ Vectorized<T> acosh() const {
423
+ return map(Sleef_acoshf16_u10);
424
+ }
425
+ Vectorized<T> asin() const {
426
+ return map(Sleef_asinf16_u10);
427
+ }
428
+ Vectorized<T> atan() const {
429
+ return map(Sleef_atanf16_u10);
430
+ }
431
+ Vectorized<T> atanh() const {
432
+ return map(Sleef_atanhf16_u10);
433
+ }
434
+ Vectorized<T> atan2(const Vectorized<T> &b) const {
435
+ __m512 lo, hi;
436
+ __m512 b1, b2;
437
+ cvt_to_fp32<T>(values, lo, hi);
438
+ cvt_to_fp32<T>(b.values, b1, b2);
439
+ auto o1 = Sleef_atan2f16_u10(lo, b1);
440
+ auto o2 = Sleef_atan2f16_u10(hi, b2);
441
+ return cvt_from_fp32<T>(o1, o2);
442
+ }
443
+ Vectorized<T> copysign(const Vectorized<T> &sign) const {
444
+ // copy sign bit (0x8000) from sign and remaining bits from values
445
+ __m512i mask_value = _mm512_set1_epi32(~0x80008000);
446
+ __m512i mask_signbit = _mm512_set1_epi32(0x80008000);
447
+ return Vectorized<T>(
448
+ _mm512_or_si512(
449
+ _mm512_and_si512(values, mask_value),
450
+ _mm512_and_si512(sign, mask_signbit)));
451
+ }
452
+ Vectorized<T> erf() const {
453
+ return map(Sleef_erff16_u10);
454
+ }
455
+ Vectorized<T> erfc() const {
456
+ return map(Sleef_erfcf16_u15);
457
+ }
458
+ Vectorized<T> erfinv() const {
459
+ __m512 lo, hi;
460
+ cvt_to_fp32<T>(values, lo, hi);
461
+ __at_align__ float tmp1[size() / 2], tmp2[size() / 2];
462
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
463
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
464
+ for (int64_t i = 0; i < size() / 2; i++) {
465
+ tmp1[i] = calc_erfinv(tmp1[i]);
466
+ tmp2[i] = calc_erfinv(tmp2[i]);
467
+ }
468
+ auto o1 = _mm512_loadu_ps(tmp1);
469
+ auto o2 = _mm512_loadu_ps(tmp2);
470
+ return cvt_from_fp32<T>(o1, o2);
471
+ }
472
+ Vectorized<T> exp() const {
473
+ return map(Sleef_expf16_u10);
474
+ }
475
+ Vectorized<T> exp2() const {
476
+ return map(Sleef_exp2f16_u10);
477
+ }
478
+ Vectorized<T> expm1() const {
479
+ return map(Sleef_expm1f16_u10);
480
+ }
481
+ Vectorized<T> exp_u20() const {
482
+ return exp();
483
+ }
484
+ Vectorized<T> fmod(const Vectorized<T> & q) const {
485
+ __m512 x_lo, x_hi;
486
+ cvt_to_fp32<T>(values, x_lo, x_hi);
487
+ __m512 q_lo, q_hi;
488
+ cvt_to_fp32<T>(q.values, q_lo, q_hi);
489
+ auto o1 = Sleef_fmodf16(x_lo, q_lo);
490
+ auto o2 = Sleef_fmodf16(x_hi, q_hi);
491
+ return cvt_from_fp32<T>(o1, o2);
492
+ }
493
+ Vectorized<T> hypot(const Vectorized<T> &b) const {
494
+ __m512 lo, hi;
495
+ __m512 b1, b2;
496
+ cvt_to_fp32<T>(values, lo, hi);
497
+ cvt_to_fp32<T>(b.values, b1, b2);
498
+ auto o1 = Sleef_hypotf16_u05(lo, b1);
499
+ auto o2 = Sleef_hypotf16_u05(hi, b2);
500
+ return cvt_from_fp32<T>(o1, o2);
501
+ }
502
+ Vectorized<T> i0() const {
503
+ __m512 lo, hi;
504
+ cvt_to_fp32<T>(values, lo, hi);
505
+ __at_align__ float tmp1[size() / 2], tmp2[size() / 2];
506
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
507
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
508
+ for (int64_t i = 0; i < size() / 2; i++) {
509
+ tmp1[i] = calc_i0(tmp1[i]);
510
+ tmp2[i] = calc_i0(tmp2[i]);
511
+ }
512
+ auto o1 = _mm512_loadu_ps(tmp1);
513
+ auto o2 = _mm512_loadu_ps(tmp2);
514
+ return cvt_from_fp32<T>(o1, o2);
515
+ }
516
+ Vectorized<T> i0e() const {
517
+ __m512 lo, hi;
518
+ cvt_to_fp32<T>(values, lo, hi);
519
+ constexpr auto sz = size();
520
+ __at_align__ float tmp1[sz / 2], tmp2[sz / 2];
521
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
522
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
523
+
524
+ for (auto i = decltype(sz){0}; i < sz / 2; i++) {
525
+ tmp1[i] = calc_i0e(tmp1[i]);
526
+ tmp2[i] = calc_i0e(tmp2[i]);
527
+ }
528
+ const auto o1 = _mm512_loadu_ps(tmp1);
529
+ const auto o2 = _mm512_loadu_ps(tmp2);
530
+ return cvt_from_fp32<T>(o1, o2);
531
+ }
532
+ Vectorized<T> digamma() const {
533
+ __m512 lo, hi;
534
+ cvt_to_fp32<T>(values, lo, hi);
535
+ constexpr auto sz = size();
536
+ __at_align__ float tmp1[sz / 2], tmp2[sz / 2];
537
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
538
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
539
+
540
+ for (auto i = decltype(sz){0}; i < sz / 2; i++) {
541
+ tmp1[i] = calc_digamma(tmp1[i]);
542
+ tmp2[i] = calc_digamma(tmp2[i]);
543
+ }
544
+ const auto o1 = _mm512_loadu_ps(tmp1);
545
+ const auto o2 = _mm512_loadu_ps(tmp2);
546
+ return cvt_from_fp32<T>(o1, o2);
547
+ }
548
+ Vectorized<T> igamma(const Vectorized<T> &x) const {
549
+ __m512 lo, hi;
550
+ __m512 xlo, xhi;
551
+ cvt_to_fp32<T>(values, lo, hi);
552
+ cvt_to_fp32<T>(x.values, xlo, xhi);
553
+ __at_align__ float tmp1[size() / 2], tmp2[size() / 2];
554
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
555
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
556
+ __at_align__ float tmpx1[size() / 2], tmpx2[size() / 2];
557
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmpx1), xlo);
558
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmpx2), xhi);
559
+ for (int64_t i = 0; i < size() / 2; ++i) {
560
+ tmp1[i] = calc_igamma(tmp1[i], tmpx1[i]);
561
+ tmp2[i] = calc_igamma(tmp2[i], tmpx2[i]);
562
+ }
563
+ auto o1 = _mm512_loadu_ps(tmp1);
564
+ auto o2 = _mm512_loadu_ps(tmp2);
565
+ return cvt_from_fp32<T>(o1, o2);
566
+ }
567
+
568
+ Vectorized<T> igammac(const Vectorized<T> &x) const {
569
+ __m512 lo, hi;
570
+ __m512 xlo, xhi;
571
+ cvt_to_fp32<T>(values, lo, hi);
572
+ cvt_to_fp32<T>(x.values, xlo, xhi);
573
+ __at_align__ float tmp1[size() / 2], tmp2[size() / 2];
574
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
575
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
576
+ __at_align__ float tmpx1[size() / 2], tmpx2[size() / 2];
577
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmpx1), xlo);
578
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmpx2), xhi);
579
+ for (int64_t i = 0; i < size() / 2; ++i) {
580
+ tmp1[i] = calc_igammac(tmp1[i], tmpx1[i]);
581
+ tmp2[i] = calc_igammac(tmp2[i], tmpx2[i]);
582
+ }
583
+ auto o1 = _mm512_loadu_ps(tmp1);
584
+ auto o2 = _mm512_loadu_ps(tmp2);
585
+ return cvt_from_fp32<T>(o1, o2);
586
+ }
587
+ Vectorized<T> log() const {
588
+ return map(Sleef_logf16_u10);
589
+ }
590
+ Vectorized<T> log2() const {
591
+ return map(Sleef_log2f16_u10);
592
+ }
593
+ Vectorized<T> log10() const {
594
+ return map(Sleef_log10f16_u10);
595
+ }
596
+ Vectorized<T> log1p() const {
597
+ return map(Sleef_log1pf16_u10);
598
+ }
599
+ Vectorized<T> sin() const {
600
+ return map(Sleef_sinf16_u10);
601
+ }
602
+ Vectorized<T> sinh() const {
603
+ return map(Sleef_sinhf16_u10);
604
+ }
605
+ Vectorized<T> cos() const {
606
+ return map(Sleef_cosf16_u10);
607
+ }
608
+ Vectorized<T> cosh() const {
609
+ return map(Sleef_coshf16_u10);
610
+ }
611
+ Vectorized<T> ceil() const {
612
+ __m512 lo, hi;
613
+ cvt_to_fp32<T>(values, lo, hi);
614
+ auto o1 = _mm512_ceil_ps(lo);
615
+ auto o2 = _mm512_ceil_ps(hi);
616
+ return cvt_from_fp32<T>(o1, o2);
617
+ }
618
+ Vectorized<T> floor() const {
619
+ __m512 lo, hi;
620
+ cvt_to_fp32<T>(values, lo, hi);
621
+ auto o1 = _mm512_floor_ps(lo);
622
+ auto o2 = _mm512_floor_ps(hi);
623
+ return cvt_from_fp32<T>(o1, o2);
624
+ }
625
+ Vectorized<T> neg() const {
626
+ return _mm512_xor_si512(values, _mm512_set1_epi16(0x8000));
627
+ }
628
+ Vectorized<T> round() const {
629
+ __m512 lo, hi;
630
+ cvt_to_fp32<T>(values, lo, hi);
631
+ auto o1 = _mm512_roundscale_ps(lo, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
632
+ auto o2 = _mm512_roundscale_ps(hi, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
633
+ return cvt_from_fp32<T>(o1, o2);
634
+ }
635
+ Vectorized<T> tan() const {
636
+ return map(Sleef_tanf16_u10);
637
+ }
638
+ Vectorized<T> tanh() const {
639
+ return map(Sleef_tanhf16_u10);
640
+ }
641
+ Vectorized<T> trunc() const {
642
+ __m512 lo, hi;
643
+ cvt_to_fp32<T>(values, lo, hi);
644
+ auto o1 = _mm512_roundscale_ps(lo, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
645
+ auto o2 = _mm512_roundscale_ps(hi, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
646
+ return cvt_from_fp32<T>(o1, o2);
647
+ }
648
+ Vectorized<T> lgamma() const {
649
+ return map(Sleef_lgammaf16_u10);
650
+ }
651
+ Vectorized<T> sqrt() const {
652
+ __m512 lo, hi;
653
+ cvt_to_fp32<T>(values, lo, hi);
654
+ auto o1 = _mm512_sqrt_ps(lo);
655
+ auto o2 = _mm512_sqrt_ps(hi);
656
+ return cvt_from_fp32<T>(o1, o2);
657
+ }
658
+ Vectorized<T> reciprocal() const {
659
+ __m512 lo, hi;
660
+ cvt_to_fp32<T>(values, lo, hi);
661
+ auto ones = _mm512_set1_ps(1);
662
+ auto o1 = _mm512_div_ps(ones, lo);
663
+ auto o2 = _mm512_div_ps(ones, hi);
664
+ return cvt_from_fp32<T>(o1, o2);
665
+ }
666
+ Vectorized<T> rsqrt() const {
667
+ __m512 lo, hi;
668
+ cvt_to_fp32<T>(values, lo, hi);
669
+ auto ones = _mm512_set1_ps(1);
670
+ auto o1 = _mm512_div_ps(ones, _mm512_sqrt_ps(lo));
671
+ auto o2 = _mm512_div_ps(ones, _mm512_sqrt_ps(hi));
672
+ return cvt_from_fp32<T>(o1, o2);
673
+ }
674
+ Vectorized<T> pow(const Vectorized<T> &b) const {
675
+ __m512 lo, hi;
676
+ __m512 b1, b2;
677
+ cvt_to_fp32<T>(values, lo, hi);
678
+ cvt_to_fp32<T>(b.values, b1, b2);
679
+ auto o1 = Sleef_powf16_u10(lo, b1);
680
+ auto o2 = Sleef_powf16_u10(hi, b2);
681
+ return cvt_from_fp32<T>(o1, o2);
682
+ }
683
+ private:
684
+ template<typename Op>
685
+ Vectorized<T> inline binary_compare(const Vectorized<T>& b, Op op) const {
686
+ __m512 a_lo, a_hi;
687
+ __m512 b_lo, b_hi;
688
+ cvt_to_fp32<T>(values, a_lo, a_hi);
689
+ cvt_to_fp32<T>(b.values, b_lo, b_hi);
690
+ auto o1 = op(a_lo, b_lo);
691
+ auto o2 = op(a_hi, b_hi);
692
+ return cvt_from_fp32<T, /*is_compare_op*/true>(o1, o2);
693
+ }
694
+
695
+ public:
696
+ Vectorized<T> inline operator>(const Vectorized<T>& other) const {
697
+ return binary_compare(other, [](__m512 x, __m512 y) {
698
+ auto zero_vec = _mm512_set1_epi32(0);
699
+ auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_GT_OQ);
700
+ return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF));
701
+ });
702
+ }
703
+ Vectorized<T> inline operator<(const Vectorized<T>& other) const {
704
+ return binary_compare(other, [](__m512 x, __m512 y) {
705
+ auto zero_vec = _mm512_set1_epi32(0);
706
+ auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_LT_OQ);
707
+ return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF));
708
+ });
709
+ }
710
+ Vectorized<T> inline operator>=(const Vectorized<T>& other) const {
711
+ return binary_compare(other, [](__m512 x, __m512 y) {
712
+ auto zero_vec = _mm512_set1_epi32(0);
713
+ auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_GE_OQ);
714
+ return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF));
715
+ });
716
+ }
717
+ Vectorized<T> inline operator<=(const Vectorized<T>& other) const {
718
+ return binary_compare(other, [](__m512 x, __m512 y) {
719
+ auto zero_vec = _mm512_set1_epi32(0);
720
+ auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_LE_OQ);
721
+ return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF));
722
+ });
723
+ }
724
+ Vectorized<T> inline operator==(const Vectorized<T>& other) const {
725
+ return binary_compare(other, [](__m512 x, __m512 y) {
726
+ auto zero_vec = _mm512_set1_epi32(0);
727
+ auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_EQ_OQ);
728
+ return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF));
729
+ });
730
+ }
731
+ Vectorized<T> inline operator!=(const Vectorized<T>& other) const {
732
+ return binary_compare(other, [](__m512 x, __m512 y) {
733
+ auto zero_vec = _mm512_set1_epi32(0);
734
+ auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_NEQ_UQ);
735
+ return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF));
736
+ });
737
+ }
738
+ };
739
+
740
+ template<typename T, typename Op>
741
+ static inline Vectorized<T> binary_op_as_fp32(const Vectorized<T>& a, const Vectorized<T>& b, Op op) {
742
+ __m512 a_lo, a_hi;
743
+ __m512 b_lo, b_hi;
744
+ cvt_to_fp32<T>(__m512i(a), a_lo, a_hi);
745
+ cvt_to_fp32<T>(__m512i(b), b_lo, b_hi);
746
+ auto o1 = op(a_lo, b_lo);
747
+ auto o2 = op(a_hi, b_hi);
748
+ return cvt_from_fp32<T>(o1, o2);
749
+ }
750
+
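A hedged editorial sketch, not part of the header: binary_op_as_fp32 lifts any fp32 lambda onto a 16-bit vector type by widening both halves, applying the op, and narrowing back. Assuming the Vectorized<BFloat16> specialization defined just below is visible, a hypothetical helper (the name avg_bf16 is illustrative, not an API of this file) would follow the same pattern as the arithmetic operators:

// Average two bf16 vectors through fp32, reusing binary_op_as_fp32.
inline Vectorized<BFloat16> avg_bf16(const Vectorized<BFloat16>& a,
                                     const Vectorized<BFloat16>& b) {
  return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) {
    // (x + y) * 0.5 computed in fp32; cvt_from_fp32 narrows the result to bf16.
    return _mm512_mul_ps(_mm512_add_ps(x, y), _mm512_set1_ps(0.5f));
  });
}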
751
+ template <>
752
+ class Vectorized<BFloat16>: public Vectorized16<BFloat16> {
753
+ public:
754
+ using Vectorized16::Vectorized16;
755
+
756
+ Vectorized<BFloat16> frac() const;
757
+
758
+ Vectorized<BFloat16> eq(const Vectorized<BFloat16>& other) const;
759
+ Vectorized<BFloat16> ne(const Vectorized<BFloat16>& other) const;
760
+ Vectorized<BFloat16> gt(const Vectorized<BFloat16>& other) const;
761
+ Vectorized<BFloat16> ge(const Vectorized<BFloat16>& other) const;
762
+ Vectorized<BFloat16> lt(const Vectorized<BFloat16>& other) const;
763
+ Vectorized<BFloat16> le(const Vectorized<BFloat16>& other) const;
764
+ };
765
+
766
+ Vectorized<BFloat16> inline operator+(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
767
+ return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_add_ps(x, y); });
768
+ }
769
+ Vectorized<BFloat16> inline operator-(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
770
+ return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_sub_ps(x, y); });
771
+ }
772
+ Vectorized<BFloat16> inline operator*(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
773
+ return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_mul_ps(x, y); });
774
+ }
775
+ Vectorized<BFloat16> inline operator/(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
776
+ return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_div_ps(x, y); });
777
+ }
778
+ Vectorized<BFloat16> inline operator&(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
779
+ return _mm512_and_si512(a, b);
780
+ }
781
+ Vectorized<BFloat16> inline operator|(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
782
+ return _mm512_or_si512(a, b);
783
+ }
784
+ Vectorized<BFloat16> inline operator^(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
785
+ return _mm512_xor_si512(a, b);
786
+ }
787
+
788
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::eq(const Vectorized<BFloat16>& other) const {
789
+ return (*this == other) & Vectorized<BFloat16>(1.0f);
790
+ }
791
+
792
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::ne(const Vectorized<BFloat16>& other) const {
793
+ return (*this != other) & Vectorized<BFloat16>(1.0f);
794
+ }
795
+
796
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::gt(const Vectorized<BFloat16>& other) const {
797
+ return (*this > other) & Vectorized<BFloat16>(1.0f);
798
+ }
799
+
800
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::ge(const Vectorized<BFloat16>& other) const {
801
+ return (*this >= other) & Vectorized<BFloat16>(1.0f);
802
+ }
803
+
804
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::lt(const Vectorized<BFloat16>& other) const {
805
+ return (*this < other) & Vectorized<BFloat16>(1.0f);
806
+ }
807
+
808
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::le(const Vectorized<BFloat16>& other) const {
809
+ return (*this <= other) & Vectorized<BFloat16>(1.0f);
810
+ }
811
+
812
+ // frac. Implement this here so we can use subtraction
813
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::frac() const {
814
+ return *this - this->trunc();
815
+ }
816
+
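A hedged editorial sketch: eq/ne/gt/ge/lt/le above AND the all-ones compare result with 1.0f, so each lane comes back as the number 1.0 or 0.0 rather than a raw bitmask. Assuming store() from the Vectorized16 base and two existing vectors a_vec and b_vec (both names are assumptions), a caller could count matching lanes like this:

// Count lanes where a_vec == b_vec.
__at_align__ BFloat16 buf[Vectorized<BFloat16>::size()];
a_vec.eq(b_vec).store(buf);
int matches = 0;
for (int i = 0; i < Vectorized<BFloat16>::size(); ++i) {
  matches += static_cast<int>(static_cast<float>(buf[i]));  // each lane is 1.0f or 0.0f
}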
817
+ // Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
818
+ // either input is a NaN.
819
+ template <>
820
+ Vectorized<BFloat16> inline maximum(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
821
+ __m512 a_lo, a_hi;
822
+ __m512 b_lo, b_hi;
823
+ cvtbf16_fp32(__m512i(a), a_lo, a_hi);
824
+ cvtbf16_fp32(__m512i(b), b_lo, b_hi);
825
+ auto max_lo = _mm512_max_ps(a_lo, b_lo);
826
+ auto max_hi = _mm512_max_ps(a_hi, b_hi);
827
+ auto nan_lo_mask = _mm512_cmp_ps_mask(a_lo, b_lo, _CMP_UNORD_Q);
828
+ auto nan_hi_mask = _mm512_cmp_ps_mask(a_hi, b_hi, _CMP_UNORD_Q);
829
+ auto nan_lo = _mm512_castsi512_ps(_mm512_maskz_set1_epi32(nan_lo_mask, 0xFFFFFFFF));
830
+ auto nan_hi = _mm512_castsi512_ps(_mm512_maskz_set1_epi32(nan_hi_mask, 0xFFFFFFFF));
831
+ // Exploit the fact that all-ones is a NaN.
832
+ auto o1 = _mm512_or_ps(max_lo, nan_lo);
833
+ auto o2 = _mm512_or_ps(max_hi, nan_hi);
834
+ return cvtfp32_bf16(o1, o2);
835
+ }
836
+
837
+ // Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
838
+ // either input is a NaN.
839
+ template <>
840
+ Vectorized<BFloat16> inline minimum(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
841
+ __m512 a_lo, a_hi;
842
+ __m512 b_lo, b_hi;
843
+ __m512i zero_vec = _mm512_set1_epi32(0);
844
+ cvtbf16_fp32(__m512i(a), a_lo, a_hi);
845
+ cvtbf16_fp32(__m512i(b), b_lo, b_hi);
846
+ auto min_lo = _mm512_min_ps(a_lo, b_lo);
847
+ auto min_hi = _mm512_min_ps(a_hi, b_hi);
848
+ auto nan_lo_mask = _mm512_cmp_ps_mask(a_lo, b_lo, _CMP_UNORD_Q);
849
+ auto nan_hi_mask = _mm512_cmp_ps_mask(a_hi, b_hi, _CMP_UNORD_Q);
850
+ auto nan_lo = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, nan_lo_mask,
851
+ 0xFFFFFFFF));
852
+ auto nan_hi = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, nan_hi_mask,
853
+ 0xFFFFFFFF));
854
+ // Exploit the fact that all-ones is a NaN.
855
+ auto o1 = _mm512_or_ps(min_lo, nan_lo);
856
+ auto o2 = _mm512_or_ps(min_hi, nan_hi);
857
+ return cvtfp32_bf16(o1, o2);
858
+ }
859
+
860
+ template <>
861
+ Vectorized<BFloat16> inline clamp(const Vectorized<BFloat16>& a,
862
+ const Vectorized<BFloat16>& min, const Vectorized<BFloat16>& max) {
863
+ __m512 a_lo, a_hi;
864
+ __m512 min_lo, min_hi;
865
+ __m512 max_lo, max_hi;
866
+ cvtbf16_fp32(__m512i(a), a_lo, a_hi);
867
+ cvtbf16_fp32(__m512i(min), min_lo, min_hi);
868
+ cvtbf16_fp32(__m512i(max), max_lo, max_hi);
869
+ auto o1 = _mm512_min_ps(max_lo, _mm512_max_ps(min_lo, a_lo));
870
+ auto o2 = _mm512_min_ps(max_hi, _mm512_max_ps(min_hi, a_hi));
871
+ return cvtfp32_bf16(o1, o2);
872
+ }
873
+
874
+ template <>
875
+ Vectorized<BFloat16> inline clamp_max(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& max) {
876
+ __m512 a_lo, a_hi;
877
+ __m512 max_lo, max_hi;
878
+ cvtbf16_fp32(__m512i(a), a_lo, a_hi);
879
+ cvtbf16_fp32(__m512i(max), max_lo, max_hi);
880
+ auto o1 = _mm512_min_ps(max_lo, a_lo);
881
+ auto o2 = _mm512_min_ps(max_hi, a_hi);
882
+ return cvtfp32_bf16(o1, o2);
883
+ }
884
+
885
+ template <>
886
+ Vectorized<BFloat16> inline clamp_min(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& min) {
887
+ __m512 a_lo, a_hi;
888
+ __m512 min_lo, min_hi;
889
+ cvtbf16_fp32(__m512i(a), a_lo, a_hi);
890
+ cvtbf16_fp32(__m512i(min), min_lo, min_hi);
891
+ auto o1 = _mm512_max_ps(min_lo, a_lo);
892
+ auto o2 = _mm512_max_ps(min_hi, a_hi);
893
+ return cvtfp32_bf16(o1, o2);
894
+ }
895
+
896
+ template <>
897
+ inline void convert(const BFloat16* src, BFloat16* dst, int64_t n) {
898
+ int64_t i;
899
+ #pragma unroll
900
+ for (i = 0; i <= (n - Vectorized<BFloat16>::size()); i += Vectorized<BFloat16>::size()) {
901
+ auto vsrc = _mm512_loadu_si512(reinterpret_cast<__m512i*>((void*)(src + i)));
902
+ _mm512_storeu_si512(reinterpret_cast<__m512i*>((void*)(dst + i)), vsrc);
903
+ }
904
+ #pragma unroll
905
+ for (; i < n; i++) {
906
+ dst[i] = src[i];
907
+ }
908
+ }
909
+
910
+ template <>
911
+ inline void convert(const float* src, BFloat16* dst, int64_t n) {
912
+ int64_t i;
913
+ for (i = 0; i + Vectorized<BFloat16>::size() <= n; i += Vectorized<BFloat16>::size()) {
914
+ __m512 a = _mm512_loadu_ps(&src[i]);
915
+ __m512 b = _mm512_loadu_ps(&src[i + 16]);
916
+
917
+ __m512i bf = cvtfp32_bf16(a, b);
918
+ _mm512_storeu_si512(reinterpret_cast<__m512i*>(&dst[i]), bf);
919
+ }
920
+ for (; i < n; i++) {
921
+ dst[i] = c10::convert<BFloat16>(src[i]);
922
+ }
923
+ }
924
+
925
+ template <>
926
+ inline void convert(const double* src, BFloat16* dst, int64_t n) {
927
+ auto load_float = [](const double *src) -> __m512 {
928
+ // Load one float vector from an array of doubles
929
+ __m256 a = _mm512_cvtpd_ps(_mm512_loadu_pd(src));
930
+ __m256 b = _mm512_cvtpd_ps(_mm512_loadu_pd(src + 8));
931
+ return _mm512_insertf32x8(_mm512_castps256_ps512(a), b, 1);
932
+ };
933
+
934
+ int64_t i;
935
+ for (i = 0; i + Vectorized<BFloat16>::size() <= n; i += Vectorized<BFloat16>::size()) {
936
+ __m512 a = load_float(&src[i]);
937
+ __m512 b = load_float(&src[i + 16]);
938
+
939
+ __m512i bf = cvtfp32_bf16(a, b);
940
+ _mm512_storeu_si512(reinterpret_cast<__m512i*>(&dst[i]), bf);
941
+ }
942
+ for (; i < n; i++) {
943
+ dst[i] = c10::convert<BFloat16>(src[i]);
944
+ }
945
+ }
946
+
947
+ template <>
948
+ Vectorized<BFloat16> inline fmadd(const Vectorized<BFloat16>& a,
949
+ const Vectorized<BFloat16>& b, const Vectorized<BFloat16>& c) {
950
+ __m512 a_lo, a_hi;
951
+ __m512 b_lo, b_hi;
952
+ __m512 c_lo, c_hi;
953
+ cvtbf16_fp32(__m512i(a), a_lo, a_hi);
954
+ cvtbf16_fp32(__m512i(b), b_lo, b_hi);
955
+ cvtbf16_fp32(__m512i(c), c_lo, c_hi);
956
+ auto o1 = _mm512_fmadd_ps(a_lo, b_lo, c_lo);
957
+ auto o2 = _mm512_fmadd_ps(a_hi, b_hi, c_hi);
958
+ return cvtfp32_bf16(o1, o2);
959
+ }
960
+
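A hedged editorial sketch: the fmadd specialization above widens a, b, and c to fp32, issues two _mm512_fmadd_ps, and narrows once, so a bf16 a*b + c is rounded a single time. Assuming caller-side pointers px and pw and a length n that is a multiple of the vector width (all assumptions), an accumulation loop could look like:

// Elementwise multiply-accumulate over bf16 buffers.
// Note: the accumulator stays in bf16 here, trading precision for simplicity;
// accumulating in Vectorized<float> would be more accurate.
Vectorized<BFloat16> acc(BFloat16(0.f));
for (int64_t i = 0; i + Vectorized<BFloat16>::size() <= n; i += Vectorized<BFloat16>::size()) {
  auto x = Vectorized<BFloat16>::loadu(px + i);
  auto w = Vectorized<BFloat16>::loadu(pw + i);
  acc = fmadd(x, w, acc);
}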
961
+ static inline void _transpose_mxn_half_16_16(__m256i t[], __m512i u[]) {
962
+ __m512i r[8];
963
+ // a0a1 a2a3 a4a5 a6a7 a8a9 a10a11 a12a13 a14a15 e0e1 e2e3 e4e5 e6e7 e8e9 e10e11 e12e13 e14e15
964
+ // b0-b15 f0-f15
965
+ // c0-c15 g0-g15
966
+ // d0-d15 h0-h15
967
+ // i0-i15 m0-m15
968
+ // j0-j15 n0-n15
969
+ // k0-k15 o0-o15
970
+ // l0-l15 p0-p15
971
+ #pragma unroll(4)
972
+ for (int i = 0; i < 4; i++) {
973
+ r[i] = _mm512_inserti64x4(_mm512_castsi256_si512(t[i]), t[i + 4], 0x01);
974
+ r[i + 4] = _mm512_inserti64x4(_mm512_castsi256_si512(t[i + 8]), t[i + 12], 0x01);
975
+ }
976
+
977
+ // u0: a0a1 b0b1 a2a3 b2b3 a8a9 b8b9 a10a11 b10b11 e0e1 f0f1 e2e3 f2f3 e8e9 f8f9 e10e11 f10f11
978
+ // u1: a4a5 b4b5 a6a7 b6b7 a12a13 b12b13 a14a15 b14b15 e4e5 f4f5 e6e7 f6f7 e12e13 f12f13 e14e15 f14f15
979
+ // u2: c0c1 d0d1 c2c3 d2d3 c8c9 d8d9 c10c11 d10d11 g0g1 h0h1 g2g3 h2h3 g8g9 h8h9 g10g11 h10h11
980
+ // u3: c4c5 d4d5 c6c7 d6d7 c12c13 d12d13 c14c15 d14d15 g4g5 h4h5 g6g7 h6h7 g12g13 h12h13 g14g15 h14h15
981
+ // i j m n
982
+ // k l o p
983
+ #pragma unroll(4)
984
+ for (int i = 0; i < 8; i += 2) {
985
+ u[i] = _mm512_unpacklo_epi32(r[i], r[i + 1]);
986
+ u[i + 1] = _mm512_unpackhi_epi32(r[i], r[i + 1]);
987
+ }
988
+
989
+ // r0: a0a1 b0b1 c0c1 d0d1 a8a9 b8b9 c8c9 d8d9 e0e1 f0f1 g0g1 h0h1 e8e9 f8f9 g8g9 h8h9
990
+ // r1: a2a3 b2b3 c2c3 d2d3 a10a11 b10b11 c10c11 d10d11 e2e3 f2f3 g2g3 h2h3 e10e11 f10f11 g10g11 h10h11
991
+ // r2: a4a5 b4b5 c4c5 d4d5 a12a13 b12b13 c12c13 d12d13
992
+ // r3: a6a7 b6b7 c6c7 d6d7 a14a15 b14b15 c14c15 d14d15
993
+ // r4: i j k l m n o p
994
+ r[0] = _mm512_unpacklo_epi64(u[0], u[2]);
995
+ r[1] = _mm512_unpackhi_epi64(u[0], u[2]);
996
+ r[2] = _mm512_unpacklo_epi64(u[1], u[3]);
997
+ r[3] = _mm512_unpackhi_epi64(u[1], u[3]);
998
+ r[4] = _mm512_unpacklo_epi64(u[4], u[6]);
999
+ r[5] = _mm512_unpackhi_epi64(u[4], u[6]);
1000
+ r[6] = _mm512_unpacklo_epi64(u[5], u[7]);
1001
+ r[7] = _mm512_unpackhi_epi64(u[5], u[7]);
1002
+
1003
+ __m512i const1 = _mm512_set_epi32(
1004
+ 0x00370035,
1005
+ 0x00330031,
1006
+ 0x00270025,
1007
+ 0x00230021,
1008
+ 0x00170015,
1009
+ 0x00130011,
1010
+ 0x00070005,
1011
+ 0x00030001,
1012
+ 0x00360034,
1013
+ 0x00320030,
1014
+ 0x00260024,
1015
+ 0x00220020,
1016
+ 0x00160014,
1017
+ 0x00120010,
1018
+ 0x00060004,
1019
+ 0x00020000);
1020
+ __m512i const2 = _mm512_set_epi32(
1021
+ 0x003f003d,
1022
+ 0x003b0039,
1023
+ 0x002f002d,
1024
+ 0x002b0029,
1025
+ 0x001f001d,
1026
+ 0x001b0019,
1027
+ 0x000f000d,
1028
+ 0x000b0009,
1029
+ 0x003e003c,
1030
+ 0x003a0038,
1031
+ 0x002e002c,
1032
+ 0x002a0028,
1033
+ 0x001e001c,
1034
+ 0x001a0018,
1035
+ 0x000e000c,
1036
+ 0x000a0008);
1037
+ // merge values from two regs
1038
+ // 0-- 1--
1039
+ // 8-- 9--
1040
+ // 2-- 3--
1041
+ // 10-- 11--
1042
+ // 4-- 5--
1043
+ // 12-- 13--
1044
+ // 6-- 7--
1045
+ // 14-- 15--
1046
+ #pragma unroll(4)
1047
+ for (int i = 0; i < 4; i++) {
1048
+ u[i] = _mm512_permutex2var_epi16(r[i], const1, r[i + 4]);
1049
+ u[i + 4] = _mm512_permutex2var_epi16(r[i], const2, r[i + 4]);
1050
+ }
1051
+ }
1052
+
1053
+ // TODO(Leslie): Add the AVX2 Version of transpose_mxn for BFloat16 and Float16
1054
+ // Code referred to FBGEMM:
1055
+ // https://github.com/pytorch/FBGEMM/blob/39a423e4ad1a04b77fea81c7d09c3e6f8984fae9/src/UtilsAvx512.cc#L1483-L1607
1056
+ template<>
1057
+ inline void transpose_mxn<BFloat16, 16, 16>(
1058
+ const BFloat16* src,
1059
+ int64_t ld_src,
1060
+ BFloat16* dst,
1061
+ int64_t ld_dst) {
1062
+ __m256i t[16];
1063
+ // load from src to registers
1064
+ // a: a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 a11 a12 a13 a14 a15
1065
+ // b: b0 b1 b2 b3 b4 b5 b6 b7 b8 b9 b10 b11 b12 b13 b14 b15
1066
+ // c: c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 c12 c13 c14 c15
1067
+ // d: d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15
1068
+ // e: e0 e1 e2 e3 e4 e5 e6 e7 e8 e9 e10 e11 e12 e13 e14 e15
1069
+ // f: f0 f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13 f14 f15
1070
+ // g: g0 g1 g2 g3 g4 g5 g6 g7 g8 g9 g10 g11 g12 g13 g14 g15
1071
+ // h: h0 h1 h2 h3 h4 h5 h6 h7 h8 h9 h10 h11 h12 h13 h14 h15
1072
+ // i: i0 i1 i2 i3 i4 i5 i6 i7 i8 i9 i10 i11 i12 i13 i14 i15
1073
+ // j: j0 j1 j2 j3 j4 j5 j6 j7 j8 j9 j10 j11 j12 j13 j14 j15
1074
+ // k: k0 k1 k2 k3 k4 k5 k6 k7 k8 k9 k10 k11 k12 k13 k14 k15
1075
+ // l: l0 l1 l2 l3 l4 l5 l6 l7 l8 l9 l10 l11 l12 l13 l14 l15
1076
+ // m: m0 m1 m2 m3 m4 m5 m6 m7 m8 m9 m10 m11 m12 m13 m14 m15
1077
+ // n: n0 n1 n2 n3 n4 n5 n6 n7 n8 n9 n10 n11 n12 n13 n14 n15
1078
+ // o: o0 o1 o2 o3 o4 o5 o6 o7 o8 o9 o10 o11 o12 o13 o14 o15
1079
+ // p: p0 p1 p2 p3 p4 p5 p6 p7 p8 p9 p10 p11 p12 p13 p14 p15
1080
+ #pragma unroll(16)
1081
+ for (int i = 0; i < 16; i++) {
1082
+ t[i] = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(src + i * ld_src));
1083
+ }
1084
+
1085
+ __m512i u[8];
1086
+ _transpose_mxn_half_16_16(t, u);
1087
+
1088
+ #pragma unroll(8)
1089
+ for (int i = 0; i < 8; i++) {
1090
+ _mm256_storeu_si256(
1091
+ reinterpret_cast<__m256i*>(dst + (i * 2) * ld_dst),
1092
+ _mm512_extracti32x8_epi32(u[i], 0x0));
1093
+ _mm256_storeu_si256(
1094
+ reinterpret_cast<__m256i*>(dst + (i * 2 + 1) * ld_dst),
1095
+ _mm512_extracti32x8_epi32(u[i], 0x01));
1096
+ }
1097
+ }
1098
+
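A hedged editorial sketch: transpose_mxn<BFloat16, 16, 16> above reads a 16x16 bf16 tile with row stride ld_src and writes its transpose with row stride ld_dst. Assuming an M x N row-major source with both dimensions multiples of 16 (src, dst, M, N are assumptions), a full tiled transpose could drive it as:

// Transpose src (M x N, row-major) into dst (N x M, row-major), one 16x16 tile at a time.
for (int64_t i = 0; i < M; i += 16) {
  for (int64_t j = 0; j < N; j += 16) {
    transpose_mxn<BFloat16, 16, 16>(src + i * N + j, /*ld_src=*/N,
                                    dst + j * M + i, /*ld_dst=*/M);
  }
}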
1099
+ // Code referred to FBGEMM:
1100
+ // https://github.com/pytorch/FBGEMM/blob/39a423e4ad1a04b77fea81c7d09c3e6f8984fae9/src/UtilsAvx512.cc#L1483-L1607
1101
+ template<>
1102
+ inline void transpose_mxn<Half, 16, 16>(
1103
+ const Half* src,
1104
+ int64_t ld_src,
1105
+ Half* dst,
1106
+ int64_t ld_dst) {
1107
+ __m256i t[16];
1108
+ // load from src to registers
1109
+ // Same matrix indices as above transpose_mxn<BFloat16, 16, 16>
1110
+ #pragma unroll(16)
1111
+ for (int i = 0; i < 16; i++) {
1112
+ t[i] = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(src + i * ld_src));
1113
+ }
1114
+
1115
+ __m512i u[8];
1116
+ _transpose_mxn_half_16_16(t, u);
1117
+
1118
+ #pragma unroll(8)
1119
+ for (int i = 0; i < 8; i++) {
1120
+ _mm256_storeu_si256(
1121
+ reinterpret_cast<__m256i*>(dst + (i * 2) * ld_dst),
1122
+ _mm512_extracti32x8_epi32(u[i], 0x0));
1123
+ _mm256_storeu_si256(
1124
+ reinterpret_cast<__m256i*>(dst + (i * 2 + 1) * ld_dst),
1125
+ _mm512_extracti32x8_epi32(u[i], 0x01));
1126
+ }
1127
+ }
1128
+
1129
+ static inline void _transpose_mxn_half_32_32(__m512i r[], __m512i d[]) {
1130
+ // t[0]: 0 32 1 33 2 34 3 35 8 40 9 41 10 42 11 43 16 ... 59
1131
+ // t[1]: 4 36 5 37 6 38 7 39 12 44 13 45 14 46 15 47 20 ... 63
1132
+ // t[2]: 64 96 65 97 66 98 67 99 72 104 73 105 74 106 75 ... 123
1133
+ // t[3]: 68 100 69 101 70 102 71 103 76 108 77 109 78 110 79 111 84 ... 127
1134
+ // t[4]: 128 160 129 161 130 162 131 163 136 168 137 169 138 170 139 171 144 ... 187
1135
+ // t[5]: 132 164 133 165 134 166 135 167 140 172 141 173 142 174 143 175 148 ... 191
1136
+ // t[6]: 192 224 193 225 194 226 195 227 200 232 201 233 202 234 203 235 208 ... 251
1137
+ // t[7]: 196 228 197 229 198 230 199 231 204 236 205 237 206 238 207 239 212 ... 255
1138
+ // t[8]: 256 288 257 289 258 290 259 291 264 296 265 297 266 298 267 299 272 ... 315
1139
+ // t[9]: 260 292 261 293 262 294 263 295 268 300 269 301 270 302 271 303 276 ... 319
1140
+ // t[10]: 320 352 321 353 322 354 323 355 328 360 329 361 330 362 331 363 336 ... 379
1141
+ // t[11]: 324 356 325 357 326 358 327 359 332 364 333 365 334 366 335 367 340 ... 383
1142
+ // t[12]: 384 416 385 417 386 418 387 419 392 424 393 425 394 426 395 427 400 ... 443
1143
+ // t[13]: 388 420 389 421 390 422 391 423 396 428 397 429 398 430 399 431 404 ... 447
1144
+ // t[14]: 448 480 449 481 450 482 451 483 456 488 457 489 458 490 459 491 464 ... 507
1145
+ // t[15]: 452 484 453 485 454 486 455 487 460 492 461 493 462 494 463 495 468 ... 511
1146
+ // t[16]: 512 544 513 545 514 546 515 547 520 552 521 553 522 554 523 555 528 ... 571
1147
+ // ...
1148
+ // t[31]: 964 996 965 997 966 998 967 999 972 1004 973 1005 974 1006 975 1007 980 ... 1023
1149
+ #pragma unroll(16)
1150
+ for (int i = 0; i < 16; ++i) {
1151
+ d[i * 2] = _mm512_unpacklo_epi16(r[i * 2], r[i * 2 + 1]);
1152
+ d[i * 2 + 1] = _mm512_unpackhi_epi16(r[i * 2], r[i * 2 + 1]);
1153
+ }
1154
+
1155
+ // t[0]: 0 32 64 96 1 33 65 97 8 40 72 104 9 41 73 105 16 ... 121
1156
+ // t[1]: 2 34 66 98 3 35 67 99 10 42 74 106 11 43 75 107 18 ... 123
1157
+ // t[2]: 4 36 68 100 5 37 69 101 12 44 76 108 13 45 77 109 20 ... 125
1158
+ // t[3]: 6 38 70 102 7 39 71 103 14 46 78 110 15 47 79 111 22 ... 127
1159
+ // t[4]: 128 160 192 224 129 161 193 225 136 168 200 232 137 169 201 233 144 ... 249
1160
+ // t[5]: 130 162 194 226 131 163 195 227 138 170 202 234 139 171 203 235 146 ... 251
1161
+ // t[6]: 132 164 196 228 133 165 197 229 140 172 204 236 141 173 205 237 148 ... 253
1162
+ // t[7]: 134 166 198 230 135 167 199 231 142 174 206 238 143 175 207 239 150 ... 255
1163
+ // t[8]: 256 288 320 352 257 289 321 353 264 296 328 360 265 297 329 361 272 ... 377
1164
+ // t[9]: 258 290 322 354 259 291 323 355 266 298 330 362 267 299 331 363 274 ... 379
1165
+ // t[10]: 260 292 324 356 261 293 325 357 268 300 332 364 269 301 333 365 276 ... 381
1166
+ // t[11]: 262 294 326 358 263 295 327 359 270 302 334 366 271 303 335 367 278 ... 383
1167
+ // t[12]: 384 416 448 480 385 417 449 481 392 424 456 488 393 425 457 489 400 ... 505
1168
+ // t[13]: 386 418 450 482 387 419 451 483 394 426 458 490 395 427 459 491 402 ... 507
1169
+ // t[14]: 388 420 452 484 389 421 453 485 396 428 460 492 397 429 461 493 404 ... 509
1170
+ // t[15]: 390 422 454 486 391 423 455 487 398 430 462 494 399 431 463 495 406 ... 511
1171
+ // t[16]: 512 544 576 608 513 545 577 609 520 552 584 616 521 553 585 617 528 ... 633
1172
+ // ...
1173
+ // t[31]: 902 934 966 998 903 935 967 999 910 942 974 1006 911 943 975 1007 918 ... 1023
1174
+ #pragma unroll(8)
1175
+ for (int i = 0; i < 8; ++i) {
1176
+ r[i * 4] = _mm512_unpacklo_epi32(d[i * 4], d[i * 4 + 2]);
1177
+ r[i * 4 + 1] = _mm512_unpackhi_epi32(d[i * 4], d[i * 4 + 2]);
1178
+ r[i * 4 + 2] = _mm512_unpacklo_epi32(d[i * 4 + 1], d[i * 4 + 3]);
1179
+ r[i * 4 + 3] = _mm512_unpackhi_epi32(d[i * 4 + 1], d[i * 4 + 3]);
1180
+ }
1181
+
1182
+ // t[0]: 0 32 64 96 128 160 192 224 8 40 72 104 136 168 200 232 16 ... 248
1183
+ // t[1]: 1 33 65 97 129 161 193 225 9 41 73 105 137 169 201 233 17 ... 249
1184
+ // t[2]: 2 34 66 98 130 162 194 226 10 42 74 106 138 170 202 234 18 ... 250
1185
+ // t[3]: 3 35 67 99 131 163 195 227 11 43 75 107 139 171 203 235 19 ... 251
1186
+ // t[4]: 4 36 68 100 132 164 196 228 12 44 76 108 140 172 204 236 20 ... 252
1187
+ // t[5]: 5 37 69 101 133 165 197 229 13 45 77 109 141 173 205 237 21 ... 253
1188
+ // t[6]: 6 38 70 102 134 166 198 230 14 46 78 110 142 174 206 238 22 ... 254
1189
+ // t[7]: 7 39 71 103 135 167 199 231 15 47 79 111 143 175 207 239 23 ... 255
1190
+ // t[8]: 256 288 320 352 384 416 448 480 264 296 328 360 392 424 456 488 272 ... 504
1191
+ // t[9]: 257 289 321 353 385 417 449 481 265 297 329 361 393 425 457 489 273 ... 505
1192
+ // t[10]: 258 290 322 354 386 418 450 482 266 298 330 362 394 426 458 490 274 ... 506
1193
+ // t[11]: 259 291 323 355 387 419 451 483 267 299 331 363 395 427 459 491 275 ... 507
1194
+ // t[12]: 260 292 324 356 388 420 452 484 268 300 332 364 396 428 460 492 276 ... 508
1195
+ // t[13]: 261 293 325 357 389 421 453 485 269 301 333 365 397 429 461 493 277 ... 509
1196
+ // t[14]: 262 294 326 358 390 422 454 486 270 302 334 366 398 430 462 494 278 ... 510
1197
+ // t[15]: 263 295 327 359 391 423 455 487 271 303 335 367 399 431 463 495 279 ... 511
1198
+ // t[16]: 512 544 576 608 640 672 704 736 520 552 584 616 648 680 712 744 528 ... 760
1199
+ // ...
1200
+ // t[31]: 775 807 839 871 903 935 967 999 783 815 847 879 911 943 975 1007 791 ... 1023
1201
+ #pragma unroll(4)
1202
+ for (int i = 0; i < 4; ++i) {
1203
+ d[i * 8] = _mm512_unpacklo_epi64(r[i * 8], r[i * 8 + 4]);
1204
+ d[i * 8 + 1] = _mm512_unpackhi_epi64(r[i * 8], r[i * 8 + 4]);
1205
+ d[i * 8 + 2] = _mm512_unpacklo_epi64(r[i * 8 + 1], r[i * 8 + 5]);
1206
+ d[i * 8 + 3] = _mm512_unpackhi_epi64(r[i * 8 + 1], r[i * 8 + 5]);
1207
+ d[i * 8 + 4] = _mm512_unpacklo_epi64(r[i * 8 + 2], r[i * 8 + 6]);
1208
+ d[i * 8 + 5] = _mm512_unpackhi_epi64(r[i * 8 + 2], r[i * 8 + 6]);
1209
+ d[i * 8 + 6] = _mm512_unpacklo_epi64(r[i * 8 + 3], r[i * 8 + 7]);
1210
+ d[i * 8 + 7] = _mm512_unpackhi_epi64(r[i * 8 + 3], r[i * 8 + 7]);
1211
+ }
1212
+
1213
+ // t[0]: 0 32 64 96 128 160 192 224 256 288 320 352 384 416 448 480 16 ... 496
1214
+ // t[1]: 1 33 65 97 129 161 193 225 257 289 321 353 385 417 449 481 17 ... 497
1215
+ // t[2]: 2 34 66 98 130 162 194 226 258 290 322 354 386 418 450 482 18 ... 498
1216
+ // t[3]: 3 35 67 99 131 163 195 227 259 291 323 355 387 419 451 483 19 ... 499
1217
+ // t[4]: 4 36 68 100 132 164 196 228 260 292 324 356 388 420 452 484 20 ... 500
1218
+ // t[5]: 5 37 69 101 133 165 197 229 261 293 325 357 389 421 453 485 21 ... 501
1219
+ // t[6]: 6 38 70 102 134 166 198 230 262 294 326 358 390 422 454 486 22 ... 502
1220
+ // t[7]: 7 39 71 103 135 167 199 231 263 295 327 359 391 423 455 487 23 ... 503
1221
+ // t[8]: 8 40 72 104 136 168 200 232 264 296 328 360 392 424 456 488 24 ... 504
1222
+ // t[9]: 9 41 73 105 137 169 201 233 265 297 329 361 393 425 457 489 25 ... 505
1223
+ // t[10]: 10 42 74 106 138 170 202 234 266 298 330 362 394 426 458 490 26 ... 506
1224
+ // t[11]: 11 43 75 107 139 171 203 235 267 299 331 363 395 427 459 491 27 ... 507
1225
+ // t[12]: 12 44 76 108 140 172 204 236 268 300 332 364 396 428 460 492 28 ... 508
1226
+ // t[13]: 13 45 77 109 141 173 205 237 269 301 333 365 397 429 461 493 29 ... 509
1227
+ // t[14]: 14 46 78 110 142 174 206 238 270 302 334 366 398 430 462 494 30 ... 510
1228
+ // t[15]: 15 47 79 111 143 175 207 239 271 303 335 367 399 431 463 495 31 ... 511
1229
+ // t[16]: 512 544 576 608 640 672 704 736 768 800 832 864 896 928 960 992 528 ... 1008
1230
+ // ...
1231
+ // t[31]: 527 559 591 623 655 687 719 751 783 815 847 879 911 943 975 1007 543 ... 1023
1232
+ __m512i const1 = _mm512_set_epi64(
1233
+ 0x000000000000000d,
1234
+ 0x000000000000000c,
1235
+ 0x0000000000000005,
1236
+ 0x0000000000000004,
1237
+ 0x0000000000000009,
1238
+ 0x0000000000000008,
1239
+ 0x0000000000000001,
1240
+ 0x0000000000000000);
1241
+ __m512i const2 = _mm512_set_epi64(
1242
+ 0x000000000000000f,
1243
+ 0x000000000000000e,
1244
+ 0x0000000000000007,
1245
+ 0x0000000000000006,
1246
+ 0x000000000000000b,
1247
+ 0x000000000000000a,
1248
+ 0x0000000000000003,
1249
+ 0x0000000000000002);
1250
+ #pragma unroll(8)
1251
+ for (int i = 0; i < 8; ++i) {
1252
+ r[i] = _mm512_permutex2var_epi64(d[i], /*idx*/const1, d[i + 8]);
1253
+ r[i + 8] = _mm512_permutex2var_epi64(d[i], /*idx*/const2, d[i + 8]);
1254
+ r[i + 16] = _mm512_permutex2var_epi64(d[i + 16], /*idx*/const1, d[i + 24]);
1255
+ r[i + 24] = _mm512_permutex2var_epi64(d[i + 16], /*idx*/const2, d[i + 24]);
1256
+ }
1257
+
1258
+ // t[0]: 0 32 64 96 128 160 192 224 256 288 320 352 384 416 448 480 512 544 ... 992
1259
+ // t[1]: 1 33 65 97 129 161 193 225 257 289 321 353 385 417 449 481 513 545 ... 993
1260
+ // t[2]: 2 34 66 98 130 162 194 226 258 290 322 354 386 418 450 482 514 546 ... 994
1261
+ // t[3]: 3 35 67 99 131 163 195 227 259 291 323 355 387 419 451 483 515 547 ... 995
1262
+ // t[4]: 4 36 68 100 132 164 196 228 260 292 324 356 388 420 452 484 516 548 ... 996
1263
+ // t[5]: 5 37 69 101 133 165 197 229 261 293 325 357 389 421 453 485 517 549 ... 997
1264
+ // t[6]: 6 38 70 102 134 166 198 230 262 294 326 358 390 422 454 486 518 550 ... 998
1265
+ // t[7]: 7 39 71 103 135 167 199 231 263 295 327 359 391 423 455 487 519 551 ... 999
1266
+ // t[8]: 8 40 72 104 136 168 200 232 264 296 328 360 392 424 456 488 520 552 ... 1000
1267
+ // t[9]: 9 41 73 105 137 169 201 233 265 297 329 361 393 425 457 489 521 553 ... 1001
1268
+ // t[10]: 10 42 74 106 138 170 202 234 266 298 330 362 394 426 458 490 522 554 ... 1002
1269
+ // t[11]: 11 43 75 107 139 171 203 235 267 299 331 363 395 427 459 491 523 555 ... 1003
1270
+ // t[12]: 12 44 76 108 140 172 204 236 268 300 332 364 396 428 460 492 524 556 ... 1004
1271
+ // t[13]: 13 45 77 109 141 173 205 237 269 301 333 365 397 429 461 493 525 557 ... 1005
1272
+ // t[14]: 14 46 78 110 142 174 206 238 270 302 334 366 398 430 462 494 526 558 ... 1006
1273
+ // t[15]: 15 47 79 111 143 175 207 239 271 303 335 367 399 431 463 495 527 559 ... 1007
1274
+ // t[16]: 16 48 80 112 144 176 208 240 272 304 336 368 400 432 464 496 528 560 ... 1008
1275
+ // ...
1276
+ // t[31]: 31 63 95 127 159 191 223 255 287 319 351 383 415 447 479 511 543 575 ... 1023
1277
+ __m512i const3 = _mm512_set_epi64(
1278
+ 0x000000000000000b,
1279
+ 0x000000000000000a,
1280
+ 0x0000000000000009,
1281
+ 0x0000000000000008,
1282
+ 0x0000000000000003,
1283
+ 0x0000000000000002,
1284
+ 0x0000000000000001,
1285
+ 0x0000000000000000);
1286
+ __m512i const4 = _mm512_set_epi64(
1287
+ 0x000000000000000f,
1288
+ 0x000000000000000e,
1289
+ 0x000000000000000d,
1290
+ 0x000000000000000c,
1291
+ 0x0000000000000007,
1292
+ 0x0000000000000006,
1293
+ 0x0000000000000005,
1294
+ 0x0000000000000004);
1295
+ #pragma unroll(16)
1296
+ for (int i = 0; i < 16; ++i) {
1297
+ d[i] = _mm512_permutex2var_epi64(r[i], /*idx*/const3, r[i + 16]);
1298
+ d[i + 16] = _mm512_permutex2var_epi64(r[i], /*idx*/const4, r[i + 16]);
1299
+ }
1300
+ }
1301
+
1302
+ // Code referred to FBGEMM:
1303
+ // https://github.com/pytorch/FBGEMM/blob/39a423e4ad1a04b77fea81c7d09c3e6f8984fae9/src/UtilsAvx512.cc#LL19C6-L19C6
1304
+ template<>
1305
+ inline void transpose_mxn<BFloat16, 32, 32>(
1306
+ const BFloat16* src,
1307
+ int64_t ld_src,
1308
+ BFloat16* dst,
1309
+ int64_t ld_dst) {
1310
+ // Load from memory
1311
+ __m512i r[32];
1312
+ #pragma unroll(32)
1313
+ for (int i = 0; i < 32; ++i) {
1314
+ r[i] = _mm512_loadu_si512(reinterpret_cast<const __m512i*>(src + i* ld_src));
1315
+ }
1316
+
1317
+ __m512i d[32];
1318
+ _transpose_mxn_half_32_32(r, d);
1319
+
1320
+ // Store to dst
1321
+ #pragma unroll(32)
1322
+ for (int i = 0; i < 32; ++i) {
1323
+ _mm512_storeu_si512(dst + i* ld_dst, d[i]);
1324
+ }
1325
+ }
1326
+
1327
+ template<>
1328
+ inline void transpose_mxn<Half, 32, 32>(
1329
+ const Half* src,
1330
+ int64_t ld_src,
1331
+ Half* dst,
1332
+ int64_t ld_dst) {
1333
+ // Load from memory
1334
+ __m512i r[32];
1335
+ #pragma unroll(32)
1336
+ for (int i = 0; i < 32; ++i) {
1337
+ r[i] = _mm512_loadu_si512(reinterpret_cast<const __m512i*>(src + i* ld_src));
1338
+ }
1339
+
1340
+ __m512i d[32];
1341
+ _transpose_mxn_half_32_32(r, d);
1342
+
1343
+ // Store to dst
1344
+ #pragma unroll(32)
1345
+ for (int i = 0; i < 32; ++i) {
1346
+ _mm512_storeu_si512(dst + i* ld_dst, d[i]);
1347
+ }
1348
+ }
1349
+
1350
+ template <>
1351
+ class Vectorized<Half>: public Vectorized16<Half> {
1352
+ public:
1353
+ using Vectorized16::Vectorized16;
1354
+
1355
+ Vectorized<Half> frac() const;
1356
+
1357
+ Vectorized<Half> eq(const Vectorized<Half>& other) const;
1358
+ Vectorized<Half> ne(const Vectorized<Half>& other) const;
1359
+ Vectorized<Half> gt(const Vectorized<Half>& other) const;
1360
+ Vectorized<Half> ge(const Vectorized<Half>& other) const;
1361
+ Vectorized<Half> lt(const Vectorized<Half>& other) const;
1362
+ Vectorized<Half> le(const Vectorized<Half>& other) const;
1363
+ };
1364
+
1365
+ Vectorized<Half> inline operator+(const Vectorized<Half>& a, const Vectorized<Half>& b) {
1366
+ return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_add_ps(x, y); });
1367
+ }
1368
+ Vectorized<Half> inline operator-(const Vectorized<Half>& a, const Vectorized<Half>& b) {
1369
+ return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_sub_ps(x, y); });
1370
+ }
1371
+ Vectorized<Half> inline operator*(const Vectorized<Half>& a, const Vectorized<Half>& b) {
1372
+ return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_mul_ps(x, y); });
1373
+ }
1374
+ Vectorized<Half> inline operator/(const Vectorized<Half>& a, const Vectorized<Half>& b) {
1375
+ return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_div_ps(x, y); });
1376
+ }
1377
+
1378
+ Vectorized<Half> inline operator&(const Vectorized<Half>& a, const Vectorized<Half>& b) {
1379
+ return _mm512_and_si512(a, b);
1380
+ }
1381
+ Vectorized<Half> inline operator|(const Vectorized<Half>& a, const Vectorized<Half>& b) {
1382
+ return _mm512_or_si512(a, b);
1383
+ }
1384
+ Vectorized<Half> inline operator^(const Vectorized<Half>& a, const Vectorized<Half>& b) {
1385
+ return _mm512_xor_si512(a, b);
1386
+ }
1387
+
1388
+ inline Vectorized<Half> Vectorized<Half>::eq(const Vectorized<Half>& other) const {
1389
+ return (*this == other) & Vectorized<Half>(1.0f);
1390
+ }
1391
+
1392
+ inline Vectorized<Half> Vectorized<Half>::ne(const Vectorized<Half>& other) const {
1393
+ return (*this != other) & Vectorized<Half>(1.0f);
1394
+ }
1395
+
1396
+ inline Vectorized<Half> Vectorized<Half>::gt(const Vectorized<Half>& other) const {
1397
+ return (*this > other) & Vectorized<Half>(1.0f);
1398
+ }
1399
+
1400
+ inline Vectorized<Half> Vectorized<Half>::ge(const Vectorized<Half>& other) const {
1401
+ return (*this >= other) & Vectorized<Half>(1.0f);
1402
+ }
1403
+
1404
+ inline Vectorized<Half> Vectorized<Half>::lt(const Vectorized<Half>& other) const {
1405
+ return (*this < other) & Vectorized<Half>(1.0f);
1406
+ }
1407
+
1408
+ inline Vectorized<Half> Vectorized<Half>::le(const Vectorized<Half>& other) const {
1409
+ return (*this <= other) & Vectorized<Half>(1.0f);
1410
+ }
1411
+
1412
+ // frac. Implement this here so we can use subtraction
1413
+ inline Vectorized<Half> Vectorized<Half>::frac() const {
1414
+ return *this - this->trunc();
1415
+ }
1416
+
1417
+ // Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
1418
+ // either input is a NaN.
1419
+ template <>
1420
+ Vectorized<Half> inline maximum(const Vectorized<Half>& a, const Vectorized<Half>& b) {
1421
+ __m512 a_lo, a_hi;
1422
+ __m512 b_lo, b_hi;
1423
+ cvtfp16_fp32(__m512i(a), a_lo, a_hi);
1424
+ cvtfp16_fp32(__m512i(b), b_lo, b_hi);
1425
+ auto max_lo = _mm512_max_ps(a_lo, b_lo);
1426
+ auto max_hi = _mm512_max_ps(a_hi, b_hi);
1427
+ auto nan_lo_mask = _mm512_cmp_ps_mask(a_lo, b_lo, _CMP_UNORD_Q);
1428
+ auto nan_hi_mask = _mm512_cmp_ps_mask(a_hi, b_hi, _CMP_UNORD_Q);
1429
+ auto nan_lo = _mm512_castsi512_ps(_mm512_maskz_set1_epi32(nan_lo_mask, 0xFFFFFFFF));
1430
+ auto nan_hi = _mm512_castsi512_ps(_mm512_maskz_set1_epi32(nan_hi_mask, 0xFFFFFFFF));
1431
+ // Exploit the fact that all-ones is a NaN.
1432
+ auto o1 = _mm512_or_ps(max_lo, nan_lo);
1433
+ auto o2 = _mm512_or_ps(max_hi, nan_hi);
1434
+ return cvtfp32_fp16(o1, o2);
1435
+ }
1436
+
1437
+ // Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
1438
+ // either input is a NaN.
1439
+ template <>
1440
+ Vectorized<Half> inline minimum(const Vectorized<Half>& a, const Vectorized<Half>& b) {
1441
+ __m512 a_lo, a_hi;
1442
+ __m512 b_lo, b_hi;
1443
+ __m512i zero_vec = _mm512_set1_epi32(0);
1444
+ cvtfp16_fp32(__m512i(a), a_lo, a_hi);
1445
+ cvtfp16_fp32(__m512i(b), b_lo, b_hi);
1446
+ auto min_lo = _mm512_min_ps(a_lo, b_lo);
1447
+ auto min_hi = _mm512_min_ps(a_hi, b_hi);
1448
+ auto nan_lo_mask = _mm512_cmp_ps_mask(a_lo, b_lo, _CMP_UNORD_Q);
1449
+ auto nan_hi_mask = _mm512_cmp_ps_mask(a_hi, b_hi, _CMP_UNORD_Q);
1450
+ auto nan_lo = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, nan_lo_mask,
1451
+ 0xFFFFFFFF));
1452
+ auto nan_hi = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, nan_hi_mask,
1453
+ 0xFFFFFFFF));
1454
+ // Exploit the fact that all-ones is a NaN.
1455
+ auto o1 = _mm512_or_ps(min_lo, nan_lo);
1456
+ auto o2 = _mm512_or_ps(min_hi, nan_hi);
1457
+ return cvtfp32_fp16(o1, o2);
1458
+ }
1459
+
1460
+ template <>
1461
+ Vectorized<Half> inline clamp(const Vectorized<Half>& a,
1462
+ const Vectorized<Half>& min, const Vectorized<Half>& max) {
1463
+ __m512 a_lo, a_hi;
1464
+ __m512 min_lo, min_hi;
1465
+ __m512 max_lo, max_hi;
1466
+ cvtfp16_fp32(__m512i(a), a_lo, a_hi);
1467
+ cvtfp16_fp32(__m512i(min), min_lo, min_hi);
1468
+ cvtfp16_fp32(__m512i(max), max_lo, max_hi);
1469
+ auto o1 = _mm512_min_ps(max_lo, _mm512_max_ps(min_lo, a_lo));
1470
+ auto o2 = _mm512_min_ps(max_hi, _mm512_max_ps(min_hi, a_hi));
1471
+ return cvtfp32_fp16(o1, o2);
1472
+ }
1473
+
1474
+ template <>
1475
+ Vectorized<Half> inline clamp_max(const Vectorized<Half>& a, const Vectorized<Half>& max) {
1476
+ __m512 a_lo, a_hi;
1477
+ __m512 max_lo, max_hi;
1478
+ cvtfp16_fp32(__m512i(a), a_lo, a_hi);
1479
+ cvtfp16_fp32(__m512i(max), max_lo, max_hi);
1480
+ auto o1 = _mm512_min_ps(max_lo, a_lo);
1481
+ auto o2 = _mm512_min_ps(max_hi, a_hi);
1482
+ return cvtfp32_fp16(o1, o2);
1483
+ }
1484
+
1485
+ template <>
1486
+ Vectorized<Half> inline clamp_min(const Vectorized<Half>& a, const Vectorized<Half>& min) {
1487
+ __m512 a_lo, a_hi;
1488
+ __m512 min_lo, min_hi;
1489
+ cvtfp16_fp32(__m512i(a), a_lo, a_hi);
1490
+ cvtfp16_fp32(__m512i(min), min_lo, min_hi);
1491
+ auto o1 = _mm512_max_ps(min_lo, a_lo);
1492
+ auto o2 = _mm512_max_ps(min_hi, a_hi);
1493
+ return cvtfp32_fp16(o1, o2);
1494
+ }
1495
+
1496
+ template <>
1497
+ inline void convert(const Half* src, Half* dst, int64_t n) {
1498
+ int64_t i;
1499
+ #pragma unroll
1500
+ for (i = 0; i <= (n - Vectorized<Half>::size()); i += Vectorized<Half>::size()) {
1501
+ auto vsrc = _mm512_loadu_si512(reinterpret_cast<__m512i*>((void*)(src + i)));
1502
+ _mm512_storeu_si512(reinterpret_cast<__m512i*>((void*)(dst + i)), vsrc);
1503
+ }
1504
+ #pragma unroll
1505
+ for (; i < n; i++) {
1506
+ dst[i] = src[i];
1507
+ }
1508
+ }
1509
+
1510
+ template <>
1511
+ inline void convert(const float* src, Half* dst, int64_t n) {
1512
+ int64_t i;
1513
+ for (i = 0; i + Vectorized<Half>::size() <= n; i += Vectorized<Half>::size()) {
1514
+ __m512 a = _mm512_loadu_ps(&src[i]);
1515
+ __m512 b = _mm512_loadu_ps(&src[i + 16]);
1516
+
1517
+ __m512i bf = cvtfp32_fp16(a, b);
1518
+ _mm512_storeu_si512(reinterpret_cast<__m512i*>(&dst[i]), bf);
1519
+ }
1520
+ for (; i < n; i++) {
1521
+ dst[i] = c10::convert<Half>(src[i]);
1522
+ }
1523
+ }
1524
+
1525
+ template <>
1526
+ inline void convert(const double* src, Half* dst, int64_t n) {
1527
+ auto load_float = [](const double *src) -> __m512 {
1528
+ // Load one float vector from an array of doubles
1529
+ __m256 a = _mm512_cvtpd_ps(_mm512_loadu_pd(src));
1530
+ __m256 b = _mm512_cvtpd_ps(_mm512_loadu_pd(src + 8));
1531
+ return _mm512_insertf32x8(_mm512_castps256_ps512(a), b, 1);
1532
+ };
1533
+
1534
+ int64_t i;
1535
+ for (i = 0; i + Vectorized<Half>::size() <= n; i += Vectorized<Half>::size()) {
1536
+ __m512 a = load_float(&src[i]);
1537
+ __m512 b = load_float(&src[i + 16]);
1538
+
1539
+ __m512i bf = cvtfp32_fp16(a, b);
1540
+ _mm512_storeu_si512(reinterpret_cast<__m512i*>(&dst[i]), bf);
1541
+ }
1542
+ for (; i < n; i++) {
1543
+ dst[i] = c10::convert<Half>(src[i]);
1544
+ }
1545
+ }
1546
+
1547
+ template <>
1548
+ Vectorized<Half> inline fmadd(const Vectorized<Half>& a,
1549
+ const Vectorized<Half>& b, const Vectorized<Half>& c) {
1550
+ __m512 a_lo, a_hi;
1551
+ __m512 b_lo, b_hi;
1552
+ __m512 c_lo, c_hi;
1553
+ cvtfp16_fp32(__m512i(a), a_lo, a_hi);
1554
+ cvtfp16_fp32(__m512i(b), b_lo, b_hi);
1555
+ cvtfp16_fp32(__m512i(c), c_lo, c_hi);
1556
+ auto o1 = _mm512_fmadd_ps(a_lo, b_lo, c_lo);
1557
+ auto o2 = _mm512_fmadd_ps(a_hi, b_hi, c_hi);
1558
+ return cvtfp32_fp16(o1, o2);
1559
+ }
1560
+
1561
+ #define CONVERT_VECTORIZED_INIT(type, name) \
1562
+ inline std::tuple<Vectorized<float>, Vectorized<float>> convert_##name##_float(const Vectorized<type>& a) { \
1563
+ __m512 o1, o2; \
1564
+ cvt_to_fp32<type>(__m512i(a), o1, o2); \
1565
+ return std::make_tuple(o1, o2); \
1566
+ } \
1567
+ \
1568
+ inline Vectorized<type> convert_float_##name(const Vectorized<float>& a, const Vectorized<float>& b) { \
1569
+ return cvt_from_fp32<type>(__m512(a), __m512(b)); \
1570
+ }
1571
+ CONVERT_VECTORIZED_INIT(BFloat16, bfloat16);
1572
+ CONVERT_VECTORIZED_INIT(Half, half);
1573
+
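A hedged editorial sketch: CONVERT_VECTORIZED_INIT generates convert_bfloat16_float / convert_float_bfloat16 (and the half variants), each splitting one 16-bit vector into two Vectorized<float> halves and back. A typical round trip, with src_ptr an assumed bf16 pointer, could look like:

// Widen to fp32, do math there, then narrow back to bf16.
Vectorized<BFloat16> v = Vectorized<BFloat16>::loadu(src_ptr);
auto [lo, hi] = convert_bfloat16_float(v);   // two Vectorized<float>
lo = lo * lo;                                // any fp32 vector math
hi = hi * hi;
Vectorized<BFloat16> back = convert_float_bfloat16(lo, hi);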
1574
+ #else //defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
1575
+
1576
+ #define CONVERT_NON_VECTORIZED_INIT(type, name) \
1577
+ inline std::tuple<Vectorized<float>, Vectorized<float>> convert_##name##_float(const Vectorized<type>& a) { \
1578
+ constexpr int64_t K = Vectorized<type>::size(); \
1579
+ __at_align__ float arr[K]; \
1580
+ __at_align__ type arr2[K]; \
1581
+ a.store(arr2); \
1582
+ for (const auto k : c10::irange(K)) { \
1583
+ arr[k] = c10::convert<float>(arr2[k]); \
1584
+ } \
1585
+ return std::make_tuple( \
1586
+ Vectorized<float>::loadu(arr), \
1587
+ Vectorized<float>::loadu(arr + Vectorized<float>::size())); \
1588
+ } \
1589
+ \
1590
+ inline Vectorized<type> convert_float_##name(const Vectorized<float>& a, const Vectorized<float>& b) { \
1591
+ constexpr int64_t K = Vectorized<type>::size(); \
1592
+ __at_align__ float arr[K]; \
1593
+ __at_align__ type arr2[K]; \
1594
+ a.store(arr); \
1595
+ b.store(arr + Vectorized<float>::size()); \
1596
+ for (const auto k : c10::irange(K)) { \
1597
+ arr2[k] = c10::convert<type>(arr[k]); \
1598
+ } \
1599
+ return Vectorized<type>::loadu(arr2); \
1600
+ }
1601
+ CONVERT_NON_VECTORIZED_INIT(BFloat16, bfloat16);
1602
+ CONVERT_NON_VECTORIZED_INIT(Half, half);
1603
+
1604
+ #endif // defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
1605
+
1606
+ #if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
1607
+ #define LOAD_FP32_VECTORIZED_INIT(type, name) \
1608
+ inline void load_fp32_from_##name(const type *data, Vectorized<float>& out) { \
1609
+ auto values = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(data)); \
1610
+ __m512 out_values; \
1611
+ cvt_to_fp32<type>(values, out_values); \
1612
+ out = out_values; \
1613
+ } \
1614
+ \
1615
+ inline void load_fp32_from_##name(const type *data, Vectorized<float>& out1, Vectorized<float>& out2) { \
1616
+ auto vec = Vectorized<type>::loadu(data); \
1617
+ __m512 out1_values, out2_values; \
1618
+ cvt_to_fp32<type>(vec, out1_values, out2_values); \
1619
+ out1 = out1_values; \
1620
+ out2 = out2_values; \
1621
+ }
1622
+ LOAD_FP32_VECTORIZED_INIT(BFloat16, bf16);
1623
+ LOAD_FP32_VECTORIZED_INIT(Half, fp16);
1624
+
1625
+ #else // defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
1626
+ #define LOAD_FP32_NON_VECTORIZED_INIT(type, name) \
1627
+ inline void load_fp32_from_##name(const type *data, Vectorized<float>& out) { \
1628
+ __at_align__ float values[Vectorized<float>::size()]; \
1629
+ for (const auto k : c10::irange(Vectorized<float>::size())) { \
1630
+ values[k] = data[k]; \
1631
+ } \
1632
+ out = Vectorized<float>::loadu(values); \
1633
+ } \
1634
+ \
1635
+ inline void load_fp32_from_##name(const type *data, Vectorized<float>& out1, Vectorized<float>& out2) { \
1636
+ load_fp32_from_##name(data, out1); \
1637
+ data += Vectorized<float>::size(); \
1638
+ load_fp32_from_##name(data, out2); \
1639
+ }
1640
+ LOAD_FP32_NON_VECTORIZED_INIT(BFloat16, bf16);
1641
+ LOAD_FP32_NON_VECTORIZED_INIT(Half, fp16);
1642
+
1643
+ #endif
1644
+ }}}
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_complex_double.h ADDED
@@ -0,0 +1,512 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <c10/util/complex.h>
7
+ #include <c10/util/irange.h>
8
+ #include <ATen/cpu/vec/intrinsics.h>
9
+ #include <ATen/cpu/vec/vec_base.h>
10
+ #if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
11
+ #include <sleef.h>
12
+ #endif
13
+
14
+ namespace at {
15
+ namespace vec {
16
+ // See Note [CPU_CAPABILITY namespace]
17
+ inline namespace CPU_CAPABILITY {
18
+
19
+ #if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
20
+
21
+ template <> class Vectorized<c10::complex<double>> {
22
+ private:
23
+ __m512d values;
24
+ static constexpr __m512i zero_vector {0, 0, 0, 0, 0, 0, 0, 0};
25
+ public:
26
+ using value_type = c10::complex<double>;
27
+ using size_type = int;
28
+ static constexpr size_type size() {
29
+ return 4;
30
+ }
31
+ Vectorized() {}
32
+ Vectorized(__m512d v) : values(v) {}
33
+ Vectorized(c10::complex<double> val) {
34
+ double real_value = val.real();
35
+ double imag_value = val.imag();
36
+ values = _mm512_setr_pd(real_value, imag_value, real_value, imag_value,
37
+ real_value, imag_value, real_value, imag_value);
38
+ }
39
+ Vectorized(c10::complex<double> val1, c10::complex<double> val2,
40
+ c10::complex<double> val3, c10::complex<double> val4) {
41
+ values = _mm512_setr_pd(val1.real(), val1.imag(),
42
+ val2.real(), val2.imag(),
43
+ val3.real(), val3.imag(),
44
+ val4.real(), val4.imag());
45
+ }
46
+ operator __m512d() const {
47
+ return values;
48
+ }
49
+ template <int64_t mask>
50
+ static Vectorized<c10::complex<double>> blend(const Vectorized<c10::complex<double>>& a,
51
+ const Vectorized<c10::complex<double>>& b) {
52
+ // convert c10::complex<V> index mask to V index mask: xy -> xxyy
53
+ // NOLINTNEXTLINE(clang-diagnostic-warning)
54
+ switch (mask) {
55
+ case 0:
56
+ return a;
57
+ case 1:
58
+ return _mm512_mask_blend_pd(0x03, a.values, b.values); //b0000 0001 = b0000 0011
59
+ case 2:
60
+ return _mm512_mask_blend_pd(0x0C, a.values, b.values); //b0000 0010 = b0000 1100
61
+ case 3:
62
+ return _mm512_mask_blend_pd(0x0F, a.values, b.values); //b0000 0011 = b0000 1111
63
+ case 4:
64
+ return _mm512_mask_blend_pd(0x30, a.values, b.values); //b0000 0100 = b0011 0000
65
+ case 5:
66
+ return _mm512_mask_blend_pd(0x33, a.values, b.values); //b0000 0101 = b0011 0011
67
+ case 6:
68
+ return _mm512_mask_blend_pd(0x3C, a.values, b.values); //b0000 0110 = b0011 1100
69
+ case 7:
70
+ return _mm512_mask_blend_pd(0x3F, a.values, b.values); //b0000 0111 = b0011 1111
71
+ case 8:
72
+ return _mm512_mask_blend_pd(0xC0, a.values, b.values); //b0000 1000 = b1100 0000
73
+ case 9:
74
+ return _mm512_mask_blend_pd(0xC3, a.values, b.values); //b0000 1001 = b1100 0011
75
+ case 10:
76
+ return _mm512_mask_blend_pd(0xCC, a.values, b.values); //b0000 1010 = b1100 1100
77
+ case 11:
78
+ return _mm512_mask_blend_pd(0xCF, a.values, b.values); //b0000 1011 = b1100 1111
79
+ case 12:
80
+ return _mm512_mask_blend_pd(0xF0, a.values, b.values); //b0000 1100 = b1111 0000
81
+ case 13:
82
+ return _mm512_mask_blend_pd(0xF3, a.values, b.values); //b0000 1101 = b1111 0011
83
+ case 14:
84
+ return _mm512_mask_blend_pd(0xFC, a.values, b.values); //b0000 1110 = b1111 1100
85
+ case 15:
86
+ return _mm512_mask_blend_pd(0xFF, a.values, b.values); //b0000 1111 = b1111 1111
87
+ }
88
+ return b;
89
+ }
90
+ static Vectorized<c10::complex<double>> blendv(const Vectorized<c10::complex<double>>& a,
91
+ const Vectorized<c10::complex<double>>& b,
92
+ const Vectorized<c10::complex<double>>& mask) {
93
+ // convert c10::complex<V> index mask to V index mask: xy -> xxyy
94
+ auto mask_ = _mm512_unpacklo_pd(mask.values, mask.values);
95
+ auto all_ones = _mm512_set1_epi64(0xFFFFFFFFFFFFFFFF);
96
+ auto mmask = _mm512_cmp_epi64_mask(_mm512_castpd_si512(mask_), all_ones, _MM_CMPINT_EQ);
97
+ return _mm512_mask_blend_pd(mmask, a.values, b.values);
98
+ }
99
+ template<typename step_t>
100
+ static Vectorized<c10::complex<double>> arange(c10::complex<double> base = 0.,
101
+ step_t step = static_cast<step_t>(1)) {
102
+ return Vectorized<c10::complex<double>>(base,
103
+ base + c10::complex<double>(1)*step,
104
+ base + c10::complex<double>(2)*step,
105
+ base + c10::complex<double>(3)*step);
106
+ }
107
+ static Vectorized<c10::complex<double>> set(const Vectorized<c10::complex<double>>& a,
108
+ const Vectorized<c10::complex<double>>& b,
109
+ int64_t count = size()) {
110
+ switch (count) {
111
+ case 0:
112
+ return a;
113
+ case 1:
114
+ return blend<1>(a, b);
115
+ case 2:
116
+ return blend<3>(a, b);
117
+ case 3:
118
+ return blend<7>(a, b);
119
+ }
120
+ return b;
121
+ }
122
+ static Vectorized<c10::complex<double>> loadu(const void* ptr, int64_t count = size()) {
123
+ if (count == size())
124
+ return _mm512_loadu_pd(reinterpret_cast<const double*>(ptr));
125
+
126
+ __at_align__ double tmp_values[2*size()];
127
+ // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
128
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
129
+ // instructions while a loop would be compiled to one instruction.
130
+ for (const auto i : c10::irange(2*size())) {
131
+ tmp_values[i] = 0.0;
132
+ }
133
+ std::memcpy(
134
+ tmp_values,
135
+ reinterpret_cast<const double*>(ptr),
136
+ count * sizeof(c10::complex<double>));
137
+ return _mm512_load_pd(tmp_values);
138
+ }
139
+ void store(void* ptr, int count = size()) const {
140
+ if (count == size()) {
141
+ _mm512_storeu_pd(reinterpret_cast<double*>(ptr), values);
142
+ } else if (count > 0) {
143
+ double tmp_values[2*size()];
144
+ _mm512_storeu_pd(reinterpret_cast<double*>(tmp_values), values);
145
+ std::memcpy(ptr, tmp_values, count * sizeof(c10::complex<double>));
146
+ }
147
+ }
148
+ const c10::complex<double>& operator[](int idx) const = delete;
149
+ c10::complex<double>& operator[](int idx) = delete;
150
+ Vectorized<c10::complex<double>> map(c10::complex<double> (*const f)(const c10::complex<double> &)) const {
151
+ __at_align__ c10::complex<double> tmp[size()];
152
+ store(tmp);
153
+ for (const auto i : c10::irange(size())) {
154
+ tmp[i] = f(tmp[i]);
155
+ }
156
+ return loadu(tmp);
157
+ }
158
+ // AVX512 doesn't have horizontal add & horizontal sub instructions.
159
+ // TODO: hadd_pd() & hsub_pd() may have scope for improvement.
160
+ static inline __m512d hadd_pd(__m512d a, __m512d b) {
161
+ __m512i idx1 = _mm512_set_epi64(14, 6, 12, 4, 10, 2, 8, 0);
162
+ __m512i idx2 = _mm512_set_epi64(15, 7, 13, 5, 11, 3, 9, 1);
163
+ return _mm512_add_pd(_mm512_mask_permutex2var_pd(a, 0xff, idx1, b),
164
+ _mm512_mask_permutex2var_pd(a, 0xff, idx2, b));
165
+ }
166
+ static inline __m512d hsub_pd(__m512d a, __m512d b) {
167
+ __m512i idx1 = _mm512_set_epi64(14, 6, 12, 4, 10, 2, 8, 0);
168
+ __m512i idx2 = _mm512_set_epi64(15, 7, 13, 5, 11, 3, 9, 1);
169
+ return _mm512_sub_pd(_mm512_mask_permutex2var_pd(a, 0xff, idx1, b),
170
+ _mm512_mask_permutex2var_pd(a, 0xff, idx2, b));
171
+ }
172
+ __m512d abs_2_() const {
173
+ auto val_2 = _mm512_mul_pd(values, values); // a*a b*b
174
+ return hadd_pd(val_2, val_2); // a*a+b*b a*a+b*b
175
+ }
176
+ __m512d abs_() const {
177
+ auto real = _mm512_movedup_pd(values); // real real
178
+ // movehdup_pd does not exist...
179
+ auto imag = _mm512_permute_pd(values, 0xff); // imag imag
180
+ return Sleef_hypotd8_u05(real, imag); // abs abs
181
+ }
182
+ Vectorized<c10::complex<double>> abs() const {
183
+ const __m512d real_mask = _mm512_castsi512_pd(_mm512_setr_epi64(0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
184
+ 0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
185
+ 0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
186
+ 0xFFFFFFFFFFFFFFFF, 0x0000000000000000));
187
+ return _mm512_and_pd(abs_(), real_mask); // abs 0
188
+ }
189
+ __m512d angle_() const {
190
+ //angle = atan2(b, a)
191
+ auto b_a = _mm512_permute_pd(values, 0x55); // b a
192
+ return Sleef_atan2d8_u10(values, b_a); // 90-angle angle
193
+ }
194
+ Vectorized<c10::complex<double>> angle() const {
195
+ const __m512d real_mask = _mm512_castsi512_pd(_mm512_setr_epi64(0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
196
+ 0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
197
+ 0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
198
+ 0xFFFFFFFFFFFFFFFF, 0x0000000000000000));
199
+ auto angle = _mm512_permute_pd(angle_(), 0x55); // angle 90-angle
200
+ return _mm512_and_pd(angle, real_mask); // angle 0
201
+ }
202
+ Vectorized<c10::complex<double>> sgn() const {
203
+ auto abs = abs_();
204
+ auto zero = _mm512_setzero_pd();
205
+ auto mask = _mm512_cmp_pd_mask(abs, zero, _CMP_EQ_OQ);
206
+ auto div = values / abs;
207
+ return _mm512_mask_blend_pd(mask, div, zero);
208
+ }
209
+ __m512d real_() const {
210
+ const __m512d real_mask = _mm512_castsi512_pd(_mm512_setr_epi64(0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
211
+ 0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
212
+ 0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
213
+ 0xFFFFFFFFFFFFFFFF, 0x0000000000000000));
214
+ return _mm512_and_pd(values, real_mask);
215
+ }
216
+ Vectorized<c10::complex<double>> real() const {
217
+ return real_();
218
+ }
219
+ __m512d imag_() const {
220
+ const __m512d imag_mask = _mm512_castsi512_pd(_mm512_setr_epi64(0x0000000000000000, 0xFFFFFFFFFFFFFFFF,
221
+ 0x0000000000000000, 0xFFFFFFFFFFFFFFFF,
222
+ 0x0000000000000000, 0xFFFFFFFFFFFFFFFF,
223
+ 0x0000000000000000, 0xFFFFFFFFFFFFFFFF));
224
+ return _mm512_and_pd(values, imag_mask);
225
+ }
226
+ Vectorized<c10::complex<double>> imag() const {
227
+ return _mm512_permute_pd(imag_(), 0x55); //b a
228
+ }
229
+ __m512d conj_() const {
230
+ const __m512d sign_mask = _mm512_setr_pd(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
231
+ return _mm512_xor_pd(values, sign_mask); // a -b
232
+ }
233
+ Vectorized<c10::complex<double>> conj() const {
234
+ return conj_();
235
+ }
236
+ Vectorized<c10::complex<double>> log() const {
237
+ // Most trigonometric ops use the log() op to improve complex number performance.
238
+ return map(std::log);
239
+ }
240
+ Vectorized<c10::complex<double>> log2() const {
241
+ const __m512d log2_ = _mm512_set1_pd(std::log(2));
242
+ return _mm512_div_pd(log(), log2_);
243
+ }
244
+ Vectorized<c10::complex<double>> log10() const {
245
+ const __m512d log10_ = _mm512_set1_pd(std::log(10));
246
+ return _mm512_div_pd(log(), log10_);
247
+ }
248
+ Vectorized<c10::complex<double>> log1p() const {
249
+ return map(std::log1p);
250
+ }
251
+ Vectorized<c10::complex<double>> asin() const {
252
+ // asin(x)
253
+ // = -i*ln(iz + sqrt(1 -z^2))
254
+ // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi)))
255
+ // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi))
256
+ const __m512d one = _mm512_set1_pd(1);
257
+
258
+ auto conj = conj_();
259
+ auto b_a = _mm512_permute_pd(conj, 0x55); //-b a
260
+ auto ab = _mm512_mul_pd(conj, b_a); //-ab -ab
261
+ auto im = _mm512_add_pd(ab, ab); //-2ab -2ab
262
+
263
+ auto val_2 = _mm512_mul_pd(values, values); // a*a b*b
264
+ auto re = hsub_pd(val_2, _mm512_permute_pd(val_2, 0x55)); // a*a-b*b b*b-a*a
265
+ re = _mm512_sub_pd(one, re);
266
+
267
+ auto root = Vectorized(_mm512_mask_blend_pd(0xAA, re, im)).sqrt(); //sqrt(re + i*im)
268
+ auto ln = Vectorized(_mm512_add_pd(b_a, root)).log(); //ln(iz + sqrt())
269
+ return Vectorized(_mm512_permute_pd(ln.values, 0x55)).conj(); //-i*ln()
270
+ }
271
+ Vectorized<c10::complex<double>> acos() const {
272
+ // acos(x) = pi/2 - asin(x)
273
+ constexpr auto pi_2d = c10::pi<double> / 2;
274
+ const __m512d pi_2 = _mm512_setr_pd(pi_2d, 0.0, pi_2d, 0.0, pi_2d, 0.0, pi_2d, 0.0);
275
+ return _mm512_sub_pd(pi_2, asin());
276
+ }
277
+ Vectorized<c10::complex<double>> atan() const;
278
+ Vectorized<c10::complex<double>> atanh() const {
279
+ return map(std::atanh);
280
+ }
281
+ Vectorized<c10::complex<double>> exp() const {
282
+ //exp(a + bi)
283
+ // = exp(a)*(cos(b) + sin(b)i)
284
+ auto exp = Sleef_expd8_u10(values); //exp(a) exp(b)
285
+ exp = _mm512_mask_blend_pd(0xAA, exp, _mm512_permute_pd(exp, 0x55)); //exp(a) exp(a)
286
+
287
+ auto sin_cos = Sleef_sincosd8_u10(values); //[sin(a), cos(a)] [sin(b), cos(b)]
288
+ auto cos_sin = _mm512_mask_blend_pd(0xAA, _mm512_permute_pd(sin_cos.y, 0x55),
289
+ sin_cos.x); //cos(b) sin(b)
290
+ return _mm512_mul_pd(exp, cos_sin);
291
+ }
292
+ Vectorized<c10::complex<double>> exp2() const {
293
+ // Use identity 2**x = exp(log(2) * x)
294
+ const __m512d ln_2 = _mm512_set1_pd(c10::ln_2<double>);
295
+ Vectorized<c10::complex<double>> scaled_values = _mm512_mul_pd(values, ln_2);
296
+ return scaled_values.exp();
297
+ }
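Scalar forms of the two identities above (illustration only): exp(a + bi) = exp(a) * (cos(b) + i*sin(b)), and 2**z = exp(ln(2) * z).

#include <cmath>
#include <complex>

inline std::complex<double> exp_reference(std::complex<double> z) {
  const double m = std::exp(z.real());
  return {m * std::cos(z.imag()), m * std::sin(z.imag())};
}
inline std::complex<double> exp2_reference(std::complex<double> z) {
  return exp_reference(std::log(2.0) * z);
}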
298
+ Vectorized<c10::complex<double>> expm1() const {
299
+ return map(std::expm1);
300
+ }
301
+ Vectorized<c10::complex<double>> sin() const {
302
+ return map(std::sin);
303
+ }
304
+ Vectorized<c10::complex<double>> sinh() const {
305
+ return map(std::sinh);
306
+ }
307
+ Vectorized<c10::complex<double>> cos() const {
308
+ return map(std::cos);
309
+ }
310
+ Vectorized<c10::complex<double>> cosh() const {
311
+ return map(std::cosh);
312
+ }
313
+ Vectorized<c10::complex<double>> ceil() const {
314
+ return _mm512_ceil_pd(values);
315
+ }
316
+ Vectorized<c10::complex<double>> floor() const {
317
+ return _mm512_floor_pd(values);
318
+ }
319
+ Vectorized<c10::complex<double>> neg() const {
320
+ auto zero = _mm512_setzero_pd();
321
+ return _mm512_sub_pd(zero, values);
322
+ }
323
+ Vectorized<c10::complex<double>> round() const {
324
+ return _mm512_roundscale_pd(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
325
+ }
326
+ Vectorized<c10::complex<double>> tan() const {
327
+ return map(std::tan);
328
+ }
329
+ Vectorized<c10::complex<double>> tanh() const {
330
+ return map(std::tanh);
331
+ }
332
+ Vectorized<c10::complex<double>> trunc() const {
333
+ return _mm512_roundscale_pd(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
334
+ }
335
+ Vectorized<c10::complex<double>> sqrt() const {
336
+ return map(std::sqrt);
337
+ }
338
+ Vectorized<c10::complex<double>> reciprocal() const;
339
+ Vectorized<c10::complex<double>> rsqrt() const {
340
+ return sqrt().reciprocal();
341
+ }
342
+ Vectorized<c10::complex<double>> pow(const Vectorized<c10::complex<double>> &exp) const {
343
+ __at_align__ c10::complex<double> x_tmp[size()];
344
+ __at_align__ c10::complex<double> y_tmp[size()];
345
+ store(x_tmp);
346
+ exp.store(y_tmp);
347
+ for (const auto i : c10::irange(size())) {
348
+ x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]);
349
+ }
350
+ return loadu(x_tmp);
351
+ }
352
+ // Comparison using the _CMP_**_OQ predicate.
353
+ // `O`: get false if an operand is NaN
354
+ // `Q`: do not raise if an operand is NaN
355
+ Vectorized<c10::complex<double>> operator==(const Vectorized<c10::complex<double>>& other) const {
356
+ auto mask = _mm512_cmp_pd_mask(values, other.values, _CMP_EQ_OQ);
357
+ return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, mask,
358
+ 0xFFFFFFFFFFFFFFFF));
359
+ }
360
+ Vectorized<c10::complex<double>> operator!=(const Vectorized<c10::complex<double>>& other) const {
361
+ auto mask = _mm512_cmp_pd_mask(values, other.values, _CMP_NEQ_UQ);
362
+ return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, mask,
363
+ 0xFFFFFFFFFFFFFFFF));
364
+ }
365
+ Vectorized<c10::complex<double>> operator<(const Vectorized<c10::complex<double>>& other) const {
366
+ TORCH_CHECK(false, "not supported for complex numbers");
367
+ }
368
+ Vectorized<c10::complex<double>> operator<=(const Vectorized<c10::complex<double>>& other) const {
369
+ TORCH_CHECK(false, "not supported for complex numbers");
370
+ }
371
+ Vectorized<c10::complex<double>> operator>(const Vectorized<c10::complex<double>>& other) const {
372
+ TORCH_CHECK(false, "not supported for complex numbers");
373
+ }
374
+ Vectorized<c10::complex<double>> operator>=(const Vectorized<c10::complex<double>>& other) const {
375
+ TORCH_CHECK(false, "not supported for complex numbers");
376
+ }
377
+
378
+ Vectorized<c10::complex<double>> eq(const Vectorized<c10::complex<double>>& other) const;
379
+ Vectorized<c10::complex<double>> ne(const Vectorized<c10::complex<double>>& other) const;
380
+ };
381
+
382
+ template <> Vectorized<c10::complex<double>> inline operator+(const Vectorized<c10::complex<double>> &a,
383
+ const Vectorized<c10::complex<double>> &b) {
384
+ return _mm512_add_pd(a, b);
385
+ }
386
+
387
+ template <> Vectorized<c10::complex<double>> inline operator-(const Vectorized<c10::complex<double>> &a,
388
+ const Vectorized<c10::complex<double>> &b) {
389
+ return _mm512_sub_pd(a, b);
390
+ }
391
+
392
+ template <> Vectorized<c10::complex<double>> inline operator*(const Vectorized<c10::complex<double>> &a,
393
+ const Vectorized<c10::complex<double>> &b) {
394
+ //(a + bi) * (c + di) = (ac - bd) + (ad + bc)i
395
+ const __m512d sign_mask = _mm512_setr_pd(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
396
+ auto ac_bd = _mm512_mul_pd(a, b); //ac bd
397
+
398
+ auto d_c = _mm512_permute_pd(b, 0x55); //d c
399
+ d_c = _mm512_xor_pd(sign_mask, d_c); //d -c
400
+ auto ad_bc = _mm512_mul_pd(a, d_c); //ad -bc
401
+
402
+ auto ret = Vectorized<c10::complex<double>>::hsub_pd(ac_bd, ad_bc); //ac - bd ad + bc
403
+ return ret;
404
+ }
405
+
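A worked scalar view of the lane arithmetic above (illustration only): per complex lane, ac_bd holds (a*c, b*d) and ad_bc holds (a*d, -b*c), so hsub_pd produces (a*c - b*d, a*d + b*c), i.e. (a + bi)(c + di).

#include <c10/util/complex.h>

inline c10::complex<double> mul_reference(c10::complex<double> x, c10::complex<double> y) {
  const double a = x.real(), b = x.imag(), c = y.real(), d = y.imag();
  return {a * c - b * d, a * d + b * c};
}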
406
+ template <> Vectorized<c10::complex<double>> inline operator/(const Vectorized<c10::complex<double>> &a,
407
+ const Vectorized<c10::complex<double>> &b) {
408
+ //re + im*i = (a + bi) / (c + di)
409
+ auto mask = _mm512_set1_pd(-0.f);
410
+ auto fabs_cd = _mm512_andnot_pd(mask, b); // |c| |d|
411
+ auto fabs_dc = _mm512_permute_pd(fabs_cd, 0x55); // |d| |c|
412
+ auto scale = _mm512_rcp14_pd(_mm512_max_pd(fabs_cd, fabs_dc)); // 1/sc 1/sc
413
+ auto a2 = _mm512_mul_pd(a, scale); // a/sc b/sc
414
+ auto b2 = _mm512_mul_pd(b, scale); // c/sc d/sc
415
+ auto acbd2 = _mm512_mul_pd(a2, b2);
416
+
417
+ const __m512d sign_mask = _mm512_setr_pd(-0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0);
418
+ auto dc2 = _mm512_permute_pd(b2, 0x55); // d/sc c/sc
419
+ dc2 = _mm512_xor_pd(sign_mask, dc2); // -d/|c,d| c/sc
420
+ auto adbc2 = _mm512_mul_pd(a2, dc2); //-ad/sc^2 bc/sc^2
421
+ auto res2 = Vectorized<c10::complex<double>>::hadd_pd(acbd2, adbc2); //(ac+bd)/sc^2 (bc-ad)/sc^2
422
+
423
+ // get the denominator
424
+ auto denom2 = Vectorized<c10::complex<double>>(b2).abs_2_(); // (c^2+d^2)/sc^2 (c^2+d^2)/sc^2
425
+ res2 = _mm512_div_pd(res2, denom2);
426
+ return res2;
427
+ }
428
+
429
+ // reciprocal. Implement this here so we can use multiplication.
430
+ inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::reciprocal() const{
431
+ //re + im*i = (a + bi) / (c + di)
432
+ //re = (ac + bd)/abs_2() = c/abs_2()
433
+ //im = (bc - ad)/abs_2() = d/abs_2()
434
+ const __m512d sign_mask = _mm512_setr_pd(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
435
+ auto c_d = _mm512_xor_pd(sign_mask, values); //c -d
436
+ return _mm512_div_pd(c_d, abs_2_());
437
+ }
438
+
439
+ inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::atan() const {
440
+ // atan(x) = i/2 * ln((i + z)/(i - z))
441
+ const __m512d i = _mm512_setr_pd(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0);
442
+ const Vectorized i_half = _mm512_setr_pd(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5);
443
+
444
+ auto sum = Vectorized(_mm512_add_pd(i, values)); // a 1+b
445
+ auto sub = Vectorized(_mm512_sub_pd(i, values)); // -a 1-b
446
+ auto ln = (sum/sub).log(); // ln((i + z)/(i - z))
447
+ return i_half*ln; // i/2*ln()
448
+ }
449
+
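Scalar form of the identity implemented above (illustration only): atan(z) = (i/2) * log((i + z) / (i - z)).

#include <complex>

inline std::complex<double> atan_reference(std::complex<double> z) {
  const std::complex<double> i(0.0, 1.0);
  return (i / 2.0) * std::log((i + z) / (i - z));
}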
450
+ template <>
451
+ Vectorized<c10::complex<double>> inline maximum(const Vectorized<c10::complex<double>>& a,
452
+ const Vectorized<c10::complex<double>>& b) {
453
+ auto zero_vec = _mm512_set1_epi64(0);
454
+ auto abs_a = a.abs_2_();
455
+ auto abs_b = b.abs_2_();
456
+ auto mask = _mm512_cmp_pd_mask(abs_a, abs_b, _CMP_LT_OQ);
457
+ auto max = _mm512_mask_blend_pd(mask, a, b);
458
+ // Exploit the fact that all-ones is a NaN.
459
+ auto isnan_mask = _mm512_cmp_pd_mask(abs_a, abs_b, _CMP_UNORD_Q);
460
+ auto isnan = _mm512_mask_set1_epi64(zero_vec, isnan_mask,
461
+ 0xFFFFFFFFFFFFFFFF);
462
+ return _mm512_or_pd(max, _mm512_castsi512_pd(isnan));
463
+ }
464
+
465
+ template <>
466
+ Vectorized<c10::complex<double>> inline minimum(const Vectorized<c10::complex<double>>& a,
467
+ const Vectorized<c10::complex<double>>& b) {
468
+ auto zero_vec = _mm512_set1_epi64(0);
469
+ auto abs_a = a.abs_2_();
470
+ auto abs_b = b.abs_2_();
471
+ auto mask = _mm512_cmp_pd_mask(abs_a, abs_b, _CMP_GT_OQ);
472
+ auto min = _mm512_mask_blend_pd(mask, a, b);
473
+ // Exploit the fact that all-ones is a NaN.
474
+ auto isnan_mask = _mm512_cmp_pd_mask(abs_a, abs_b, _CMP_UNORD_Q);
475
+ auto isnan = _mm512_mask_set1_epi64(zero_vec, isnan_mask,
476
+ 0xFFFFFFFFFFFFFFFF);
477
+ return _mm512_or_pd(min, _mm512_castsi512_pd(isnan));
478
+ }
479
+
480
+ template <>
481
+ Vectorized<c10::complex<double>> inline operator&(const Vectorized<c10::complex<double>>& a,
482
+ const Vectorized<c10::complex<double>>& b) {
483
+ return _mm512_and_pd(a, b);
484
+ }
485
+
486
+ template <>
487
+ Vectorized<c10::complex<double>> inline operator|(const Vectorized<c10::complex<double>>& a,
488
+ const Vectorized<c10::complex<double>>& b) {
489
+ return _mm512_or_pd(a, b);
490
+ }
491
+
492
+ template <>
493
+ Vectorized<c10::complex<double>> inline operator^(const Vectorized<c10::complex<double>>& a,
494
+ const Vectorized<c10::complex<double>>& b) {
495
+ return _mm512_xor_pd(a, b);
496
+ }
497
+
498
+ inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::eq(const Vectorized<c10::complex<double>>& other) const {
499
+ auto eq = (*this == other); // compares real and imag individually
500
+ // If both real numbers and imag numbers are equal, then the complex numbers are equal
501
+ return (eq.real() & eq.imag()) & Vectorized<c10::complex<double>>(_mm512_set1_pd(1.0));
502
+ }
503
+
504
+ inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::ne(const Vectorized<c10::complex<double>>& other) const {
505
+ auto ne = (*this != other); // compares real and imag individually
506
+ // If either real numbers or imag numbers are not equal, then the complex numbers are not equal
507
+ return (ne.real() | ne.imag()) & Vectorized<c10::complex<double>>(_mm512_set1_pd(1.0));
508
+ }
509
+
510
+ #endif
511
+
512
+ }}}
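An end-to-end usage sketch for the specialization above (illustration only; assumes an AVX512 build where size() == 4):

#include <ATen/cpu/vec/vec.h>
#include <c10/util/complex.h>

void rotate_by_i(const c10::complex<double>* src, c10::complex<double>* dst) {
  using V = at::vec::Vectorized<c10::complex<double>>;
  auto v = V::loadu(src);                                   // four complex doubles
  auto rotated = v * V(c10::complex<double>(0.0, 1.0));     // multiply every lane by i
  rotated.store(dst);
}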
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_complex_float.h ADDED
@@ -0,0 +1,1018 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <c10/util/complex.h>
7
+ #include <c10/util/irange.h>
8
+ #include <ATen/cpu/vec/intrinsics.h>
9
+ #include <ATen/cpu/vec/vec_base.h>
10
+ #if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
11
+ #include <sleef.h>
12
+ #endif
13
+
14
+ namespace at {
15
+ namespace vec {
16
+ // See Note [CPU_CAPABILITY namespace]
17
+ inline namespace CPU_CAPABILITY {
18
+
19
+ #if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
20
+
21
+ template <> class Vectorized<c10::complex<float>> {
22
+ private:
23
+ __m512 values;
24
+ static constexpr __m512i zero_vector {0, 0, 0, 0, 0, 0, 0, 0};
25
+ public:
26
+ using value_type = c10::complex<float>;
27
+ using size_type = int;
28
+ static constexpr size_type size() {
29
+ return 8;
30
+ }
31
+ Vectorized() {}
32
+ Vectorized(__m512 v) : values(v) {}
33
+ Vectorized(c10::complex<float> val) {
34
+ float real_value = val.real();
35
+ float imag_value = val.imag();
36
+ values = _mm512_setr_ps(real_value, imag_value,
37
+ real_value, imag_value,
38
+ real_value, imag_value,
39
+ real_value, imag_value,
40
+ real_value, imag_value,
41
+ real_value, imag_value,
42
+ real_value, imag_value,
43
+ real_value, imag_value);
44
+ }
45
+ Vectorized(c10::complex<float> val1, c10::complex<float> val2,
46
+ c10::complex<float> val3, c10::complex<float> val4,
47
+ c10::complex<float> val5, c10::complex<float> val6,
48
+ c10::complex<float> val7, c10::complex<float> val8) {
49
+ values = _mm512_setr_ps(val1.real(), val1.imag(),
50
+ val2.real(), val2.imag(),
51
+ val3.real(), val3.imag(),
52
+ val4.real(), val4.imag(),
53
+ val5.real(), val5.imag(),
54
+ val6.real(), val6.imag(),
55
+ val7.real(), val7.imag(),
56
+ val8.real(), val8.imag());
57
+ }
58
+ operator __m512() const {
59
+ return values;
60
+ }
61
+ template <int64_t mask>
62
+ static Vectorized<c10::complex<float>> blend(const Vectorized<c10::complex<float>>& a,
63
+ const Vectorized<c10::complex<float>>& b) {
64
+ // convert c10::complex<V> index mask to V index mask: xy -> xxyy
65
+ static_assert(mask > -1 && mask < 256, "Unexpected mask value");
66
+ // The compiler would hopefully convert this switch condition
67
+ // into a jump table
68
+ switch (mask) {
69
+ case 0:
70
+ return a;
71
+ case 1:
72
+ return _mm512_mask_blend_ps(0x03, a.values, b.values);
73
+ case 2:
74
+ return _mm512_mask_blend_ps(0x0C, a.values, b.values);
75
+ case 3:
76
+ return _mm512_mask_blend_ps(0x0F, a.values, b.values);
77
+ case 4:
78
+ return _mm512_mask_blend_ps(0x30, a.values, b.values);
79
+ case 5:
80
+ return _mm512_mask_blend_ps(0x33, a.values, b.values);
81
+ case 6:
82
+ return _mm512_mask_blend_ps(0x3C, a.values, b.values);
83
+ case 7:
84
+ return _mm512_mask_blend_ps(0x3F, a.values, b.values);
85
+ case 8:
86
+ return _mm512_mask_blend_ps(0xC0, a.values, b.values);
87
+ case 9:
88
+ return _mm512_mask_blend_ps(0xC3, a.values, b.values);
89
+ case 10:
90
+ return _mm512_mask_blend_ps(0xCC, a.values, b.values);
91
+ case 11:
92
+ return _mm512_mask_blend_ps(0xCF, a.values, b.values);
93
+ case 12:
94
+ return _mm512_mask_blend_ps(0xF0, a.values, b.values);
95
+ case 13:
96
+ return _mm512_mask_blend_ps(0xF3, a.values, b.values);
97
+ case 14:
98
+ return _mm512_mask_blend_ps(0xFC, a.values, b.values);
99
+ case 15:
100
+ return _mm512_mask_blend_ps(0xFF, a.values, b.values);
101
+ case 16:
102
+ return _mm512_mask_blend_ps(0x300, a.values, b.values);
103
+ case 17:
104
+ return _mm512_mask_blend_ps(0x303, a.values, b.values);
105
+ case 18:
106
+ return _mm512_mask_blend_ps(0x30C, a.values, b.values);
107
+ case 19:
108
+ return _mm512_mask_blend_ps(0x30F, a.values, b.values);
109
+ case 20:
110
+ return _mm512_mask_blend_ps(0x330, a.values, b.values);
111
+ case 21:
112
+ return _mm512_mask_blend_ps(0x333, a.values, b.values);
113
+ case 22:
114
+ return _mm512_mask_blend_ps(0x33C, a.values, b.values);
115
+ case 23:
116
+ return _mm512_mask_blend_ps(0x33F, a.values, b.values);
117
+ case 24:
118
+ return _mm512_mask_blend_ps(0x3C0, a.values, b.values);
119
+ case 25:
120
+ return _mm512_mask_blend_ps(0x3C3, a.values, b.values);
121
+ case 26:
122
+ return _mm512_mask_blend_ps(0x3CC, a.values, b.values);
123
+ case 27:
124
+ return _mm512_mask_blend_ps(0x3CF, a.values, b.values);
125
+ case 28:
126
+ return _mm512_mask_blend_ps(0x3F0, a.values, b.values);
127
+ case 29:
128
+ return _mm512_mask_blend_ps(0x3F3, a.values, b.values);
129
+ case 30:
130
+ return _mm512_mask_blend_ps(0x3FC, a.values, b.values);
131
+ case 31:
132
+ return _mm512_mask_blend_ps(0x3FF, a.values, b.values);
133
+ case 32:
134
+ return _mm512_mask_blend_ps(0xC00, a.values, b.values);
135
+ case 33:
136
+ return _mm512_mask_blend_ps(0xC03, a.values, b.values);
137
+ case 34:
138
+ return _mm512_mask_blend_ps(0xC0C, a.values, b.values);
139
+ case 35:
140
+ return _mm512_mask_blend_ps(0xC0F, a.values, b.values);
141
+ case 36:
142
+ return _mm512_mask_blend_ps(0xC30, a.values, b.values);
143
+ case 37:
144
+ return _mm512_mask_blend_ps(0xC33, a.values, b.values);
145
+ case 38:
146
+ return _mm512_mask_blend_ps(0xC3C, a.values, b.values);
147
+ case 39:
148
+ return _mm512_mask_blend_ps(0xC3F, a.values, b.values);
149
+ case 40:
150
+ return _mm512_mask_blend_ps(0xCC0, a.values, b.values);
151
+ case 41:
152
+ return _mm512_mask_blend_ps(0xCC3, a.values, b.values);
153
+ case 42:
154
+ return _mm512_mask_blend_ps(0xCCC, a.values, b.values);
155
+ case 43:
156
+ return _mm512_mask_blend_ps(0xCCF, a.values, b.values);
157
+ case 44:
158
+ return _mm512_mask_blend_ps(0xCF0, a.values, b.values);
159
+ case 45:
160
+ return _mm512_mask_blend_ps(0xCF3, a.values, b.values);
161
+ case 46:
162
+ return _mm512_mask_blend_ps(0xCFC, a.values, b.values);
163
+ case 47:
164
+ return _mm512_mask_blend_ps(0xCFF, a.values, b.values);
165
+ case 48:
166
+ return _mm512_mask_blend_ps(0xF00, a.values, b.values);
167
+ case 49:
168
+ return _mm512_mask_blend_ps(0xF03, a.values, b.values);
169
+ case 50:
170
+ return _mm512_mask_blend_ps(0xF0C, a.values, b.values);
171
+ case 51:
172
+ return _mm512_mask_blend_ps(0xF0F, a.values, b.values);
173
+ case 52:
174
+ return _mm512_mask_blend_ps(0xF30, a.values, b.values);
175
+ case 53:
176
+ return _mm512_mask_blend_ps(0xF33, a.values, b.values);
177
+ case 54:
178
+ return _mm512_mask_blend_ps(0xF3C, a.values, b.values);
179
+ case 55:
180
+ return _mm512_mask_blend_ps(0xF3F, a.values, b.values);
181
+ case 56:
182
+ return _mm512_mask_blend_ps(0xFC0, a.values, b.values);
183
+ case 57:
184
+ return _mm512_mask_blend_ps(0xFC3, a.values, b.values);
185
+ case 58:
186
+ return _mm512_mask_blend_ps(0xFCC, a.values, b.values);
187
+ case 59:
188
+ return _mm512_mask_blend_ps(0xFCF, a.values, b.values);
189
+ case 60:
190
+ return _mm512_mask_blend_ps(0xFF0, a.values, b.values);
191
+ case 61:
192
+ return _mm512_mask_blend_ps(0xFF3, a.values, b.values);
193
+ case 62:
194
+ return _mm512_mask_blend_ps(0xFFC, a.values, b.values);
195
+ case 63:
196
+ return _mm512_mask_blend_ps(0xFFF, a.values, b.values);
197
+ case 64:
198
+ return _mm512_mask_blend_ps(0x3000, a.values, b.values);
199
+ case 65:
200
+ return _mm512_mask_blend_ps(0x3003, a.values, b.values);
201
+ case 66:
202
+ return _mm512_mask_blend_ps(0x300C, a.values, b.values);
203
+ case 67:
204
+ return _mm512_mask_blend_ps(0x300F, a.values, b.values);
205
+ case 68:
206
+ return _mm512_mask_blend_ps(0x3030, a.values, b.values);
207
+ case 69:
208
+ return _mm512_mask_blend_ps(0x3033, a.values, b.values);
209
+ case 70:
210
+ return _mm512_mask_blend_ps(0x303C, a.values, b.values);
211
+ case 71:
212
+ return _mm512_mask_blend_ps(0x303F, a.values, b.values);
213
+ case 72:
214
+ return _mm512_mask_blend_ps(0x30C0, a.values, b.values);
215
+ case 73:
216
+ return _mm512_mask_blend_ps(0X30C3, a.values, b.values);
217
+ case 74:
218
+ return _mm512_mask_blend_ps(0x30CC, a.values, b.values);
219
+ case 75:
220
+ return _mm512_mask_blend_ps(0x30CF, a.values, b.values);
221
+ case 76:
222
+ return _mm512_mask_blend_ps(0x30F0, a.values, b.values);
223
+ case 77:
224
+ return _mm512_mask_blend_ps(0x30F3, a.values, b.values);
225
+ case 78:
226
+ return _mm512_mask_blend_ps(0x30FC, a.values, b.values);
227
+ case 79:
228
+ return _mm512_mask_blend_ps(0x30FF, a.values, b.values);
229
+ case 80:
230
+ return _mm512_mask_blend_ps(0x3300, a.values, b.values);
231
+ case 81:
232
+ return _mm512_mask_blend_ps(0X3303, a.values, b.values);
233
+ case 82:
234
+ return _mm512_mask_blend_ps(0x330C, a.values, b.values);
235
+ case 83:
236
+ return _mm512_mask_blend_ps(0x330F, a.values, b.values);
237
+ case 84:
238
+ return _mm512_mask_blend_ps(0x3330, a.values, b.values);
239
+ case 85:
240
+ return _mm512_mask_blend_ps(0x3333, a.values, b.values);
241
+ case 86:
242
+ return _mm512_mask_blend_ps(0x333C, a.values, b.values);
243
+ case 87:
244
+ return _mm512_mask_blend_ps(0X333F, a.values, b.values);
245
+ case 88:
246
+ return _mm512_mask_blend_ps(0x33C0, a.values, b.values);
247
+ case 89:
248
+ return _mm512_mask_blend_ps(0x33C3, a.values, b.values);
249
+ case 90:
250
+ return _mm512_mask_blend_ps(0x33CC, a.values, b.values);
251
+ case 91:
252
+ return _mm512_mask_blend_ps(0x33CF, a.values, b.values);
253
+ case 92:
254
+ return _mm512_mask_blend_ps(0x33F0, a.values, b.values);
255
+ case 93:
256
+ return _mm512_mask_blend_ps(0x33F3, a.values, b.values);
257
+ case 94:
258
+ return _mm512_mask_blend_ps(0x33FC, a.values, b.values);
259
+ case 95:
260
+ return _mm512_mask_blend_ps(0x33FF, a.values, b.values);
261
+ case 96:
262
+ return _mm512_mask_blend_ps(0X3C00, a.values, b.values);
263
+ case 97:
264
+ return _mm512_mask_blend_ps(0x3C03, a.values, b.values);
265
+ case 98:
266
+ return _mm512_mask_blend_ps(0x3C0C, a.values, b.values);
267
+ case 99:
268
+ return _mm512_mask_blend_ps(0x3C0F, a.values, b.values);
269
+ case 100:
270
+ return _mm512_mask_blend_ps(0x3C30, a.values, b.values);
271
+ case 101:
272
+ return _mm512_mask_blend_ps(0x3C33, a.values, b.values);
273
+ case 102:
274
+ return _mm512_mask_blend_ps(0x3C3C, a.values, b.values);
275
+ case 103:
276
+ return _mm512_mask_blend_ps(0x3C3F, a.values, b.values);
277
+ case 104:
278
+ return _mm512_mask_blend_ps(0x3CC0, a.values, b.values);
279
+ case 105:
280
+ return _mm512_mask_blend_ps(0x3CC3, a.values, b.values);
281
+ case 106:
282
+ return _mm512_mask_blend_ps(0x3CCC, a.values, b.values);
283
+ case 107:
284
+ return _mm512_mask_blend_ps(0x3CCF, a.values, b.values);
285
+ case 108:
286
+ return _mm512_mask_blend_ps(0x3CF0, a.values, b.values);
287
+ case 109:
288
+ return _mm512_mask_blend_ps(0x3CF3, a.values, b.values);
289
+ case 110:
290
+ return _mm512_mask_blend_ps(0x3CFC, a.values, b.values);
291
+ case 111:
292
+ return _mm512_mask_blend_ps(0x3CFF, a.values, b.values);
293
+ case 112:
294
+ return _mm512_mask_blend_ps(0x3F00, a.values, b.values);
295
+ case 113:
296
+ return _mm512_mask_blend_ps(0x3F03, a.values, b.values);
297
+ case 114:
298
+ return _mm512_mask_blend_ps(0x3F0C, a.values, b.values);
299
+ case 115:
300
+ return _mm512_mask_blend_ps(0x3F0F, a.values, b.values);
301
+ case 116:
302
+ return _mm512_mask_blend_ps(0x3F30, a.values, b.values);
303
+ case 117:
304
+ return _mm512_mask_blend_ps(0x3F33, a.values, b.values);
305
+ case 118:
306
+ return _mm512_mask_blend_ps(0x3F3C, a.values, b.values);
307
+ case 119:
308
+ return _mm512_mask_blend_ps(0x3F3F, a.values, b.values);
309
+ case 120:
310
+ return _mm512_mask_blend_ps(0x3FC0, a.values, b.values);
311
+ case 121:
312
+ return _mm512_mask_blend_ps(0x3FC3, a.values, b.values);
313
+ case 122:
314
+ return _mm512_mask_blend_ps(0x3FCC, a.values, b.values);
315
+ case 123:
316
+ return _mm512_mask_blend_ps(0x3FCF, a.values, b.values);
317
+ case 124:
318
+ return _mm512_mask_blend_ps(0x3FF0, a.values, b.values);
319
+ case 125:
320
+ return _mm512_mask_blend_ps(0x3FF3, a.values, b.values);
321
+ case 126:
322
+ return _mm512_mask_blend_ps(0x3FFC, a.values, b.values);
323
+ case 127:
324
+ return _mm512_mask_blend_ps(0x3FFF, a.values, b.values);
325
+ case 128:
326
+ return _mm512_mask_blend_ps(0xC000, a.values, b.values);
327
+ case 129:
328
+ return _mm512_mask_blend_ps(0xC003, a.values, b.values);
329
+ case 130:
330
+ return _mm512_mask_blend_ps(0xC00C, a.values, b.values);
331
+ case 131:
332
+ return _mm512_mask_blend_ps(0xC00F, a.values, b.values);
333
+ case 132:
334
+ return _mm512_mask_blend_ps(0xC030, a.values, b.values);
335
+ case 133:
336
+ return _mm512_mask_blend_ps(0xC033, a.values, b.values);
337
+ case 134:
338
+ return _mm512_mask_blend_ps(0xC03C, a.values, b.values);
339
+ case 135:
340
+ return _mm512_mask_blend_ps(0xC03F, a.values, b.values);
341
+ case 136:
342
+ return _mm512_mask_blend_ps(0xC0C0, a.values, b.values);
343
+ case 137:
344
+ return _mm512_mask_blend_ps(0xC0C3, a.values, b.values);
345
+ case 138:
346
+ return _mm512_mask_blend_ps(0xC0CC, a.values, b.values);
347
+ case 139:
348
+ return _mm512_mask_blend_ps(0xC0CF, a.values, b.values);
349
+ case 140:
350
+ return _mm512_mask_blend_ps(0xC0F0, a.values, b.values);
351
+ case 141:
352
+ return _mm512_mask_blend_ps(0xC0F3, a.values, b.values);
353
+ case 142:
354
+ return _mm512_mask_blend_ps(0xC0FC, a.values, b.values);
355
+ case 143:
356
+ return _mm512_mask_blend_ps(0xC0FF, a.values, b.values);
357
+ case 144:
358
+ return _mm512_mask_blend_ps(0xC300, a.values, b.values);
359
+ case 145:
360
+ return _mm512_mask_blend_ps(0xC303, a.values, b.values);
361
+ case 146:
362
+ return _mm512_mask_blend_ps(0xC30C, a.values, b.values);
363
+ case 147:
364
+ return _mm512_mask_blend_ps(0xC30F, a.values, b.values);
365
+ case 148:
366
+ return _mm512_mask_blend_ps(0xC330, a.values, b.values);
367
+ case 149:
368
+ return _mm512_mask_blend_ps(0xC333, a.values, b.values);
369
+ case 150:
370
+ return _mm512_mask_blend_ps(0xC33C, a.values, b.values);
371
+ case 151:
372
+ return _mm512_mask_blend_ps(0xC33F, a.values, b.values);
373
+ case 152:
374
+ return _mm512_mask_blend_ps(0xC3C0, a.values, b.values);
375
+ case 153:
376
+ return _mm512_mask_blend_ps(0xC3C3, a.values, b.values);
377
+ case 154:
378
+ return _mm512_mask_blend_ps(0xC3CC, a.values, b.values);
379
+ case 155:
380
+ return _mm512_mask_blend_ps(0xC3CF, a.values, b.values);
381
+ case 156:
382
+ return _mm512_mask_blend_ps(0xC3F0, a.values, b.values);
383
+ case 157:
384
+ return _mm512_mask_blend_ps(0xC3F3, a.values, b.values);
385
+ case 158:
386
+ return _mm512_mask_blend_ps(0xC3FC, a.values, b.values);
387
+ case 159:
388
+ return _mm512_mask_blend_ps(0xC3FF, a.values, b.values);
389
+ case 160:
390
+ return _mm512_mask_blend_ps(0xCC00, a.values, b.values);
391
+ case 161:
392
+ return _mm512_mask_blend_ps(0xCC03, a.values, b.values);
393
+ case 162:
394
+ return _mm512_mask_blend_ps(0xCC0C, a.values, b.values);
395
+ case 163:
396
+ return _mm512_mask_blend_ps(0xCC0F, a.values, b.values);
397
+ case 164:
398
+ return _mm512_mask_blend_ps(0xCC30, a.values, b.values);
399
+ case 165:
400
+ return _mm512_mask_blend_ps(0xCC33, a.values, b.values);
401
+ case 166:
402
+ return _mm512_mask_blend_ps(0xCC3C, a.values, b.values);
403
+ case 167:
404
+ return _mm512_mask_blend_ps(0xCC3F, a.values, b.values);
405
+ case 168:
406
+ return _mm512_mask_blend_ps(0xCCC0, a.values, b.values);
407
+ case 169:
408
+ return _mm512_mask_blend_ps(0xCCC3, a.values, b.values);
409
+ case 170:
410
+ return _mm512_mask_blend_ps(0xCCCC, a.values, b.values);
411
+ case 171:
412
+ return _mm512_mask_blend_ps(0xCCCF, a.values, b.values);
413
+ case 172:
414
+ return _mm512_mask_blend_ps(0xCCF0, a.values, b.values);
415
+ case 173:
416
+ return _mm512_mask_blend_ps(0xCCF3, a.values, b.values);
417
+ case 174:
418
+ return _mm512_mask_blend_ps(0xCCFC, a.values, b.values);
419
+ case 175:
420
+ return _mm512_mask_blend_ps(0xCCFF, a.values, b.values);
421
+ case 176:
422
+ return _mm512_mask_blend_ps(0xCF00, a.values, b.values);
423
+ case 177:
424
+ return _mm512_mask_blend_ps(0xCF03, a.values, b.values);
425
+ case 178:
426
+ return _mm512_mask_blend_ps(0xCF0C, a.values, b.values);
427
+ case 179:
428
+ return _mm512_mask_blend_ps(0xCF0F, a.values, b.values);
429
+ case 180:
430
+ return _mm512_mask_blend_ps(0xCF30, a.values, b.values);
431
+ case 181:
432
+ return _mm512_mask_blend_ps(0xCF33, a.values, b.values);
433
+ case 182:
434
+ return _mm512_mask_blend_ps(0xCF3C, a.values, b.values);
435
+ case 183:
436
+ return _mm512_mask_blend_ps(0xCF3F, a.values, b.values);
437
+ case 184:
438
+ return _mm512_mask_blend_ps(0xCFC0, a.values, b.values);
439
+ case 185:
440
+ return _mm512_mask_blend_ps(0xCFC3, a.values, b.values);
441
+ case 186:
442
+ return _mm512_mask_blend_ps(0xCFCC, a.values, b.values);
443
+ case 187:
444
+ return _mm512_mask_blend_ps(0xCFCF, a.values, b.values);
445
+ case 188:
446
+ return _mm512_mask_blend_ps(0xCFF0, a.values, b.values);
447
+ case 189:
448
+ return _mm512_mask_blend_ps(0xCFF3, a.values, b.values);
449
+ case 190:
450
+ return _mm512_mask_blend_ps(0xCFFC, a.values, b.values);
451
+ case 191:
452
+ return _mm512_mask_blend_ps(0xCFFF, a.values, b.values);
453
+ case 192:
454
+ return _mm512_mask_blend_ps(0xF000, a.values, b.values);
455
+ case 193:
456
+ return _mm512_mask_blend_ps(0xF003, a.values, b.values);
457
+ case 194:
458
+ return _mm512_mask_blend_ps(0xF00C, a.values, b.values);
459
+ case 195:
460
+ return _mm512_mask_blend_ps(0xF00F, a.values, b.values);
461
+ case 196:
462
+ return _mm512_mask_blend_ps(0xF030, a.values, b.values);
463
+ case 197:
464
+ return _mm512_mask_blend_ps(0xF033, a.values, b.values);
465
+ case 198:
466
+ return _mm512_mask_blend_ps(0xF03C, a.values, b.values);
467
+ case 199:
468
+ return _mm512_mask_blend_ps(0xF03F, a.values, b.values);
469
+ case 200:
470
+ return _mm512_mask_blend_ps(0XF0C0, a.values, b.values);
471
+ case 201:
472
+ return _mm512_mask_blend_ps(0xF0C3, a.values, b.values);
473
+ case 202:
474
+ return _mm512_mask_blend_ps(0xF0CC, a.values, b.values);
475
+ case 203:
476
+ return _mm512_mask_blend_ps(0xF0CF, a.values, b.values);
477
+ case 204:
478
+ return _mm512_mask_blend_ps(0xF0F0, a.values, b.values);
479
+ case 205:
480
+ return _mm512_mask_blend_ps(0xF0F3, a.values, b.values);
481
+ case 206:
482
+ return _mm512_mask_blend_ps(0xF0FC, a.values, b.values);
483
+ case 207:
484
+ return _mm512_mask_blend_ps(0xF0FF, a.values, b.values);
485
+ case 208:
486
+ return _mm512_mask_blend_ps(0XF300, a.values, b.values);
487
+ case 209:
488
+ return _mm512_mask_blend_ps(0xF303, a.values, b.values);
489
+ case 210:
490
+ return _mm512_mask_blend_ps(0xF30C, a.values, b.values);
491
+ case 211:
492
+ return _mm512_mask_blend_ps(0xF30F, a.values, b.values);
493
+ case 212:
494
+ return _mm512_mask_blend_ps(0xF330, a.values, b.values);
495
+ case 213:
496
+ return _mm512_mask_blend_ps(0xF333, a.values, b.values);
497
+ case 214:
498
+ return _mm512_mask_blend_ps(0XF33C, a.values, b.values);
499
+ case 215:
500
+ return _mm512_mask_blend_ps(0xF33F, a.values, b.values);
501
+ case 216:
502
+ return _mm512_mask_blend_ps(0xF3C0, a.values, b.values);
503
+ case 217:
504
+ return _mm512_mask_blend_ps(0xF3C3, a.values, b.values);
505
+ case 218:
506
+ return _mm512_mask_blend_ps(0xF3CC, a.values, b.values);
507
+ case 219:
508
+ return _mm512_mask_blend_ps(0xF3CF, a.values, b.values);
509
+ case 220:
510
+ return _mm512_mask_blend_ps(0xF3F0, a.values, b.values);
511
+ case 221:
512
+ return _mm512_mask_blend_ps(0xF3F3, a.values, b.values);
513
+ case 222:
514
+ return _mm512_mask_blend_ps(0xF3FC, a.values, b.values);
515
+ case 223:
516
+ return _mm512_mask_blend_ps(0XF3FF, a.values, b.values);
517
+ case 224:
518
+ return _mm512_mask_blend_ps(0xFC00, a.values, b.values);
519
+ case 225:
520
+ return _mm512_mask_blend_ps(0xFC03, a.values, b.values);
521
+ case 226:
522
+ return _mm512_mask_blend_ps(0xFC0C, a.values, b.values);
523
+ case 227:
524
+ return _mm512_mask_blend_ps(0xFC0F, a.values, b.values);
525
+ case 228:
526
+ return _mm512_mask_blend_ps(0xFC30, a.values, b.values);
527
+ case 229:
528
+ return _mm512_mask_blend_ps(0xFC33, a.values, b.values);
529
+ case 230:
530
+ return _mm512_mask_blend_ps(0xFC3C, a.values, b.values);
531
+ case 231:
532
+ return _mm512_mask_blend_ps(0xFC3F, a.values, b.values);
533
+ case 232:
534
+ return _mm512_mask_blend_ps(0xFCC0, a.values, b.values);
535
+ case 233:
536
+ return _mm512_mask_blend_ps(0xFCC3, a.values, b.values);
537
+ case 234:
538
+ return _mm512_mask_blend_ps(0xFCCC, a.values, b.values);
539
+ case 235:
540
+ return _mm512_mask_blend_ps(0xFCCF, a.values, b.values);
541
+ case 236:
542
+ return _mm512_mask_blend_ps(0xFCF0, a.values, b.values);
543
+ case 237:
544
+ return _mm512_mask_blend_ps(0xFCF3, a.values, b.values);
545
+ case 238:
546
+ return _mm512_mask_blend_ps(0xFCFC, a.values, b.values);
547
+ case 239:
548
+ return _mm512_mask_blend_ps(0xFCFF, a.values, b.values);
549
+ case 240:
550
+ return _mm512_mask_blend_ps(0xFF00, a.values, b.values);
551
+ case 241:
552
+ return _mm512_mask_blend_ps(0xFF03, a.values, b.values);
553
+ case 242:
554
+ return _mm512_mask_blend_ps(0xFF0C, a.values, b.values);
555
+ case 243:
556
+ return _mm512_mask_blend_ps(0xFF0F, a.values, b.values);
557
+ case 244:
558
+ return _mm512_mask_blend_ps(0xFF30, a.values, b.values);
559
+ case 245:
560
+ return _mm512_mask_blend_ps(0xFF33, a.values, b.values);
561
+ case 246:
562
+ return _mm512_mask_blend_ps(0xFF3C, a.values, b.values);
563
+ case 247:
564
+ return _mm512_mask_blend_ps(0xFF3F, a.values, b.values);
565
+ case 248:
566
+ return _mm512_mask_blend_ps(0xFFC0, a.values, b.values);
567
+ case 249:
568
+ return _mm512_mask_blend_ps(0xFFC3, a.values, b.values);
569
+ case 250:
570
+ return _mm512_mask_blend_ps(0xFFCC, a.values, b.values);
571
+ case 251:
572
+ return _mm512_mask_blend_ps(0xFFCF, a.values, b.values);
573
+ case 252:
574
+ return _mm512_mask_blend_ps(0xFFF0, a.values, b.values);
575
+ case 253:
576
+ return _mm512_mask_blend_ps(0xFFF3, a.values, b.values);
577
+ case 254:
578
+ return _mm512_mask_blend_ps(0xFFFC, a.values, b.values);
579
+ default: break;
580
+ }
581
+ return b;
582
+ }
583
+ static Vectorized<c10::complex<float>> blendv(const Vectorized<c10::complex<float>>& a,
584
+ const Vectorized<c10::complex<float>>& b,
585
+ const Vectorized<c10::complex<float>>& mask) {
586
+ // convert c10::complex<V> index mask to V index mask: xy -> xxyy
587
+ auto mask_ = _mm512_unpacklo_ps(mask.values, mask.values);
588
+ auto all_ones = _mm512_set1_epi32(0xFFFFFFFF);
589
+ auto mmask = _mm512_cmp_epi32_mask(_mm512_castps_si512(mask_), all_ones, _MM_CMPINT_EQ);
590
+ return _mm512_mask_blend_ps(mmask, a.values, b.values);
591
+ }
592
+ template<typename step_t>
593
+ static Vectorized<c10::complex<float>> arange(c10::complex<float> base = 0.,
594
+ step_t step = static_cast<step_t>(1)) {
595
+ return Vectorized<c10::complex<float>>(base,
596
+ base + step,
597
+ base + c10::complex<float>(2)*step,
598
+ base + c10::complex<float>(3)*step,
599
+ base + c10::complex<float>(4)*step,
600
+ base + c10::complex<float>(5)*step,
601
+ base + c10::complex<float>(6)*step,
602
+ base + c10::complex<float>(7)*step);
603
+ }
604
+ static Vectorized<c10::complex<float>> set(const Vectorized<c10::complex<float>>& a,
605
+ const Vectorized<c10::complex<float>>& b,
606
+ int64_t count = size()) {
607
+ switch (count) {
608
+ case 0:
609
+ return a;
610
+ case 1:
611
+ return blend<1>(a, b);
612
+ case 2:
613
+ return blend<3>(a, b);
614
+ case 3:
615
+ return blend<7>(a, b);
616
+ case 4:
617
+ return blend<15>(a, b);
618
+ case 5:
619
+ return blend<31>(a, b);
620
+ case 6:
621
+ return blend<63>(a, b);
622
+ case 7:
623
+ return blend<127>(a, b);
624
+ }
625
+ return b;
626
+ }
627
+ static Vectorized<c10::complex<float>> loadu(const void* ptr, int64_t count = size()) {
628
+ if (count == size())
629
+ return _mm512_loadu_ps(reinterpret_cast<const float*>(ptr));
630
+
631
+ __at_align__ float tmp_values[2*size()];
632
+ // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
633
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
634
+ // instructions while a loop would be compiled to one instruction.
635
+ for (const auto i : c10::irange(2*size())) {
636
+ tmp_values[i] = 0.0;
637
+ }
638
+ std::memcpy(
639
+ tmp_values,
640
+ reinterpret_cast<const float*>(ptr),
641
+ count * sizeof(c10::complex<float>));
642
+ return _mm512_load_ps(tmp_values);
643
+ }
644
+ void store(void* ptr, int count = size()) const {
645
+ if (count == size()) {
646
+ _mm512_storeu_ps(reinterpret_cast<float*>(ptr), values);
647
+ } else if (count > 0) {
648
+ float tmp_values[2*size()];
649
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmp_values), values);
650
+ std::memcpy(ptr, tmp_values, count * sizeof(c10::complex<float>));
651
+ }
652
+ }
653
+ // AVX512 doesn't have horizontal add & horizontal sub instructions.
654
+ // TODO: hadd_pd() & hsub_pd() may have scope for improvement.
655
+ static inline __m512 hadd_ps(__m512 a, __m512 b) {
656
+ __m512i idx1 = _mm512_set_epi32(30, 14, 28, 12, 26, 10, 24, 8, 22, 6, 20, 4, 18, 2, 16, 0);
657
+ __m512i idx2 = _mm512_set_epi32(31, 15, 29, 13, 27, 11, 25, 9, 23, 7, 21, 5, 19, 3, 17, 1);
658
+ return _mm512_add_ps(_mm512_mask_permutex2var_ps(a, 0xffff, idx1, b),
659
+ _mm512_mask_permutex2var_ps(a, 0xffff, idx2, b));
660
+ }
661
+ static inline __m512 hsub_ps(__m512 a, __m512 b) {
662
+ __m512i idx1 = _mm512_set_epi32(30, 14, 28, 12, 26, 10, 24, 8, 22, 6, 20, 4, 18, 2, 16, 0);
663
+ __m512i idx2 = _mm512_set_epi32(31, 15, 29, 13, 27, 11, 25, 9, 23, 7, 21, 5, 19, 3, 17, 1);
664
+ return _mm512_sub_ps(_mm512_mask_permutex2var_ps(a, 0xffff, idx1, b),
665
+ _mm512_mask_permutex2var_ps(a, 0xffff, idx2, b));
666
+ }
667
+ const c10::complex<float>& operator[](int idx) const = delete;
668
+ c10::complex<float>& operator[](int idx) = delete;
669
+ Vectorized<c10::complex<float>> map(c10::complex<float> (*const f)(const c10::complex<float> &)) const {
670
+ __at_align__ c10::complex<float> tmp[size()];
671
+ store(tmp);
672
+ for (const auto i : c10::irange(size())) {
673
+ tmp[i] = f(tmp[i]);
674
+ }
675
+ return loadu(tmp);
676
+ }
677
+ __m512 abs_2_() const {
678
+ auto val_2 = _mm512_mul_ps(values, values); // a*a b*b
679
+ auto ret = hadd_ps(val_2, val_2); // a*a+b*b a*a+b*b
680
+ return ret;
681
+ }
682
+ __m512 abs_() const {
683
+ auto real = _mm512_moveldup_ps(values); // real real
684
+ auto imag = _mm512_movehdup_ps(values); // imag imag
685
+ return Sleef_hypotf16_u05(real, imag); // abs abs
686
+ }
687
+ Vectorized<c10::complex<float>> abs() const {
688
+ const __m512 real_mask = _mm512_castsi512_ps(_mm512_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
689
+ 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
690
+ 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
691
+ 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000));
692
+ return _mm512_and_ps(abs_(), real_mask); // abs 0
693
+ }
694
+ __m512 angle_() const {
695
+ //angle = atan2(b, a)
696
+ auto b_a = _mm512_permute_ps(values, 0xB1); // b a
697
+ return Sleef_atan2f16_u10(values, b_a); // 90-angle angle
698
+ }
699
+ Vectorized<c10::complex<float>> angle() const {
700
+ const __m512 real_mask = _mm512_castsi512_ps(_mm512_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
701
+ 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
702
+ 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
703
+ 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000));
704
+ auto angle = _mm512_permute_ps(angle_(), 0xB1); // angle 90-angle
705
+ return _mm512_and_ps(angle, real_mask); // angle 0
706
+ }
707
+ Vectorized<c10::complex<float>> sgn() const {
708
+ auto abs = abs_();
709
+ auto zero = _mm512_setzero_ps();
710
+ auto mask = _mm512_cmp_ps_mask(abs, zero, _CMP_EQ_OQ);
711
+ auto div = values / abs;
712
+ return _mm512_mask_blend_ps(mask, div, zero);
713
+ }
714
+ __m512 real_() const {
715
+ const __m512 real_mask = _mm512_castsi512_ps(_mm512_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
716
+ 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
717
+ 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
718
+ 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000));
719
+ return _mm512_and_ps(values, real_mask);
720
+ }
721
+ Vectorized<c10::complex<float>> real() const {
722
+ return real_();
723
+ }
724
+ __m512 imag_() const {
725
+ const __m512 imag_mask = _mm512_castsi512_ps(_mm512_setr_epi32(0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF,
726
+ 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF,
727
+ 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF,
728
+ 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF));
729
+ return _mm512_and_ps(values, imag_mask);
730
+ }
731
+ Vectorized<c10::complex<float>> imag() const {
732
+ return _mm512_permute_ps(imag_(), 0xB1); //b a
733
+ }
734
+ __m512 conj_() const {
735
+ const __m512 sign_mask = _mm512_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0,
736
+ 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
737
+ return _mm512_xor_ps(values, sign_mask); // a -b
738
+ }
739
+ Vectorized<c10::complex<float>> conj() const {
740
+ return conj_();
741
+ }
742
+ Vectorized<c10::complex<float>> log() const {
743
+ // Most trigonometric ops use the log() op to improve complex number performance.
744
+ return map(std::log);
745
+ }
746
+ Vectorized<c10::complex<float>> log2() const {
747
+ const __m512 log2_ = _mm512_set1_ps(std::log(2));
748
+ return _mm512_div_ps(log(), log2_);
749
+ }
750
+ Vectorized<c10::complex<float>> log10() const {
751
+ const __m512 log10_ = _mm512_set1_ps(std::log(10));
752
+ return _mm512_div_ps(log(), log10_);
753
+ }
754
+ Vectorized<c10::complex<float>> log1p() const {
755
+ return map(std::log1p);
756
+ }
757
+ Vectorized<c10::complex<float>> asin() const {
758
+ // asin(x)
759
+ // = -i*ln(iz + sqrt(1 -z^2))
760
+ // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi)))
761
+ // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi))
762
+ const __m512 one = _mm512_set1_ps(1);
763
+
764
+ auto conj = conj_();
765
+ auto b_a = _mm512_permute_ps(conj, 0xB1); //-b a
766
+ auto ab = _mm512_mul_ps(conj, b_a); //-ab -ab
767
+ auto im = _mm512_add_ps(ab, ab); //-2ab -2ab
768
+
769
+ auto val_2 = _mm512_mul_ps(values, values); // a*a b*b
770
+ auto re = hsub_ps(val_2, _mm512_permute_ps(val_2, 0xB1)); // a*a-b*b b*b-a*a
771
+ re = _mm512_sub_ps(one, re);
772
+
773
+ auto root = Vectorized(_mm512_mask_blend_ps(0xAAAA, re, im)).sqrt(); //sqrt(re + i*im)
774
+ auto ln = Vectorized(_mm512_add_ps(b_a, root)).log(); //ln(iz + sqrt())
775
+ return Vectorized(_mm512_permute_ps(ln.values, 0xB1)).conj(); //-i*ln()
776
+ }
777
+ Vectorized<c10::complex<float>> acos() const {
778
+ return map(std::acos);
779
+ }
780
+ Vectorized<c10::complex<float>> atan() const;
781
+ Vectorized<c10::complex<float>> atanh() const {
782
+ return map(std::atanh);
783
+ }
784
+ Vectorized<c10::complex<float>> exp() const {
785
+ //exp(a + bi)
786
+ // = exp(a)*(cos(b) + sin(b)i)
787
+ auto exp = Sleef_expf16_u10(values); //exp(a) exp(b)
788
+ exp = _mm512_mask_blend_ps(0xAAAA, exp, _mm512_permute_ps(exp, 0xB1)); //exp(a) exp(a)
789
+
790
+ auto sin_cos = Sleef_sincosf16_u10(values); //[sin(a), cos(a)] [sin(b), cos(b)]
791
+ auto cos_sin = _mm512_mask_blend_ps(0xAAAA, _mm512_permute_ps(sin_cos.y, 0xB1),
792
+ sin_cos.x); //cos(b) sin(b)
793
+ return _mm512_mul_ps(exp, cos_sin);
794
+ }
795
+ Vectorized<c10::complex<float>> exp2() const {
796
+ // Use identity 2**x = exp(log(2) * x)
797
+ const __m512 ln_2 = _mm512_set1_ps(c10::ln_2<float>);
798
+ Vectorized<c10::complex<float>> scaled_values = _mm512_mul_ps(values, ln_2);
799
+ return scaled_values.exp();
800
+ }
801
+ Vectorized<c10::complex<float>> expm1() const {
802
+ return map(std::expm1);
803
+ }
804
+ Vectorized<c10::complex<float>> sin() const {
805
+ return map(std::sin);
806
+ }
807
+ Vectorized<c10::complex<float>> sinh() const {
808
+ return map(std::sinh);
809
+ }
810
+ Vectorized<c10::complex<float>> cos() const {
811
+ return map(std::cos);
812
+ }
813
+ Vectorized<c10::complex<float>> cosh() const {
814
+ return map(std::cosh);
815
+ }
816
+ Vectorized<c10::complex<float>> ceil() const {
817
+ return _mm512_ceil_ps(values);
818
+ }
819
+ Vectorized<c10::complex<float>> floor() const {
820
+ return _mm512_floor_ps(values);
821
+ }
822
+ Vectorized<c10::complex<float>> neg() const {
823
+ auto zero = _mm512_setzero_ps();
824
+ return _mm512_sub_ps(zero, values);
825
+ }
826
+ Vectorized<c10::complex<float>> round() const {
827
+ return _mm512_roundscale_ps(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
828
+ }
829
+ Vectorized<c10::complex<float>> tan() const {
830
+ return map(std::tan);
831
+ }
832
+ Vectorized<c10::complex<float>> tanh() const {
833
+ return map(std::tanh);
834
+ }
835
+ Vectorized<c10::complex<float>> trunc() const {
836
+ return _mm512_roundscale_ps(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
837
+ }
838
+ Vectorized<c10::complex<float>> sqrt() const {
839
+ return map(std::sqrt);
840
+ }
841
+ Vectorized<c10::complex<float>> reciprocal() const;
842
+ Vectorized<c10::complex<float>> rsqrt() const {
843
+ return sqrt().reciprocal();
844
+ }
845
+ Vectorized<c10::complex<float>> pow(const Vectorized<c10::complex<float>> &exp) const {
846
+ __at_align__ c10::complex<float> x_tmp[size()];
847
+ __at_align__ c10::complex<float> y_tmp[size()];
848
+ store(x_tmp);
849
+ exp.store(y_tmp);
850
+ for (const auto i : c10::irange(size())) {
851
+ x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]);
852
+ }
853
+ return loadu(x_tmp);
854
+ }
855
+ // Comparison using the _CMP_**_OQ predicate.
856
+ // `O`: get false if an operand is NaN
857
+ // `Q`: do not raise if an operand is NaN
858
+ Vectorized<c10::complex<float>> operator==(const Vectorized<c10::complex<float>>& other) const {
859
+ auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_EQ_OQ);
860
+ return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF));
861
+ }
862
+ Vectorized<c10::complex<float>> operator!=(const Vectorized<c10::complex<float>>& other) const {
863
+ auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_NEQ_UQ);
864
+ return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF));
865
+ }
866
+ Vectorized<c10::complex<float>> operator<(const Vectorized<c10::complex<float>>& other) const {
867
+ TORCH_CHECK(false, "not supported for complex numbers");
868
+ }
869
+ Vectorized<c10::complex<float>> operator<=(const Vectorized<c10::complex<float>>& other) const {
870
+ TORCH_CHECK(false, "not supported for complex numbers");
871
+ }
872
+ Vectorized<c10::complex<float>> operator>(const Vectorized<c10::complex<float>>& other) const {
873
+ TORCH_CHECK(false, "not supported for complex numbers");
874
+ }
875
+ Vectorized<c10::complex<float>> operator>=(const Vectorized<c10::complex<float>>& other) const {
876
+ TORCH_CHECK(false, "not supported for complex numbers");
877
+ }
878
+
879
+ Vectorized<c10::complex<float>> eq(const Vectorized<c10::complex<float>>& other) const;
880
+ Vectorized<c10::complex<float>> ne(const Vectorized<c10::complex<float>>& other) const;
881
+ };
882
+
883
+ template <> Vectorized<c10::complex<float>> inline operator+(const Vectorized<c10::complex<float>> &a,
884
+ const Vectorized<c10::complex<float>> &b) {
885
+ return _mm512_add_ps(a, b);
886
+ }
887
+
888
+ template <> Vectorized<c10::complex<float>> inline operator-(const Vectorized<c10::complex<float>> &a,
889
+ const Vectorized<c10::complex<float>> &b) {
890
+ return _mm512_sub_ps(a, b);
891
+ }
892
+
893
+ template <> Vectorized<c10::complex<float>> inline operator*(const Vectorized<c10::complex<float>> &a,
894
+ const Vectorized<c10::complex<float>> &b) {
895
+ //(a + bi) * (c + di) = (ac - bd) + (ad + bc)i
896
+ const __m512 sign_mask = _mm512_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0,
897
+ 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
898
+ auto ac_bd = _mm512_mul_ps(a, b); //ac bd
899
+
900
+ auto d_c = _mm512_permute_ps(b, 0xB1); //d c
901
+ d_c = _mm512_xor_ps(sign_mask, d_c); //d -c
902
+ auto ad_bc = _mm512_mul_ps(a, d_c); //ad -bc
903
+
904
+ auto ret = Vectorized<c10::complex<float>>::hsub_ps(ac_bd, ad_bc); //ac - bd ad + bc
905
+ return ret;
906
+ }
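+ // Worked example of the lane arithmetic above: (1 + 2i) * (3 + 4i)
+ // = (1*3 - 2*4) + (1*4 + 2*3)i = -5 + 10i; hsub_ps turns the pairs
+ // (ac, bd) and (ad, -bc) into (ac - bd, ad + bc) per complex element.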
907
+
908
+ template <> Vectorized<c10::complex<float>> inline operator/(const Vectorized<c10::complex<float>> &a,
909
+ const Vectorized<c10::complex<float>> &b) {
910
+ //re + im*i = (a + bi) / (c + di)
911
+ auto mask = _mm512_set1_ps(-0.f);
912
+ auto fabs_cd = _mm512_andnot_ps(mask, b); // |c| |d|
913
+ auto fabs_dc = _mm512_permute_ps(fabs_cd, 0xB1); // |d| |c|
914
+ auto scale = _mm512_rcp14_ps(_mm512_max_ps(fabs_cd, fabs_dc)); // 1/sc 1/sc
915
+ auto a2 = _mm512_mul_ps(a, scale); // a/sc b/sc
916
+ auto b2 = _mm512_mul_ps(b, scale); // c/sc d/sc
917
+ auto acbd2 = _mm512_mul_ps(a2, b2);
918
+
919
+ const __m512 sign_mask = _mm512_setr_ps(-0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0,
920
+ -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0);
921
+ auto dc2 = _mm512_permute_ps(b2, 0xB1); // d/sc c/sc
922
+ dc2 = _mm512_xor_ps(sign_mask, dc2); // -d/sc c/sc
923
+ auto adbc2 = _mm512_mul_ps(a2, dc2); //-ad/sc^2 bc/sc^2
924
+ auto res2 = Vectorized<c10::complex<float>>::hadd_ps(acbd2, adbc2); //(ac+bd)/sc^2 (bc-ad)/sc^2
925
+
926
+ // get the denominator
927
+ auto denom2 = Vectorized<c10::complex<float>>(b2).abs_2_(); // (c^2+d^2)/sc^2 (c^2+d^2)/sc^2
928
+ res2 = _mm512_div_ps(res2, denom2);
929
+ return res2;
930
+ }
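+ // Note on the scaling above: numerator and denominator are both divided by
+ // sc = max(|c|, |d|), which leaves the quotient unchanged while keeping
+ // c^2 + d^2 away from overflow/underflow in single precision.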
931
+
932
+ // reciprocal. Implement this here so we can use multiplication.
933
+ inline Vectorized<c10::complex<float>> Vectorized<c10::complex<float>>::reciprocal() const {
934
+ //re + im*i = (a + bi) / (c + di)
935
+ //re = (ac + bd)/abs_2() = c/abs_2()
936
+ //im = (bc - ad)/abs_2() = d/abs_2()
937
+ const __m512 sign_mask = _mm512_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0,
938
+ 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
939
+ auto c_d = _mm512_xor_ps(sign_mask, values); //c -d
940
+ return _mm512_div_ps(c_d, abs_2_());
941
+ }
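+ // Scalar check of the formula above: 1/(0 + 1i) = conj(i)/|i|^2 = (0 - 1i)/1
+ // = -i, i.e. re = c/(c^2 + d^2) and im = -d/(c^2 + d^2).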
942
+
943
+ inline Vectorized<c10::complex<float>> Vectorized<c10::complex<float>>::atan() const {
944
+ // atan(x) = i/2 * ln((i + z)/(i - z))
945
+ const __m512 i = _mm512_setr_ps(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0,
946
+ 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0);
947
+ const Vectorized i_half = _mm512_setr_ps(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5,
948
+ 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5);
949
+
950
+ auto sum = Vectorized(_mm512_add_ps(i, values)); // a 1+b
951
+ auto sub = Vectorized(_mm512_sub_ps(i, values)); // -a 1-b
952
+ auto ln = (sum/sub).log(); // ln((i + z)/(i - z))
953
+ return i_half*ln; // i/2*ln()
954
+ }
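+ // Scalar check of the identity above: for z = 0, (i + z)/(i - z) = 1 and
+ // ln(1) = 0, so atan(0) = 0, matching std::atan.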
955
+
956
+ template <>
957
+ Vectorized<c10::complex<float>> inline maximum(const Vectorized<c10::complex<float>>& a,
958
+ const Vectorized<c10::complex<float>>& b) {
959
+ auto zero_vector = _mm512_set1_epi32(0);
960
+ auto abs_a = a.abs_2_();
961
+ auto abs_b = b.abs_2_();
962
+ auto mask = _mm512_cmp_ps_mask(abs_a, abs_b, _CMP_LT_OQ);
963
+ auto max = _mm512_mask_blend_ps(mask, a, b);
964
+ // Exploit the fact that all-ones is a NaN.
965
+ auto isnan_mask = _mm512_cmp_ps_mask(abs_a, abs_b, _CMP_UNORD_Q);
966
+ auto isnan = _mm512_mask_set1_epi32(zero_vector, isnan_mask, 0xFFFFFFFF);
967
+ return _mm512_or_ps(max, _mm512_castsi512_ps(isnan));
968
+ }
969
+
970
+ template <>
971
+ Vectorized<c10::complex<float>> inline minimum(const Vectorized<c10::complex<float>>& a,
972
+ const Vectorized<c10::complex<float>>& b) {
973
+ auto zero_vector = _mm512_set1_epi32(0);
974
+ auto abs_a = a.abs_2_();
975
+ auto abs_b = b.abs_2_();
976
+ auto mask = _mm512_cmp_ps_mask(abs_a, abs_b, _CMP_GT_OQ);
977
+ auto min = _mm512_mask_blend_ps(mask, a, b);
978
+ // Exploit the fact that all-ones is a NaN.
979
+ auto isnan_mask = _mm512_cmp_ps_mask(abs_a, abs_b, _CMP_UNORD_Q);
980
+ auto isnan = _mm512_mask_set1_epi32(zero_vector, isnan_mask, 0xFFFFFFFF);
981
+ return _mm512_or_ps(min, _mm512_castsi512_ps(isnan));
982
+ }
983
+
984
+ template <>
985
+ Vectorized<c10::complex<float>> inline operator&(const Vectorized<c10::complex<float>>& a,
986
+ const Vectorized<c10::complex<float>>& b) {
987
+ return _mm512_and_ps(a, b);
988
+ }
989
+
990
+ template <>
991
+ Vectorized<c10::complex<float>> inline operator|(const Vectorized<c10::complex<float>>& a,
992
+ const Vectorized<c10::complex<float>>& b) {
993
+ return _mm512_or_ps(a, b);
994
+ }
995
+
996
+ template <>
997
+ Vectorized<c10::complex<float>> inline operator^(const Vectorized<c10::complex<float>>& a,
998
+ const Vectorized<c10::complex<float>>& b) {
999
+ return _mm512_xor_ps(a, b);
1000
+ }
1001
+
1002
+ inline Vectorized<c10::complex<float>> Vectorized<c10::complex<float>>::eq(
1003
+ const Vectorized<c10::complex<float>>& other) const {
1004
+ auto eq = (*this == other); // compares real and imag individually
1005
+ // If both real numbers and imag numbers are equal, then the complex numbers are equal
1006
+ return (eq.real() & eq.imag()) & Vectorized<c10::complex<float>>(_mm512_set1_ps(1.0f));
1007
+ }
1008
+
1009
+ inline Vectorized<c10::complex<float>> Vectorized<c10::complex<float>>::ne(
1010
+ const Vectorized<c10::complex<float>>& other) const {
1011
+ auto ne = (*this != other); // compares real and imag individually
1012
+ // If either real numbers or imag numbers are not equal, then the complex numbers are not equal
1013
+ return (ne.real() | ne.imag()) & Vectorized<c10::complex<float>>(_mm512_set1_ps(1.0f));
1014
+ }
1015
+
1016
+ #endif
1017
+
1018
+ }}}
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_double.h ADDED
@@ -0,0 +1,467 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+ #include <ATen/cpu/vec/vec_base.h>
8
+ #include <c10/util/irange.h>
9
+ #if (defined(CPU_CAPABILITY_AVX512)) && !defined(_MSC_VER)
10
+ #include <sleef.h>
11
+ #endif
12
+
13
+ namespace at {
14
+ namespace vec {
15
+ // See Note [CPU_CAPABILITY namespace]
16
+ inline namespace CPU_CAPABILITY {
17
+
18
+ #if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
19
+
20
+ template <> class Vectorized<double> {
21
+ private:
22
+ static constexpr __m512i zero_vector {0, 0, 0, 0, 0, 0, 0, 0};
23
+ public:
24
+ // values needs to be public for compilation with clang
25
+ // as vec512.h uses it
26
+ __m512d values;
27
+ using value_type = double;
28
+ using size_type = int;
29
+ static constexpr size_type size() {
30
+ return 8;
31
+ }
32
+ Vectorized() {}
33
+ Vectorized(__m512d v) : values(v) {}
34
+ Vectorized(double val) {
35
+ values = _mm512_set1_pd(val);
36
+ }
37
+ Vectorized(double val1, double val2, double val3, double val4,
38
+ double val5, double val6, double val7, double val8) {
39
+ values = _mm512_setr_pd(val1, val2, val3, val4, val5, val6, val7, val8);
40
+ }
41
+ operator __m512d() const {
42
+ return values;
43
+ }
44
+ template <int64_t mask>
45
+ static Vectorized<double> blend(const Vectorized<double>& a, const Vectorized<double>& b) {
46
+ return _mm512_mask_blend_pd(mask, a.values, b.values);
47
+ }
48
+ static Vectorized<double> blendv(const Vectorized<double>& a, const Vectorized<double>& b,
49
+ const Vectorized<double>& mask) {
50
+ auto all_ones = _mm512_set1_epi64(0xFFFFFFFFFFFFFFFF);
51
+ auto mmask = _mm512_cmp_epi64_mask(_mm512_castpd_si512(mask.values), all_ones, _MM_CMPINT_EQ);
52
+ return _mm512_mask_blend_pd(mmask, a.values, b.values);
53
+ }
54
+ template<typename step_t>
55
+ static Vectorized<double> arange(double base = 0., step_t step = static_cast<step_t>(1)) {
56
+ return Vectorized<double>(base, base + step, base + 2 * step, base + 3 * step,
57
+ base + 4 * step, base + 5 * step, base + 6 * step,
58
+ base + 7 * step);
59
+ }
60
+ static Vectorized<double> set(const Vectorized<double>& a, const Vectorized<double>& b,
61
+ int64_t count = size()) {
62
+ switch (count) {
63
+ case 0:
64
+ return a;
65
+ case 1:
66
+ return blend<1>(a, b);
67
+ case 2:
68
+ return blend<3>(a, b);
69
+ case 3:
70
+ return blend<7>(a, b);
71
+ case 4:
72
+ return blend<15>(a, b);
73
+ case 5:
74
+ return blend<31>(a, b);
75
+ case 6:
76
+ return blend<63>(a, b);
77
+ case 7:
78
+ return blend<127>(a, b);
79
+ }
80
+ return b;
81
+ }
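+ // The blend constants above are just the low `count` bits set:
+ // e.g. count == 3 uses blend<7> (0b0000111), taking lanes 0..2 from b
+ // and the remaining lanes from a.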
82
+ static Vectorized<double> loadu(const void* ptr, int64_t count = size()) {
83
+ if (count == size())
84
+ return _mm512_loadu_pd(reinterpret_cast<const double*>(ptr));
85
+
86
+ __mmask8 mask = (1ULL << count) - 1;
87
+ return _mm512_maskz_loadu_pd(mask, ptr);
88
+ }
89
+ void store(void* ptr, int count = size()) const {
90
+ if (count == size()) {
91
+ _mm512_storeu_pd(reinterpret_cast<double*>(ptr), values);
92
+ } else if (count > 0) {
93
+ __mmask8 mask = (1ULL << count) - 1;
94
+ _mm512_mask_storeu_pd(reinterpret_cast<double*>(ptr), mask, values);
95
+ }
96
+ }
97
+ const double& operator[](int idx) const = delete;
98
+ double& operator[](int idx) = delete;
99
+ int zero_mask() const {
100
+ // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit
101
+ __mmask8 cmp = _mm512_cmp_pd_mask(values, _mm512_set1_pd(0.0), _CMP_EQ_OQ);
102
+ return static_cast<int32_t>(cmp);
103
+ }
104
+ Vectorized<double> isnan() const {
105
+ auto cmp_mask = _mm512_cmp_pd_mask(values, _mm512_set1_pd(0.0), _CMP_UNORD_Q);
106
+ return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask,
107
+ 0xFFFFFFFFFFFFFFFF));
108
+ }
109
+ bool has_inf_nan() const {
110
+ __m512d self_sub = _mm512_sub_pd(values, values);
111
+ return (_mm512_movepi8_mask(_mm512_castpd_si512(self_sub)) & 0x7777777777777777) != 0;
112
+ }
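+ // Rationale (informal): x - x evaluates to 0.0 for every finite x but to NaN
+ // when x is +/-Inf or NaN, so only non-finite lanes leave NaN byte patterns
+ // in self_sub for the byte-mask test above to detect.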
113
+ Vectorized<double> map(double (*const f)(double)) const {
114
+ __at_align__ double tmp[size()];
115
+ store(tmp);
116
+ for (const auto i : c10::irange(size())) {
117
+ tmp[i] = f(tmp[i]);
118
+ }
119
+ return loadu(tmp);
120
+ }
121
+ Vectorized<double> abs() const {
122
+ auto mask = _mm512_set1_pd(-0.f);
123
+ return _mm512_andnot_pd(mask, values);
124
+ }
125
+ Vectorized<double> angle() const {
126
+ const auto zero_vec = _mm512_castsi512_pd(zero_vector);
127
+ const auto nan_vec = _mm512_set1_pd(NAN);
128
+ const auto not_nan_mask = _mm512_cmp_pd_mask(values, values, _CMP_EQ_OQ);
129
+ const auto not_nan = _mm512_mask_set1_epi64(zero_vector, not_nan_mask,
130
+ 0xFFFFFFFFFFFFFFFF);
131
+ const auto nan_mask = _mm512_cmp_pd_mask(_mm512_castsi512_pd(not_nan),
132
+ zero_vec, _CMP_EQ_OQ);
133
+ const auto pi = _mm512_set1_pd(c10::pi<double>);
134
+
135
+ const auto neg_mask = _mm512_cmp_pd_mask(values, zero_vec, _CMP_LT_OQ);
136
+ auto angle = _mm512_mask_blend_pd(neg_mask, zero_vec, pi);
137
+ angle = _mm512_mask_blend_pd(nan_mask, angle, nan_vec);
138
+ return angle;
139
+ }
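+ // i.e. angle(x) follows the real-valued convention: 0 for x >= 0, pi for
+ // x < 0, and NaN propagated for NaN inputs.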
140
+ Vectorized<double> real() const {
141
+ return *this;
142
+ }
143
+ Vectorized<double> imag() const {
144
+ return _mm512_set1_pd(0);
145
+ }
146
+ Vectorized<double> conj() const {
147
+ return *this;
148
+ }
149
+ Vectorized<double> acos() const {
150
+ return Vectorized<double>(Sleef_acosd8_u10(values));
151
+ }
152
+ Vectorized<double> acosh() const {
153
+ return Vectorized<double>(Sleef_acoshd8_u10(values));
154
+ }
155
+ Vectorized<double> asin() const {
156
+ return Vectorized<double>(Sleef_asind8_u10(values));
157
+ }
158
+ Vectorized<double> atan() const {
159
+ return Vectorized<double>(Sleef_atand8_u10(values));
160
+ }
161
+ Vectorized<double> atanh() const {
162
+ return Vectorized<double>(Sleef_atanhd8_u10(values));
163
+ }
164
+ Vectorized<double> atan2(const Vectorized<double> &b) const {
165
+ return Vectorized<double>(Sleef_atan2d8_u10(values, b));
166
+ }
167
+ Vectorized<double> copysign(const Vectorized<double> &sign) const {
168
+ return Vectorized<double>(Sleef_copysignd8(values, sign));
169
+ }
170
+ Vectorized<double> erf() const {
171
+ return Vectorized<double>(Sleef_erfd8_u10(values));
172
+ }
173
+ Vectorized<double> erfc() const {
174
+ return Vectorized<double>(Sleef_erfcd8_u15(values));
175
+ }
176
+ Vectorized<double> erfinv() const {
177
+ return map(calc_erfinv);
178
+ }
179
+ Vectorized<double> exp() const {
180
+ return Vectorized<double>(Sleef_expd8_u10(values));
181
+ }
182
+ Vectorized<double> exp2() const {
183
+ return Vectorized<double>(Sleef_exp2d8_u10(values));
184
+ }
185
+ Vectorized<double> expm1() const {
186
+ return Vectorized<double>(Sleef_expm1d8_u10(values));
187
+ }
188
+ Vectorized<double> exp_u20() const {
189
+ return exp();
190
+ }
191
+ Vectorized<double> fmod(const Vectorized<double>& q) const {
192
+ return Vectorized<double>(Sleef_fmodd8(values, q));
193
+ }
194
+ Vectorized<double> hypot(const Vectorized<double> &b) const {
195
+ return Vectorized<double>(Sleef_hypotd8_u05(values, b));
196
+ }
197
+ Vectorized<double> i0() const {
198
+ return map(calc_i0);
199
+ }
200
+ Vectorized<double> i0e() const {
201
+ return map(calc_i0e);
202
+ }
203
+ Vectorized<double> digamma() const {
204
+ return map(calc_digamma);
205
+ }
206
+ Vectorized<double> igamma(const Vectorized<double> &x) const {
207
+ __at_align__ double tmp[size()];
208
+ __at_align__ double tmp_x[size()];
209
+ store(tmp);
210
+ x.store(tmp_x);
211
+ for (const auto i : c10::irange(size())) {
212
+ tmp[i] = calc_igamma(tmp[i], tmp_x[i]);
213
+ }
214
+ return loadu(tmp);
215
+ }
216
+ Vectorized<double> igammac(const Vectorized<double> &x) const {
217
+ __at_align__ double tmp[size()];
218
+ __at_align__ double tmp_x[size()];
219
+ store(tmp);
220
+ x.store(tmp_x);
221
+ for (const auto i : c10::irange(size())) {
222
+ tmp[i] = calc_igammac(tmp[i], tmp_x[i]);
223
+ }
224
+ return loadu(tmp);
225
+ }
226
+ Vectorized<double> log() const {
227
+ return Vectorized<double>(Sleef_logd8_u10(values));
228
+ }
229
+ Vectorized<double> log2() const {
230
+ return Vectorized<double>(Sleef_log2d8_u10(values));
231
+ }
232
+ Vectorized<double> log10() const {
233
+ return Vectorized<double>(Sleef_log10d8_u10(values));
234
+ }
235
+ Vectorized<double> log1p() const {
236
+ return Vectorized<double>(Sleef_log1pd8_u10(values));
237
+ }
238
+ Vectorized<double> sin() const {
239
+ return Vectorized<double>(Sleef_sind8_u10(values));
240
+ }
241
+ Vectorized<double> sinh() const {
242
+ return Vectorized<double>(Sleef_sinhd8_u10(values));
243
+ }
244
+ Vectorized<double> cos() const {
245
+ return Vectorized<double>(Sleef_cosd8_u10(values));
246
+ }
247
+ Vectorized<double> cosh() const {
248
+ return Vectorized<double>(Sleef_coshd8_u10(values));
249
+ }
250
+ Vectorized<double> ceil() const {
251
+ return _mm512_ceil_pd(values);
252
+ }
253
+ Vectorized<double> floor() const {
254
+ return _mm512_floor_pd(values);
255
+ }
256
+ Vectorized<double> frac() const;
257
+ Vectorized<double> neg() const {
258
+ return _mm512_xor_pd(_mm512_set1_pd(-0.), values);
259
+ }
260
+ Vectorized<double> nextafter(const Vectorized<double> &b) const {
261
+ return Vectorized<double>(Sleef_nextafterd8(values, b));
262
+ }
263
+ Vectorized<double> round() const {
264
+ return _mm512_roundscale_pd(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
265
+ }
266
+ Vectorized<double> tan() const {
267
+ return Vectorized<double>(Sleef_tand8_u10(values));
268
+ }
269
+ Vectorized<double> tanh() const {
270
+ return Vectorized<double>(Sleef_tanhd8_u10(values));
271
+ }
272
+ Vectorized<double> trunc() const {
273
+ return _mm512_roundscale_pd(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
274
+ }
275
+ Vectorized<double> lgamma() const {
276
+ return Vectorized<double>(Sleef_lgammad8_u10(values));
277
+ }
278
+ Vectorized<double> sqrt() const {
279
+ return _mm512_sqrt_pd(values);
280
+ }
281
+ Vectorized<double> reciprocal() const {
282
+ return _mm512_div_pd(_mm512_set1_pd(1), values);
283
+ }
284
+ Vectorized<double> rsqrt() const {
285
+ return _mm512_div_pd(_mm512_set1_pd(1), _mm512_sqrt_pd(values));
286
+ }
287
+ Vectorized<double> pow(const Vectorized<double> &b) const {
288
+ return Vectorized<double>(Sleef_powd8_u10(values, b));
289
+ }
290
+ // Comparison using the _CMP_**_OQ predicate.
291
+ // `O`: get false if an operand is NaN
292
+ // `Q`: do not raise if an operand is NaN
293
+ Vectorized<double> operator==(const Vectorized<double>& other) const {
294
+ auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_EQ_OQ);
295
+ return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask,
296
+ 0xFFFFFFFFFFFFFFFF));
297
+ }
298
+
299
+ Vectorized<double> operator!=(const Vectorized<double>& other) const {
300
+ auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_NEQ_UQ);
301
+ return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask,
302
+ 0xFFFFFFFFFFFFFFFF));
303
+ }
304
+
305
+ Vectorized<double> operator<(const Vectorized<double>& other) const {
306
+ auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_LT_OQ);
307
+ return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask,
308
+ 0xFFFFFFFFFFFFFFFF));
309
+ }
310
+
311
+ Vectorized<double> operator<=(const Vectorized<double>& other) const {
312
+ auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_LE_OQ);
313
+ return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask,
314
+ 0xFFFFFFFFFFFFFFFF));
315
+ }
316
+
317
+ Vectorized<double> operator>(const Vectorized<double>& other) const {
318
+ auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_GT_OQ);
319
+ return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask,
320
+ 0xFFFFFFFFFFFFFFFF));
321
+ }
322
+
323
+ Vectorized<double> operator>=(const Vectorized<double>& other) const {
324
+ auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_GE_OQ);
325
+ return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask,
326
+ 0xFFFFFFFFFFFFFFFF));
327
+ }
328
+
329
+ Vectorized<double> eq(const Vectorized<double>& other) const;
330
+ Vectorized<double> ne(const Vectorized<double>& other) const;
331
+ Vectorized<double> lt(const Vectorized<double>& other) const;
332
+ Vectorized<double> le(const Vectorized<double>& other) const;
333
+ Vectorized<double> gt(const Vectorized<double>& other) const;
334
+ Vectorized<double> ge(const Vectorized<double>& other) const;
335
+ };
336
+
337
+ template <>
338
+ Vectorized<double> inline operator+(const Vectorized<double>& a, const Vectorized<double>& b) {
339
+ return _mm512_add_pd(a, b);
340
+ }
341
+
342
+ template <>
343
+ Vectorized<double> inline operator-(const Vectorized<double>& a, const Vectorized<double>& b) {
344
+ return _mm512_sub_pd(a, b);
345
+ }
346
+
347
+ template <>
348
+ Vectorized<double> inline operator*(const Vectorized<double>& a, const Vectorized<double>& b) {
349
+ return _mm512_mul_pd(a, b);
350
+ }
351
+
352
+ template <>
353
+ Vectorized<double> inline operator/(const Vectorized<double>& a, const Vectorized<double>& b) {
354
+ return _mm512_div_pd(a, b);
355
+ }
356
+
357
+ // frac. Implement this here so we can use subtraction.
358
+ inline Vectorized<double> Vectorized<double>::frac() const {
359
+ return *this - this->trunc();
360
+ }
361
+
362
+ // Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
363
+ // either input is a NaN.
364
+ template <>
365
+ Vectorized<double> inline maximum(const Vectorized<double>& a, const Vectorized<double>& b) {
366
+ auto zero_vec = _mm512_set1_epi64(0);
367
+ Vectorized<double> max = _mm512_max_pd(a, b);
368
+ auto isnan_mask = _mm512_cmp_pd_mask(a, b, _CMP_UNORD_Q);
369
+ auto isnan = _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vec, isnan_mask,
370
+ 0xFFFFFFFFFFFFFFFF));
371
+ // Exploit the fact that all-ones is a NaN.
372
+ return _mm512_or_pd(max, isnan);
373
+ }
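+ // Example of the NaN propagation above: maximum(2.0, NaN) ORs an all-ones
+ // lane into the result, and an all-ones double bit pattern is itself a NaN,
+ // so the lane comes out NaN as the IEEE 754 201X `maximum` requires.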
374
+
375
+ // Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
376
+ // either input is a NaN.
377
+ template <>
378
+ Vectorized<double> inline minimum(const Vectorized<double>& a, const Vectorized<double>& b) {
379
+ auto zero_vec = _mm512_set1_epi64(0);
380
+ Vectorized<double> min = _mm512_min_pd(a, b);
381
+ auto isnan_mask = _mm512_cmp_pd_mask(a, b, _CMP_UNORD_Q);
382
+ auto isnan = _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vec, isnan_mask,
383
+ 0xFFFFFFFFFFFFFFFF));
384
+ // Exploit the fact that all-ones is a NaN.
385
+ return _mm512_or_pd(min, isnan);
386
+ }
387
+
388
+ template <>
389
+ Vectorized<double> inline clamp(const Vectorized<double>& a, const Vectorized<double>& min, const Vectorized<double>& max) {
390
+ return _mm512_min_pd(max, _mm512_max_pd(min, a));
391
+ }
392
+
393
+ template <>
394
+ Vectorized<double> inline clamp_min(const Vectorized<double>& a, const Vectorized<double>& min) {
395
+ return _mm512_max_pd(min, a);
396
+ }
397
+
398
+ template <>
399
+ Vectorized<double> inline clamp_max(const Vectorized<double>& a, const Vectorized<double>& max) {
400
+ return _mm512_min_pd(max, a);
401
+ }
402
+
403
+ template <>
404
+ Vectorized<double> inline operator&(const Vectorized<double>& a, const Vectorized<double>& b) {
405
+ return _mm512_and_pd(a, b);
406
+ }
407
+
408
+ template <>
409
+ Vectorized<double> inline operator|(const Vectorized<double>& a, const Vectorized<double>& b) {
410
+ return _mm512_or_pd(a, b);
411
+ }
412
+
413
+ template <>
414
+ Vectorized<double> inline operator^(const Vectorized<double>& a, const Vectorized<double>& b) {
415
+ return _mm512_xor_pd(a, b);
416
+ }
417
+
418
+ inline Vectorized<double> Vectorized<double>::eq(const Vectorized<double>& other) const {
419
+ return (*this == other) & Vectorized<double>(1.0);
420
+ }
421
+
422
+ inline Vectorized<double> Vectorized<double>::ne(const Vectorized<double>& other) const {
423
+ return (*this != other) & Vectorized<double>(1.0);
424
+ }
425
+
426
+ inline Vectorized<double> Vectorized<double>::gt(const Vectorized<double>& other) const {
427
+ return (*this > other) & Vectorized<double>(1.0);
428
+ }
429
+
430
+ inline Vectorized<double> Vectorized<double>::ge(const Vectorized<double>& other) const {
431
+ return (*this >= other) & Vectorized<double>(1.0);
432
+ }
433
+
434
+ inline Vectorized<double> Vectorized<double>::lt(const Vectorized<double>& other) const {
435
+ return (*this < other) & Vectorized<double>(1.0);
436
+ }
437
+
438
+ inline Vectorized<double> Vectorized<double>::le(const Vectorized<double>& other) const {
439
+ return (*this <= other) & Vectorized<double>(1.0);
440
+ }
441
+
442
+ template <>
443
+ inline void convert(const double* src, double* dst, int64_t n) {
444
+ int64_t i;
445
+ #pragma unroll
446
+ for (i = 0; i <= (n - Vectorized<double>::size()); i += Vectorized<double>::size()) {
447
+ _mm512_storeu_pd(dst + i, _mm512_loadu_pd(src + i));
448
+ }
449
+ #pragma unroll
450
+ for (; i < n; i++) {
451
+ dst[i] = src[i];
452
+ }
453
+ }
454
+
455
+ template <>
456
+ Vectorized<double> inline fmadd(const Vectorized<double>& a, const Vectorized<double>& b, const Vectorized<double>& c) {
457
+ return _mm512_fmadd_pd(a, b, c);
458
+ }
459
+
460
+ template <>
461
+ Vectorized<double> inline fmsub(const Vectorized<double>& a, const Vectorized<double>& b, const Vectorized<double>& c) {
462
+ return _mm512_fmsub_pd(a, b, c);
463
+ }
464
+
465
+ #endif
466
+
467
+ }}}
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_float.h ADDED
@@ -0,0 +1,793 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+ #include <ATen/cpu/vec/vec_base.h>
8
+ #include <c10/util/irange.h>
9
+ #if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
10
+ #include <sleef.h>
11
+ #endif
12
+
13
+ namespace at {
14
+ namespace vec {
15
+ // See Note [CPU_CAPABILITY namespace]
16
+ inline namespace CPU_CAPABILITY {
17
+
18
+ #if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
19
+
20
+ template <> class Vectorized<float> {
21
+ private:
22
+ static constexpr __m512i zero_vec {0, 0, 0, 0, 0, 0, 0, 0};
23
+ public:
24
+ __m512 values;
25
+ using value_type = float;
26
+ using size_type = int;
27
+ static constexpr size_type size() {
28
+ return 16;
29
+ }
30
+ Vectorized() {}
31
+ Vectorized(__m512 v) : values(v) {}
32
+ Vectorized(float val) {
33
+ values = _mm512_set1_ps(val);
34
+ }
35
+ Vectorized(float val1, float val2, float val3, float val4,
36
+ float val5, float val6, float val7, float val8,
37
+ float val9, float val10, float val11, float val12,
38
+ float val13, float val14, float val15, float val16) {
39
+ values = _mm512_setr_ps(val1, val2, val3, val4, val5, val6, val7, val8,
40
+ val9, val10, val11, val12, val13, val14, val15, val16);
41
+ }
42
+ operator __m512() const {
43
+ return values;
44
+ }
45
+ template <int64_t mask>
46
+ static Vectorized<float> blend(const Vectorized<float>& a, const Vectorized<float>& b) {
47
+ return _mm512_mask_blend_ps(mask, a.values, b.values);
48
+ }
49
+ static Vectorized<float> blendv(const Vectorized<float>& a, const Vectorized<float>& b,
50
+ const Vectorized<float>& mask) {
51
+ auto all_ones = _mm512_set1_epi32(0xFFFFFFFF);
52
+ auto mmask = _mm512_cmp_epi32_mask(_mm512_castps_si512(mask.values), all_ones, _MM_CMPINT_EQ);
53
+ return _mm512_mask_blend_ps(mmask, a.values, b.values);
54
+ }
55
+ template<typename step_t>
56
+ static Vectorized<float> arange(float base = 0.f, step_t step = static_cast<step_t>(1)) {
57
+ return Vectorized<float>(
58
+ base, base + step, base + 2 * step, base + 3 * step,
59
+ base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step,
60
+ base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step,
61
+ base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step);
62
+ }
63
+ static Vectorized<float> set(const Vectorized<float>& a, const Vectorized<float>& b,
64
+ int64_t count = size()) {
65
+ switch (count) {
66
+ case 0:
67
+ return a;
68
+ case 1:
69
+ return blend<1>(a, b);
70
+ case 2:
71
+ return blend<3>(a, b);
72
+ case 3:
73
+ return blend<7>(a, b);
74
+ case 4:
75
+ return blend<15>(a, b);
76
+ case 5:
77
+ return blend<31>(a, b);
78
+ case 6:
79
+ return blend<63>(a, b);
80
+ case 7:
81
+ return blend<127>(a, b);
82
+ case 8:
83
+ return blend<255>(a, b);
84
+ case 9:
85
+ return blend<511>(a, b);
86
+ case 10:
87
+ return blend<1023>(a, b);
88
+ case 11:
89
+ return blend<2047>(a, b);
90
+ case 12:
91
+ return blend<4095>(a, b);
92
+ case 13:
93
+ return blend<8191>(a, b);
94
+ case 14:
95
+ return blend<16383>(a, b);
96
+ case 15:
97
+ return blend<32767>(a, b);
98
+ }
99
+ return b;
100
+ }
101
+ static Vectorized<float> loadu(const void* ptr, int64_t count = size()) {
102
+ if (count == size())
103
+ return _mm512_loadu_ps(reinterpret_cast<const float*>(ptr));
104
+
105
+ __mmask16 mask = (1ULL << count) - 1;
106
+ return _mm512_maskz_loadu_ps(mask, ptr);
107
+ }
108
+ void store(void* ptr, int64_t count = size()) const {
109
+ if (count == size()) {
110
+ _mm512_storeu_ps(reinterpret_cast<float*>(ptr), values);
111
+ } else if (count > 0) {
112
+ __mmask16 mask = (1ULL << count) - 1;
113
+ _mm512_mask_storeu_ps(reinterpret_cast<float*>(ptr), mask, values);
114
+ }
115
+ }
116
+ const float& operator[](int idx) const = delete;
117
+ float& operator[](int idx) = delete;
118
+ int zero_mask() const {
119
+ // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit
120
+ __mmask16 cmp = _mm512_cmp_ps_mask(values, _mm512_set1_ps(0.0), _CMP_EQ_OQ);
121
+ return static_cast<int32_t>(cmp);
122
+ }
123
+ Vectorized<float> isnan() const {
124
+ auto mask = _mm512_cmp_ps_mask(values, _mm512_set1_ps(0.0), _CMP_UNORD_Q);
125
+ return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask,
126
+ 0xFFFFFFFF));
127
+ }
128
+ bool has_inf_nan() const {
129
+ __m512 self_sub = _mm512_sub_ps(values, values);
130
+ return (_mm512_movepi8_mask(_mm512_castps_si512(self_sub)) & 0x7777777777777777) != 0;
131
+ }
132
+ Vectorized<float> map(float (*const f)(float)) const {
133
+ __at_align__ float tmp[size()];
134
+ store(tmp);
135
+ for (const auto i : c10::irange(size())) {
136
+ tmp[i] = f(tmp[i]);
137
+ }
138
+ return loadu(tmp);
139
+ }
140
+ Vectorized<float> abs() const {
141
+ auto mask = _mm512_set1_ps(-0.f);
142
+ return _mm512_andnot_ps(mask, values);
143
+ }
144
+ Vectorized<float> angle() const {
145
+ __m512 zero_vec = _mm512_set1_ps(0.f);
146
+ const auto nan_vec = _mm512_set1_ps(NAN);
147
+ const auto not_nan_mask = _mm512_cmp_ps_mask(values, values, _CMP_EQ_OQ);
148
+ const auto not_nan_vec = _mm512_mask_set1_epi32(_mm512_castps_si512(zero_vec),
149
+ not_nan_mask, 0xFFFFFFFF);
150
+ const auto nan_mask = _mm512_cmp_ps_mask(_mm512_castsi512_ps(not_nan_vec),
151
+ zero_vec, _CMP_EQ_OQ);
152
+ const auto pi = _mm512_set1_ps(c10::pi<double>);
153
+
154
+ const auto neg_mask = _mm512_cmp_ps_mask(values, zero_vec, _CMP_LT_OQ);
155
+ auto angle = _mm512_mask_blend_ps(neg_mask, zero_vec, pi);
156
+ angle = _mm512_mask_blend_ps(nan_mask, angle, nan_vec);
157
+ return angle;
158
+ }
159
+ Vectorized<float> real() const {
160
+ return *this;
161
+ }
162
+ Vectorized<float> imag() const {
163
+ return _mm512_set1_ps(0);
164
+ }
165
+ Vectorized<float> conj() const {
166
+ return *this;
167
+ }
168
+ Vectorized<float> acos() const {
169
+ return Vectorized<float>(Sleef_acosf16_u10(values));
170
+ }
171
+ Vectorized<float> acosh() const {
172
+ return Vectorized<float>(Sleef_acoshf16_u10(values));
173
+ }
174
+ Vectorized<float> asin() const {
175
+ return Vectorized<float>(Sleef_asinf16_u10(values));
176
+ }
177
+ Vectorized<float> atan() const {
178
+ return Vectorized<float>(Sleef_atanf16_u10(values));
179
+ }
180
+ Vectorized<float> atanh() const {
181
+ return Vectorized<float>(Sleef_atanhf16_u10(values));
182
+ }
183
+ Vectorized<float> atan2(const Vectorized<float> &b) const {
184
+ return Vectorized<float>(Sleef_atan2f16_u10(values, b));
185
+ }
186
+ Vectorized<float> copysign(const Vectorized<float> &sign) const {
187
+ return Vectorized<float>(Sleef_copysignf16(values, sign));
188
+ }
189
+ Vectorized<float> erf() const {
190
+ // constants
191
+ const auto neg_zero_vec = _mm512_set1_ps(-0.f);
192
+ const auto one_vec = _mm512_set1_ps(1.0f);
193
+ const auto p = _mm512_set1_ps(0.3275911f);
194
+ const auto p1 = _mm512_set1_ps(0.254829592f);
195
+ const auto p2 = _mm512_set1_ps(-0.284496736f);
196
+ const auto p3 = _mm512_set1_ps(1.421413741f);
197
+ const auto p4 = _mm512_set1_ps(-1.453152027f);
198
+ const auto p5 = _mm512_set1_ps(1.061405429f);
199
+ // sign(x)
200
+ auto sign_mask = _mm512_and_ps(neg_zero_vec, values);
201
+ auto abs_vec = _mm512_abs_ps(values);
202
+ // t = 1 / (p * abs(x) + 1)
203
+ auto tmp0 = _mm512_fmadd_ps(p, abs_vec, one_vec);
204
+ auto t = _mm512_div_ps(one_vec, tmp0);
205
+ // r = p5 * t ^ 4 + p4 * t ^ 3 + p3 * t ^ 2 + p2 * t + p1
206
+ auto tmp1 = _mm512_fmadd_ps(p5, t, p4);
207
+ auto tmp2 = _mm512_fmadd_ps(tmp1, t, p3);
208
+ auto tmp3 = _mm512_fmadd_ps(tmp2, t, p2);
209
+ auto r = _mm512_fmadd_ps(tmp3, t, p1);
210
+ // - exp(- x * x)
211
+ auto pow_2 = _mm512_mul_ps(values, values);
212
+ auto neg_pow_2 = _mm512_xor_ps(neg_zero_vec, pow_2);
213
+ // auto tmp4 = exp(neg_pow_2);
214
+ auto tmp4 = Vectorized<float>(Sleef_expf16_u10(neg_pow_2));
215
+ auto tmp5 = _mm512_xor_ps(neg_zero_vec, tmp4);
216
+ // erf(x) = sign(x) * (1 - r * t * exp(- x * x))
217
+ auto tmp6 = _mm512_mul_ps(tmp5, t);
218
+ auto tmp7 = _mm512_fmadd_ps(tmp6, r, one_vec);
219
+ return _mm512_xor_ps(sign_mask, tmp7);
220
+ }
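+ // The coefficients above appear to match the Abramowitz & Stegun 7.1.26
+ // rational approximation of erf (max absolute error on the order of 1e-7),
+ // evaluated with FMAs and combined with Sleef's exp(-x*x).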
221
+ Vectorized<float> erfc() const {
222
+ return Vectorized<float>(Sleef_erfcf16_u15(values));
223
+ }
224
+ Vectorized<float> erfinv() const {
225
+ return map(calc_erfinv);
226
+ }
227
+ Vectorized<float> exp() const {
228
+ return Vectorized<float>(Sleef_expf16_u10(values));
229
+ }
230
+ Vectorized<float> exp2() const {
231
+ return Vectorized<float>(Sleef_exp2f16_u10(values));
232
+ }
233
+ Vectorized<float> expm1() const {
234
+ return Vectorized<float>(Sleef_expm1f16_u10(values));
235
+ }
236
+ Vectorized<float> exp_u20() const {
237
+ // A faster version of exp with ULP=20
238
+ static __m512 vec_factorial_1 =
239
+ _mm512_set1_ps(0.999999701f); // 1/factorial(1)
240
+ static __m512 vec_factorial_2 =
241
+ _mm512_set1_ps(0.499991506f); // 1/factorial(2)
242
+ static __m512 vec_factorial_3 =
243
+ _mm512_set1_ps(0.166676521f); // 1/factorial(3)
244
+ static __m512 vec_factorial_4 =
245
+ _mm512_set1_ps(0.0418978221f); // 1/factorial(4)
246
+ static __m512 vec_factorial_5 =
247
+ _mm512_set1_ps(0.00828929059f); // 1/factorial(5)
248
+ static __m512 vec_exp_log2ef =
249
+ (__m512)_mm512_set1_epi32(0x3fb8aa3b); // log2(e)
250
+ static __m512 vec_half = _mm512_set1_ps(0.5f);
251
+ static __m512 vec_one = _mm512_set1_ps(1.f);
252
+ static __m512 vec_zero = _mm512_set1_ps(0.f);
253
+ static __m512 vec_two = _mm512_set1_ps(2.f);
254
+ static __m512 vec_ln2f = (__m512)_mm512_set1_epi32(0x3f317218); // ln(2)
255
+ static __m512 vec_ln_flt_min = (__m512)_mm512_set1_epi32(0xc2aeac50);
256
+ static __m512 vec_ln_flt_max = (__m512)_mm512_set1_epi32(0x42b17218);
257
+ static __m512i vec_127 = _mm512_set1_epi32(0x0000007f);
258
+ static int n_mantissa_bits = 23;
259
+
260
+ // exp(x) =
261
+ // = exp(n * ln(2) + r) // divide x by ln(2) and get quot and rem
262
+ // = 2^n * exp(r) // simplify the exp(n*ln(2)) expression
263
+
264
+ auto less_ln_flt_min_mask =
265
+ _mm512_cmp_ps_mask(values, vec_ln_flt_min, 1 /*_CMP_LT_OS*/);
266
+ auto vec_src = _mm512_min_ps(values, vec_ln_flt_max);
267
+ vec_src = _mm512_max_ps(vec_src, vec_ln_flt_min);
268
+
269
+ // fx = floorf(x * log2ef + 0.5)
270
+ auto vec_fx = _mm512_fmadd_ps(vec_src, vec_exp_log2ef, vec_half);
271
+ auto vec_fx_i = _mm512_cvt_roundps_epi32(
272
+ vec_fx, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC);
273
+ vec_fx = _mm512_cvtepi32_ps(vec_fx_i);
274
+
275
+ // x = x - fx * ln2
276
+ auto vec_exp_poly = _mm512_fnmadd_ps(vec_fx, vec_ln2f, vec_src);
277
+
278
+ // compute polynomial
279
+ auto vec_res =
280
+ _mm512_fmadd_ps(vec_exp_poly, vec_factorial_5, vec_factorial_4);
281
+ vec_res = _mm512_fmadd_ps(vec_exp_poly, vec_res, vec_factorial_3);
282
+ vec_res = _mm512_fmadd_ps(vec_exp_poly, vec_res, vec_factorial_2);
283
+ vec_res = _mm512_fmadd_ps(vec_exp_poly, vec_res, vec_factorial_1);
284
+ vec_res = _mm512_fmadd_ps(vec_exp_poly, vec_res, vec_one);
285
+
286
+ // compute 2^(n-1)
287
+ auto vec_exp_number = _mm512_sub_ps(vec_fx, vec_one);
288
+ auto vec_exp_number_i = _mm512_cvtps_epi32(vec_exp_number);
289
+ auto vec_two_pow_n_i = _mm512_add_epi32(vec_exp_number_i, vec_127);
290
+ vec_two_pow_n_i = _mm512_slli_epi32(vec_two_pow_n_i, n_mantissa_bits);
291
+ auto vec_two_pow_n = (__m512)vec_two_pow_n_i;
292
+ vec_two_pow_n =
293
+ _mm512_mask_blend_ps(less_ln_flt_min_mask, vec_two_pow_n, vec_zero);
294
+
295
+ // y = y * 2^n
296
+ vec_res = _mm512_mul_ps(vec_res, vec_two_pow_n);
297
+ vec_res = _mm512_mul_ps(vec_res, vec_two);
298
+ return vec_res;
299
+ }
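+ // Worked instance of the range reduction above (scalar view): for x = 1.0,
+ // fx = floor(x*log2(e) + 0.5) = 1, r = x - fx*ln(2) ~ 0.3069, the degree-5
+ // polynomial gives exp(r) ~ 1.359, and the 2^(n-1) scaling plus the final
+ // multiply by 2 recovers exp(1) ~ 2.718 within the ~20 ULP budget.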
300
+ Vectorized<float> fmod(const Vectorized<float>& q) const {
301
+ return Vectorized<float>(Sleef_fmodf16(values, q));
302
+ }
303
+ Vectorized<float> log() const {
304
+ return Vectorized<float>(Sleef_logf16_u10(values));
305
+ }
306
+ Vectorized<float> log2() const {
307
+ return Vectorized<float>(Sleef_log2f16_u10(values));
308
+ }
309
+ Vectorized<float> log10() const {
310
+ return Vectorized<float>(Sleef_log10f16_u10(values));
311
+ }
312
+ Vectorized<float> log1p() const {
313
+ return Vectorized<float>(Sleef_log1pf16_u10(values));
314
+ }
315
+ Vectorized<float> frac() const;
316
+ Vectorized<float> sin() const {
317
+ return Vectorized<float>(Sleef_sinf16_u35(values));
318
+ }
319
+ Vectorized<float> sinh() const {
320
+ return Vectorized<float>(Sleef_sinhf16_u10(values));
321
+ }
322
+ Vectorized<float> cos() const {
323
+ return Vectorized<float>(Sleef_cosf16_u35(values));
324
+ }
325
+ Vectorized<float> cosh() const {
326
+ return Vectorized<float>(Sleef_coshf16_u10(values));
327
+ }
328
+ Vectorized<float> ceil() const {
329
+ return _mm512_ceil_ps(values);
330
+ }
331
+ Vectorized<float> floor() const {
332
+ return _mm512_floor_ps(values);
333
+ }
334
+ Vectorized<float> hypot(const Vectorized<float> &b) const {
335
+ return Vectorized<float>(Sleef_hypotf16_u05(values, b));
336
+ }
337
+ Vectorized<float> i0() const {
338
+ return map(calc_i0);
339
+ }
340
+ Vectorized<float> i0e() const {
341
+ return map(calc_i0e);
342
+ }
343
+ Vectorized<float> digamma() const {
344
+ return map(calc_digamma);
345
+ }
346
+ Vectorized<float> igamma(const Vectorized<float> &x) const {
347
+ __at_align__ float tmp[size()];
348
+ __at_align__ float tmp_x[size()];
349
+ store(tmp);
350
+ x.store(tmp_x);
351
+ for (const auto i : c10::irange(size())) {
352
+ tmp[i] = calc_igamma(tmp[i], tmp_x[i]);
353
+ }
354
+ return loadu(tmp);
355
+ }
356
+ Vectorized<float> igammac(const Vectorized<float> &x) const {
357
+ __at_align__ float tmp[size()];
358
+ __at_align__ float tmp_x[size()];
359
+ store(tmp);
360
+ x.store(tmp_x);
361
+ for (const auto i : c10::irange(size())) {
362
+ tmp[i] = calc_igammac(tmp[i], tmp_x[i]);
363
+ }
364
+ return loadu(tmp);
365
+ }
366
+ Vectorized<float> neg() const {
367
+ return _mm512_xor_ps(_mm512_set1_ps(-0.f), values);
368
+ }
369
+ Vectorized<float> nextafter(const Vectorized<float> &b) const {
370
+ return Vectorized<float>(Sleef_nextafterf16(values, b));
371
+ }
372
+ Vectorized<float> round() const {
373
+ return _mm512_roundscale_ps(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
374
+ }
375
+ Vectorized<float> tan() const {
376
+ return Vectorized<float>(Sleef_tanf16_u10(values));
377
+ }
378
+ Vectorized<float> tanh() const {
379
+ return Vectorized<float>(Sleef_tanhf16_u10(values));
380
+ }
381
+ Vectorized<float> trunc() const {
382
+ return _mm512_roundscale_ps(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
383
+ }
384
+ Vectorized<float> lgamma() const {
385
+ return Vectorized<float>(Sleef_lgammaf16_u10(values));
386
+ }
387
+ Vectorized<float> sqrt() const {
388
+ return _mm512_sqrt_ps(values);
389
+ }
390
+ Vectorized<float> reciprocal() const {
391
+ return _mm512_div_ps(_mm512_set1_ps(1), values);
392
+ }
393
+ Vectorized<float> rsqrt() const {
394
+ return _mm512_div_ps(_mm512_set1_ps(1), _mm512_sqrt_ps(values));
395
+ }
396
+ Vectorized<float> pow(const Vectorized<float> &b) const {
397
+ return Vectorized<float>(Sleef_powf16_u10(values, b));
398
+ }
399
+ // Comparison using the _CMP_**_OQ predicate.
400
+ // `O`: get false if an operand is NaN
401
+ // `Q`: do not raise if an operand is NaN
402
+ Vectorized<float> operator==(const Vectorized<float>& other) const {
403
+ auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_EQ_OQ);
404
+ return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask,
405
+ 0xFFFFFFFF));
406
+ }
407
+
408
+ Vectorized<float> operator!=(const Vectorized<float>& other) const {
409
+ auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_NEQ_UQ);
410
+ return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask,
411
+ 0xFFFFFFFF));
412
+ }
413
+
414
+ Vectorized<float> operator<(const Vectorized<float>& other) const {
415
+ auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_LT_OQ);
416
+ return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask,
417
+ 0xFFFFFFFF));
418
+ }
419
+
420
+ Vectorized<float> operator<=(const Vectorized<float>& other) const {
421
+ auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_LE_OQ);
422
+ return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask,
423
+ 0xFFFFFFFF));
424
+ }
425
+
426
+ Vectorized<float> operator>(const Vectorized<float>& other) const {
427
+ auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_GT_OQ);
428
+ return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask,
429
+ 0xFFFFFFFF));
430
+ }
431
+
432
+ Vectorized<float> operator>=(const Vectorized<float>& other) const {
433
+ auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_GE_OQ);
434
+ return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask,
435
+ 0xFFFFFFFF));
436
+ }
437
+
438
+ Vectorized<float> eq(const Vectorized<float>& other) const;
439
+ Vectorized<float> ne(const Vectorized<float>& other) const;
440
+ Vectorized<float> gt(const Vectorized<float>& other) const;
441
+ Vectorized<float> ge(const Vectorized<float>& other) const;
442
+ Vectorized<float> lt(const Vectorized<float>& other) const;
443
+ Vectorized<float> le(const Vectorized<float>& other) const;
444
+ };
445
+
446
+ template <>
447
+ Vectorized<float> inline operator+(const Vectorized<float>& a, const Vectorized<float>& b) {
448
+ return _mm512_add_ps(a, b);
449
+ }
450
+
451
+ template <>
452
+ Vectorized<float> inline operator-(const Vectorized<float>& a, const Vectorized<float>& b) {
453
+ return _mm512_sub_ps(a, b);
454
+ }
455
+
456
+ template <>
457
+ Vectorized<float> inline operator*(const Vectorized<float>& a, const Vectorized<float>& b) {
458
+ return _mm512_mul_ps(a, b);
459
+ }
460
+
461
+ template <>
462
+ Vectorized<float> inline operator/(const Vectorized<float>& a, const Vectorized<float>& b) {
463
+ return _mm512_div_ps(a, b);
464
+ }
465
+
466
+ // frac. Implement this here so we can use subtraction
467
+ inline Vectorized<float> Vectorized<float>::frac() const {
468
+ return *this - this->trunc();
469
+ }
470
+
471
+ // Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
472
+ // either input is a NaN.
473
+ template <>
474
+ Vectorized<float> inline maximum(const Vectorized<float>& a, const Vectorized<float>& b) {
475
+ auto zero_vec = _mm512_set1_epi32(0);
476
+ auto max = _mm512_max_ps(a, b);
477
+ auto isnan_mask = _mm512_cmp_ps_mask(a, b, _CMP_UNORD_Q);
478
+ auto isnan = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, isnan_mask,
479
+ 0xFFFFFFFF));
480
+ // Exploit the fact that all-ones is a NaN.
481
+ return _mm512_or_ps(max, isnan);
482
+ }
483
+
484
+ // Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
485
+ // either input is a NaN.
486
+ template <>
487
+ Vectorized<float> inline minimum(const Vectorized<float>& a, const Vectorized<float>& b) {
488
+ auto zero_vec = _mm512_set1_epi32(0);
489
+ auto min = _mm512_min_ps(a, b);
490
+ auto isnan_mask = _mm512_cmp_ps_mask(a, b, _CMP_UNORD_Q);
491
+ auto isnan = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, isnan_mask,
492
+ 0xFFFFFFFF));
493
+ // Exploit the fact that all-ones is a NaN.
494
+ return _mm512_or_ps(min, isnan);
495
+ }
496
+
497
+ template <>
498
+ Vectorized<float> inline clamp(const Vectorized<float>& a, const Vectorized<float>& min, const Vectorized<float>& max) {
499
+ return _mm512_min_ps(max, _mm512_max_ps(min, a));
500
+ }
501
+
502
+ template <>
503
+ Vectorized<float> inline clamp_max(const Vectorized<float>& a, const Vectorized<float>& max) {
504
+ return _mm512_min_ps(max, a);
505
+ }
506
+
507
+ template <>
508
+ Vectorized<float> inline clamp_min(const Vectorized<float>& a, const Vectorized<float>& min) {
509
+ return _mm512_max_ps(min, a);
510
+ }
511
+
512
+ template <>
513
+ Vectorized<float> inline operator&(const Vectorized<float>& a, const Vectorized<float>& b) {
514
+ return _mm512_and_ps(a, b);
515
+ }
516
+
517
+ template <>
518
+ Vectorized<float> inline operator|(const Vectorized<float>& a, const Vectorized<float>& b) {
519
+ return _mm512_or_ps(a, b);
520
+ }
521
+
522
+ template <>
523
+ Vectorized<float> inline operator^(const Vectorized<float>& a, const Vectorized<float>& b) {
524
+ return _mm512_xor_ps(a, b);
525
+ }
526
+
527
+ inline Vectorized<float> Vectorized<float>::eq(const Vectorized<float>& other) const {
528
+ return (*this == other) & Vectorized<float>(1.0f);
529
+ }
530
+
531
+ inline Vectorized<float> Vectorized<float>::ne(const Vectorized<float>& other) const {
532
+ return (*this != other) & Vectorized<float>(1.0f);
533
+ }
534
+
535
+ inline Vectorized<float> Vectorized<float>::gt(const Vectorized<float>& other) const {
536
+ return (*this > other) & Vectorized<float>(1.0f);
537
+ }
538
+
539
+ inline Vectorized<float> Vectorized<float>::ge(const Vectorized<float>& other) const {
540
+ return (*this >= other) & Vectorized<float>(1.0f);
541
+ }
542
+
543
+ inline Vectorized<float> Vectorized<float>::lt(const Vectorized<float>& other) const {
544
+ return (*this < other) & Vectorized<float>(1.0f);
545
+ }
546
+
547
+ inline Vectorized<float> Vectorized<float>::le(const Vectorized<float>& other) const {
548
+ return (*this <= other) & Vectorized<float>(1.0f);
549
+ }
550
+
551
+ template <>
552
+ inline void convert(const float* src, float* dst, int64_t n) {
553
+ int64_t i;
554
+ #pragma unroll
555
+ for (i = 0; i <= (n - Vectorized<float>::size()); i += Vectorized<float>::size()) {
556
+ _mm512_storeu_ps(dst + i, _mm512_loadu_ps(src + i));
557
+ }
558
+ #pragma unroll
559
+ for (; i < n; i++) {
560
+ dst[i] = src[i];
561
+ }
562
+ }
563
+
564
+ template <>
565
+ Vectorized<float> inline fmadd(const Vectorized<float>& a, const Vectorized<float>& b, const Vectorized<float>& c) {
566
+ return _mm512_fmadd_ps(a, b, c);
567
+ }
568
+
569
+ template <>
570
+ Vectorized<float> inline fmsub(const Vectorized<float>& a, const Vectorized<float>& b, const Vectorized<float>& c) {
571
+ return _mm512_fmsub_ps(a, b, c);
572
+ }
573
+
574
+ // TODO(jgong5): rewrite with ATEN vectorized (need to add unpack and shuffle)
575
+ // Used by Inductor CPP codegen
576
+ // Code referred to FBGEMM:
577
+ // https://github.com/pytorch/FBGEMM/blob/39a423e4ad1a04b77fea81c7d09c3e6f8984fae9/src/UtilsAvx512.cc#LL19C6-L19C6
578
+ // 16 * 6 = 96 instructions
579
+ template<>
580
+ inline void transpose_mxn<float, 16, 16>(
581
+ const float* src,
582
+ int64_t ld_src,
583
+ float* dst,
584
+ int64_t ld_dst) {
585
+ // load from src to registers
586
+ // a: a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 a11 a12 a13 a14 a15
587
+ // b: b0 b1 b2 b3 b4 b5 b6 b7 b8 b9 b10 b11 b12 b13 b14 b15
588
+ // c: c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 c12 c13 c14 c15
589
+ // d: d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15
590
+ // e: e0 e1 e2 e3 e4 e5 e6 e7 e8 e9 e10 e11 e12 e13 e14 e15
591
+ // f: f0 f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13 f14 f15
592
+ // g: g0 g1 g2 g3 g4 g5 g6 g7 g8 g9 g10 g11 g12 g13 g14 g15
593
+ // h: h0 h1 h2 h3 h4 h5 h6 h7 h8 h9 h10 h11 h12 h13 h14 h15
594
+ // i: i0 i1 i2 i3 i4 i5 i6 i7 i8 i9 i10 i11 i12 i13 i14 i15
595
+ // j: j0 j1 j2 j3 j4 j5 j6 j7 j8 j9 j10 j11 j12 j13 j14 j15
596
+ // k: k0 k1 k2 k3 k4 k5 k6 k7 k8 k9 k10 k11 k12 k13 k14 k15
597
+ // l: l0 l1 l2 l3 l4 l5 l6 l7 l8 l9 l10 l11 l12 l13 l14 l15
598
+ // m: m0 m1 m2 m3 m4 m5 m6 m7 m8 m9 m10 m11 m12 m13 m14 m15
599
+ // n: n0 n1 n2 n3 n4 n5 n6 n7 n8 n9 n10 n11 n12 n13 n14 n15
600
+ // o: o0 o1 o2 o3 o4 o5 o6 o7 o8 o9 o10 o11 o12 o13 o14 o15
601
+ // p: p0 p1 p2 p3 p4 p5 p6 p7 p8 p9 p10 p11 p12 p13 p14 p15
602
+ __m512 a = _mm512_loadu_ps(&src[0 * ld_src]);
603
+ __m512 b = _mm512_loadu_ps(&src[1 * ld_src]);
604
+ __m512 c = _mm512_loadu_ps(&src[2 * ld_src]);
605
+ __m512 d = _mm512_loadu_ps(&src[3 * ld_src]);
606
+ __m512 e = _mm512_loadu_ps(&src[4 * ld_src]);
607
+ __m512 f = _mm512_loadu_ps(&src[5 * ld_src]);
608
+ __m512 g = _mm512_loadu_ps(&src[6 * ld_src]);
609
+ __m512 h = _mm512_loadu_ps(&src[7 * ld_src]);
610
+ __m512 i = _mm512_loadu_ps(&src[8 * ld_src]);
611
+ __m512 j = _mm512_loadu_ps(&src[9 * ld_src]);
612
+ __m512 k = _mm512_loadu_ps(&src[10 * ld_src]);
613
+ __m512 l = _mm512_loadu_ps(&src[11 * ld_src]);
614
+ __m512 m = _mm512_loadu_ps(&src[12 * ld_src]);
615
+ __m512 n = _mm512_loadu_ps(&src[13 * ld_src]);
616
+ __m512 o = _mm512_loadu_ps(&src[14 * ld_src]);
617
+ __m512 p = _mm512_loadu_ps(&src[15 * ld_src]);
618
+
619
+ __m512 ta, tb, tc, td, te, tf, tg, th, ti, tj, tk, tl, tm, tn, to, tq;
620
+ // unpacking and interleaving 32-bit elements
621
+ // a0 b0 a1 b1 a4 b4 a5 b5 a8 b8 a9 b9 a12 b12 a13 b13
622
+ // a2 b2 a3 b3 a6 b6 a7 b7 a10 b10 a11 b11 a14 b14 a15 b15
623
+ // c0 d0 c1 d1 ...
624
+ // c2 d2 c3 d3 ...
625
+ // e0 f0 e1 f1 ...
626
+ // e2 f2 e3 f3 ...
627
+ // g0 h0 g1 h1 ...
628
+ // g2 h2 g3 h3 ...
629
+ // i0 ...
630
+ // i2 ...
631
+ // k0 ...
632
+ // k2 ...
633
+ // m0 ...
634
+ // m2 ...
635
+ // o0 ...
636
+ // o2 ...
637
+ ta = _mm512_unpacklo_ps(a, b);
638
+ tb = _mm512_unpackhi_ps(a, b);
639
+ tc = _mm512_unpacklo_ps(c, d);
640
+ td = _mm512_unpackhi_ps(c, d);
641
+ te = _mm512_unpacklo_ps(e, f);
642
+ tf = _mm512_unpackhi_ps(e, f);
643
+ tg = _mm512_unpacklo_ps(g, h);
644
+ th = _mm512_unpackhi_ps(g, h);
645
+ ti = _mm512_unpacklo_ps(i, j);
646
+ tj = _mm512_unpackhi_ps(i, j);
647
+ tk = _mm512_unpacklo_ps(k, l);
648
+ tl = _mm512_unpackhi_ps(k, l);
649
+ tm = _mm512_unpacklo_ps(m, n);
650
+ tn = _mm512_unpackhi_ps(m, n);
651
+ to = _mm512_unpacklo_ps(o, p);
652
+ tq = _mm512_unpackhi_ps(o, p);
653
+
654
+ // unpacking and interleaving 64-bit elements
655
+ // a0 b0 c0 d0 a4 b4 c4 d4 a8 b8 c8 d8 a12 b12 c12 d12
656
+ // a1 b1 c1 d1 ...
657
+ // a2 b2 c2 d2 ...
658
+ // a3 b3 c3 d3 ...
659
+ // e0 f0 g0 h0 e4 f4 g4 h4 e8 f8 g8 h8 e12 f12 g12 h12
660
+ // e1 f1 g1 h1 ...
661
+ // e2 f2 g2 h2 ...
662
+ // e3 f3 g3 h3 ...
663
+ // i0 j0 k0 l0 ...
664
+ // i1 j1 k1 l1 ...
665
+ // i2 j2 k2 l2 ...
666
+ // i3 j3 k3 l3 ...
667
+ // m0 n0 o0 p0 ...
668
+ // m1 n1 o1 p1 ...
669
+ // m2 n2 o2 p2 ...
670
+ // m3 n3 o3 p3 ...
671
+ a = _mm512_castpd_ps(
672
+ _mm512_unpacklo_pd(_mm512_castps_pd(ta), _mm512_castps_pd(tc)));
673
+ b = _mm512_castpd_ps(
674
+ _mm512_unpackhi_pd(_mm512_castps_pd(ta), _mm512_castps_pd(tc)));
675
+ c = _mm512_castpd_ps(
676
+ _mm512_unpacklo_pd(_mm512_castps_pd(tb), _mm512_castps_pd(td)));
677
+ d = _mm512_castpd_ps(
678
+ _mm512_unpackhi_pd(_mm512_castps_pd(tb), _mm512_castps_pd(td)));
679
+ e = _mm512_castpd_ps(
680
+ _mm512_unpacklo_pd(_mm512_castps_pd(te), _mm512_castps_pd(tg)));
681
+ f = _mm512_castpd_ps(
682
+ _mm512_unpackhi_pd(_mm512_castps_pd(te), _mm512_castps_pd(tg)));
683
+ g = _mm512_castpd_ps(
684
+ _mm512_unpacklo_pd(_mm512_castps_pd(tf), _mm512_castps_pd(th)));
685
+ h = _mm512_castpd_ps(
686
+ _mm512_unpackhi_pd(_mm512_castps_pd(tf), _mm512_castps_pd(th)));
687
+ i = _mm512_castpd_ps(
688
+ _mm512_unpacklo_pd(_mm512_castps_pd(ti), _mm512_castps_pd(tk)));
689
+ j = _mm512_castpd_ps(
690
+ _mm512_unpackhi_pd(_mm512_castps_pd(ti), _mm512_castps_pd(tk)));
691
+ k = _mm512_castpd_ps(
692
+ _mm512_unpacklo_pd(_mm512_castps_pd(tj), _mm512_castps_pd(tl)));
693
+ l = _mm512_castpd_ps(
694
+ _mm512_unpackhi_pd(_mm512_castps_pd(tj), _mm512_castps_pd(tl)));
695
+ m = _mm512_castpd_ps(
696
+ _mm512_unpacklo_pd(_mm512_castps_pd(tm), _mm512_castps_pd(to)));
697
+ n = _mm512_castpd_ps(
698
+ _mm512_unpackhi_pd(_mm512_castps_pd(tm), _mm512_castps_pd(to)));
699
+ o = _mm512_castpd_ps(
700
+ _mm512_unpacklo_pd(_mm512_castps_pd(tn), _mm512_castps_pd(tq)));
701
+ p = _mm512_castpd_ps(
702
+ _mm512_unpackhi_pd(_mm512_castps_pd(tn), _mm512_castps_pd(tq)));
703
+
704
+ // shuffle 128-bits (composed of 4 32-bit elements)
705
+ // a0 b0 c0 d0 a8 b8 c8 d8 e0 f0 g0 h0 e8 f8 g8 h8
706
+ // a1 b1 c1 d1 ...
707
+ // a2 b2 c2 d2 ...
708
+ // a3 b3 c3 d3 ...
709
+ // a4 b4 c4 d4 ...
710
+ // a5 b5 c5 d5 ...
711
+ // a6 b6 c6 d6 ...
712
+ // a7 b7 c7 d7 ...
713
+ // i0 j0 k0 l0 i8 j8 k8 l8 m0 n0 o0 p0 m8 n8 o8 p8
714
+ // i1 j1 k1 l1 ...
715
+ // i2 j2 k2 l2 ...
716
+ // i3 j3 k3 l3 ...
717
+ // i4 j4 k4 l4 ...
718
+ // i5 j5 k5 l5 ...
719
+ // i6 j6 k6 l6 ...
720
+ // i7 j7 k7 l7 ...
721
+ ta = _mm512_shuffle_f32x4(a, e, 0x88);
722
+ tb = _mm512_shuffle_f32x4(b, f, 0x88);
723
+ tc = _mm512_shuffle_f32x4(c, g, 0x88);
724
+ td = _mm512_shuffle_f32x4(d, h, 0x88);
725
+ te = _mm512_shuffle_f32x4(a, e, 0xdd);
726
+ tf = _mm512_shuffle_f32x4(b, f, 0xdd);
727
+ tg = _mm512_shuffle_f32x4(c, g, 0xdd);
728
+ th = _mm512_shuffle_f32x4(d, h, 0xdd);
729
+ ti = _mm512_shuffle_f32x4(i, m, 0x88);
730
+ tj = _mm512_shuffle_f32x4(j, n, 0x88);
731
+ tk = _mm512_shuffle_f32x4(k, o, 0x88);
732
+ tl = _mm512_shuffle_f32x4(l, p, 0x88);
733
+ tm = _mm512_shuffle_f32x4(i, m, 0xdd);
734
+ tn = _mm512_shuffle_f32x4(j, n, 0xdd);
735
+ to = _mm512_shuffle_f32x4(k, o, 0xdd);
736
+ tq = _mm512_shuffle_f32x4(l, p, 0xdd);
737
+
738
+ // shuffle 128-bits (composed of 4 32-bit elements)
739
+ // a0 b0 c0 d0 ... o0
740
+ // a1 b1 c1 d1 ... o1
741
+ // a2 b2 c2 d2 ... o2
742
+ // a3 b3 c3 d3 ... o3
743
+ // a4 ...
744
+ // a5 ...
745
+ // a6 ...
746
+ // a7 ...
747
+ // a8 ...
748
+ // a9 ...
749
+ // a10 ...
750
+ // a11 ...
751
+ // a12 ...
752
+ // a13 ...
753
+ // a14 ...
754
+ // a15 b15 c15 d15 ... o15
755
+ a = _mm512_shuffle_f32x4(ta, ti, 0x88);
756
+ b = _mm512_shuffle_f32x4(tb, tj, 0x88);
757
+ c = _mm512_shuffle_f32x4(tc, tk, 0x88);
758
+ d = _mm512_shuffle_f32x4(td, tl, 0x88);
759
+ e = _mm512_shuffle_f32x4(te, tm, 0x88);
760
+ f = _mm512_shuffle_f32x4(tf, tn, 0x88);
761
+ g = _mm512_shuffle_f32x4(tg, to, 0x88);
762
+ h = _mm512_shuffle_f32x4(th, tq, 0x88);
763
+ i = _mm512_shuffle_f32x4(ta, ti, 0xdd);
764
+ j = _mm512_shuffle_f32x4(tb, tj, 0xdd);
765
+ k = _mm512_shuffle_f32x4(tc, tk, 0xdd);
766
+ l = _mm512_shuffle_f32x4(td, tl, 0xdd);
767
+ m = _mm512_shuffle_f32x4(te, tm, 0xdd);
768
+ n = _mm512_shuffle_f32x4(tf, tn, 0xdd);
769
+ o = _mm512_shuffle_f32x4(tg, to, 0xdd);
770
+ p = _mm512_shuffle_f32x4(th, tq, 0xdd);
771
+
772
+ // store from registers to dst
773
+ _mm512_storeu_ps(&dst[0 * ld_dst], a);
774
+ _mm512_storeu_ps(&dst[1 * ld_dst], b);
775
+ _mm512_storeu_ps(&dst[2 * ld_dst], c);
776
+ _mm512_storeu_ps(&dst[3 * ld_dst], d);
777
+ _mm512_storeu_ps(&dst[4 * ld_dst], e);
778
+ _mm512_storeu_ps(&dst[5 * ld_dst], f);
779
+ _mm512_storeu_ps(&dst[6 * ld_dst], g);
780
+ _mm512_storeu_ps(&dst[7 * ld_dst], h);
781
+ _mm512_storeu_ps(&dst[8 * ld_dst], i);
782
+ _mm512_storeu_ps(&dst[9 * ld_dst], j);
783
+ _mm512_storeu_ps(&dst[10 * ld_dst], k);
784
+ _mm512_storeu_ps(&dst[11 * ld_dst], l);
785
+ _mm512_storeu_ps(&dst[12 * ld_dst], m);
786
+ _mm512_storeu_ps(&dst[13 * ld_dst], n);
787
+ _mm512_storeu_ps(&dst[14 * ld_dst], o);
788
+ _mm512_storeu_ps(&dst[15 * ld_dst], p);
789
+ }
790
+
791
+ #endif
792
+
793
+ }}}
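For reference, here is a scalar sketch (not part of the commit) of the 16x16 tile transpose that the unpack/shuffle/store sequence above implements. Only dst and ld_dst correspond to names used above; the function name, src, and ld_src are hypothetical, introduced purely for illustration.

#include <cstdint>

// Scalar equivalent of the AVX-512 16x16 float transpose above (hypothetical name).
// dst/ld_dst match the store loop above; src/ld_src stand in for the input tile.
inline void transpose_16x16_reference(const float* src, int64_t ld_src,
                                      float* dst, int64_t ld_dst) {
  for (int row = 0; row < 16; ++row) {
    for (int col = 0; col < 16; ++col) {
      // Element (row, col) of the source ends up at (col, row) of the destination.
      dst[col * ld_dst + row] = src[row * ld_src + col];
    }
  }
}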
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_int.h ADDED
@@ -0,0 +1,1459 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+ #include <ATen/cpu/vec/vec_base.h>
8
+ #include <c10/macros/Macros.h>
9
+ #include <c10/util/irange.h>
10
+
11
+ namespace at {
12
+ namespace vec {
13
+ inline namespace CPU_CAPABILITY {
14
+
15
+ #ifdef CPU_CAPABILITY_AVX512
16
+
17
+ struct Vectorizedi {
18
+ protected:
19
+ __m512i values;
20
+ static constexpr __m512i zero_vector {0, 0, 0, 0, 0, 0, 0, 0};
21
+ static inline __m512i invert(const __m512i& v) {
22
+ const auto ones = _mm512_set1_epi64(-1);
23
+ return _mm512_xor_si512(ones, v);
24
+ }
25
+ public:
26
+ Vectorizedi() {}
27
+ Vectorizedi(__m512i v) : values(v) {}
28
+ operator __m512i() const {
29
+ return values;
30
+ }
31
+ };
32
+
33
+ #else
34
+
35
+ struct Vectorizedi {}; // dummy definition to make Vectorizedi always defined
36
+
37
+ #endif // CPU_CAPABILITY_AVX512
38
+
39
+ #ifdef CPU_CAPABILITY_AVX512
40
+
41
+ template <>
42
+ class Vectorized<int64_t> : public Vectorizedi {
43
+ private:
44
+ static const Vectorized<int64_t> ones;
45
+ public:
46
+ using value_type = int64_t;
47
+ using size_type = int;
48
+ static constexpr size_type size() {
49
+ return 8;
50
+ }
51
+ using Vectorizedi::Vectorizedi;
52
+ Vectorized() {}
53
+ Vectorized(int64_t v) { values = _mm512_set1_epi64(v); }
54
+ Vectorized(int64_t val1, int64_t val2, int64_t val3, int64_t val4,
55
+ int64_t val5, int64_t val6, int64_t val7, int64_t val8) {
56
+ values = _mm512_setr_epi64(val1, val2, val3, val4,
57
+ val5, val6, val7, val8);
58
+ }
59
+ template <int64_t mask>
60
+ static Vectorized<int64_t> blend(Vectorized<int64_t> a, Vectorized<int64_t> b) {
61
+ return _mm512_mask_blend_epi64(mask, a.values, b.values);
62
+ }
63
+ static Vectorized<int64_t> blendv(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b,
64
+ const Vectorized<int64_t>& mask) {
65
+ auto msb_one = _mm512_set1_epi64(0xFFFFFFFFFFFFFFFF);
66
+ auto mask_ = _mm512_cmp_epi64_mask(mask, msb_one, _MM_CMPINT_EQ);
67
+ return _mm512_mask_blend_epi64(mask_, a.values, b.values);
68
+ }
69
+ template <typename step_t>
70
+ static Vectorized<int64_t> arange(int64_t base = 0, step_t step = static_cast<step_t>(1)) {
71
+ return Vectorized<int64_t>(base, base + step, base + 2 * step, base + 3 * step,
72
+ base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step);
73
+ }
74
+ static Vectorized<int64_t>
75
+ set(Vectorized<int64_t> a, Vectorized<int64_t> b, int64_t count = size()) {
76
+ switch (count) {
77
+ case 0:
78
+ return a;
79
+ case 1:
80
+ return blend<1>(a, b);
81
+ case 2:
82
+ return blend<3>(a, b);
83
+ case 3:
84
+ return blend<7>(a, b);
85
+ case 4:
86
+ return blend<15>(a, b);
87
+ case 5:
88
+ return blend<31>(a, b);
89
+ case 6:
90
+ return blend<63>(a, b);
91
+ case 7:
92
+ return blend<127>(a, b);
93
+ }
94
+ return b;
95
+ }
96
+ static Vectorized<int64_t> loadu(const void* ptr) {
97
+ return _mm512_loadu_si512(reinterpret_cast<const __m512i*>(ptr));
98
+ }
99
+ static Vectorized<int64_t> loadu(const void* ptr, int64_t count) {
100
+ if (count == size()) {
101
+ return _mm512_loadu_si512(reinterpret_cast<const __m512i*>(ptr));
102
+ } else {
103
+ __mmask8 mask = (1ULL << count) - 1;
104
+ return _mm512_maskz_loadu_epi64(mask, ptr);
105
+ }
106
+ }
107
+ void store(void* ptr, int count = size()) const {
108
+ if (count == size()) {
109
+ // ptr need not be aligned here. See
110
+ // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm512-storeu-si512.html
111
+ _mm512_storeu_si512(reinterpret_cast<__m512i*>(ptr), values);
112
+ } else if (count > 0) {
113
+ __mmask8 mask = (1ULL << count) - 1;
114
+ _mm512_mask_storeu_epi64(ptr, mask, values);
115
+ }
116
+ }
117
+ const int64_t& operator[](int idx) const = delete;
118
+ int64_t& operator[](int idx) = delete;
119
+ Vectorized<int64_t> abs() const {
120
+ auto is_larger_mask = _mm512_cmpgt_epi64_mask(zero_vector, values);
121
+ auto is_larger = _mm512_mask_set1_epi64(zero_vector, is_larger_mask, 0xFFFFFFFFFFFFFFFF);
122
+ auto inverse = _mm512_xor_si512(values, is_larger);
123
+ return _mm512_sub_epi64(inverse, is_larger);
124
+ }
125
+ Vectorized<int64_t> real() const {
126
+ return *this;
127
+ }
128
+ Vectorized<int64_t> imag() const {
129
+ return _mm512_set1_epi64(0);
130
+ }
131
+ Vectorized<int64_t> conj() const {
132
+ return *this;
133
+ }
134
+ Vectorized<int64_t> neg() const;
135
+ Vectorized<int64_t> operator==(const Vectorized<int64_t>& other) const {
136
+ auto mask = _mm512_cmpeq_epi64_mask(values, other.values);
137
+ return _mm512_mask_set1_epi64(zero_vector, mask, 0xFFFFFFFFFFFFFFFF);
138
+ }
139
+ Vectorized<int64_t> operator!=(const Vectorized<int64_t>& other) const {
140
+ auto mask = _mm512_cmpneq_epi64_mask(values, other.values);
141
+ return _mm512_mask_set1_epi64(zero_vector, mask, 0xFFFFFFFFFFFFFFFF);
142
+ }
143
+ Vectorized<int64_t> operator<(const Vectorized<int64_t>& other) const {
144
+ auto mask = _mm512_cmplt_epi64_mask(values, other.values);
145
+ return _mm512_mask_set1_epi64(zero_vector, mask, 0xFFFFFFFFFFFFFFFF);
146
+ }
147
+ Vectorized<int64_t> operator<=(const Vectorized<int64_t>& other) const {
148
+ auto mask = _mm512_cmple_epi64_mask(values, other.values);
149
+ return _mm512_mask_set1_epi64(zero_vector, mask, 0xFFFFFFFFFFFFFFFF);
150
+ }
151
+ Vectorized<int64_t> operator>(const Vectorized<int64_t>& other) const {
152
+ auto mask = _mm512_cmpgt_epi64_mask(values, other.values);
153
+ return _mm512_mask_set1_epi64(zero_vector, mask, 0xFFFFFFFFFFFFFFFF);
154
+ }
155
+ Vectorized<int64_t> operator>=(const Vectorized<int64_t>& other) const {
156
+ auto mask = _mm512_cmpge_epi64_mask(values, other.values);
157
+ return _mm512_mask_set1_epi64(zero_vector, mask, 0xFFFFFFFFFFFFFFFF);
158
+ }
159
+
160
+ Vectorized<int64_t> eq(const Vectorized<int64_t>& other) const;
161
+ Vectorized<int64_t> ne(const Vectorized<int64_t>& other) const;
162
+ Vectorized<int64_t> gt(const Vectorized<int64_t>& other) const;
163
+ Vectorized<int64_t> ge(const Vectorized<int64_t>& other) const;
164
+ Vectorized<int64_t> lt(const Vectorized<int64_t>& other) const;
165
+ Vectorized<int64_t> le(const Vectorized<int64_t>& other) const;
166
+ };
167
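A minimal usage sketch (not part of the commit) of the masked tail handling above, assuming an AVX-512 build where this Vectorized<int64_t> specialization is active; negate_all is a hypothetical helper.

#include <ATen/cpu/vec/vec.h>
#include <cstdint>

// Negate n int64_t values; the tail uses the count-based loadu/store overloads,
// which internally build the __mmask8 as (1ULL << count) - 1.
void negate_all(const int64_t* src, int64_t* dst, int64_t n) {
  using Vec = at::vec::Vectorized<int64_t>;
  int64_t i = 0;
  for (; i + Vec::size() <= n; i += Vec::size()) {
    Vec::loadu(src + i).neg().store(dst + i);
  }
  if (i < n) {
    // Partial vector: only the low (n - i) lanes are loaded and stored.
    Vec::loadu(src + i, n - i).neg().store(dst + i, n - i);
  }
}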
+
168
+ template <>
169
+ class Vectorized<int32_t> : public Vectorizedi {
170
+ private:
171
+ static constexpr __m512i zero_vector {0, 0, 0, 0, 0, 0, 0, 0};
172
+ static const Vectorized<int32_t> ones;
173
+ public:
174
+ using value_type = int32_t;
175
+ static constexpr int size() {
176
+ return 16;
177
+ }
178
+ using Vectorizedi::Vectorizedi;
179
+ Vectorized() {}
180
+ Vectorized(int32_t v) { values = _mm512_set1_epi32(v); }
181
+ Vectorized(int32_t val1, int32_t val2, int32_t val3, int32_t val4,
182
+ int32_t val5, int32_t val6, int32_t val7, int32_t val8,
183
+ int32_t val9, int32_t val10, int32_t val11, int32_t val12,
184
+ int32_t val13, int32_t val14, int32_t val15, int32_t val16) {
185
+ values = _mm512_setr_epi32(val1, val2, val3, val4, val5, val6, val7, val8,
186
+ val9, val10, val11, val12, val13, val14, val15, val16);
187
+ }
188
+ template <int64_t mask>
189
+ static Vectorized<int32_t> blend(Vectorized<int32_t> a, Vectorized<int32_t> b) {
190
+ return _mm512_mask_blend_epi32(mask, a.values, b.values);
191
+ }
192
+ static Vectorized<int32_t> blendv(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b,
193
+ const Vectorized<int32_t>& mask) {
194
+ auto msb_one = _mm512_set1_epi32(0xFFFFFFFF);
195
+ auto mask_ = _mm512_cmp_epi32_mask(mask, msb_one, _MM_CMPINT_EQ);
196
+ return _mm512_mask_blend_epi32(mask_, a.values, b.values);
197
+ }
198
+ template <typename step_t>
199
+ static Vectorized<int32_t> arange(int32_t base = 0, step_t step = static_cast<step_t>(1)) {
200
+ return Vectorized<int32_t>(
201
+ base, base + step, base + 2 * step, base + 3 * step,
202
+ base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step,
203
+ base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step,
204
+ base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step);
205
+ }
206
+ static Vectorized<int32_t>
207
+ set(Vectorized<int32_t> a, Vectorized<int32_t> b, int32_t count = size()) {
208
+ switch (count) {
209
+ case 0:
210
+ return a;
211
+ case 1:
212
+ return blend<1>(a, b);
213
+ case 2:
214
+ return blend<3>(a, b);
215
+ case 3:
216
+ return blend<7>(a, b);
217
+ case 4:
218
+ return blend<15>(a, b);
219
+ case 5:
220
+ return blend<31>(a, b);
221
+ case 6:
222
+ return blend<63>(a, b);
223
+ case 7:
224
+ return blend<127>(a, b);
225
+ case 8:
226
+ return blend<255>(a, b);
227
+ case 9:
228
+ return blend<511>(a, b);
229
+ case 10:
230
+ return blend<1023>(a, b);
231
+ case 11:
232
+ return blend<2047>(a, b);
233
+ case 12:
234
+ return blend<4095>(a, b);
235
+ case 13:
236
+ return blend<8191>(a, b);
237
+ case 14:
238
+ return blend<16383>(a, b);
239
+ case 15:
240
+ return blend<32767>(a, b);
241
+ }
242
+ return b;
243
+ }
244
+ static Vectorized<int32_t> loadu(const void* ptr) {
245
+ return _mm512_loadu_si512(reinterpret_cast<const __m512i*>(ptr));
246
+ }
247
+ static Vectorized<int32_t> loadu(const void* ptr, int32_t count) {
248
+ if (count == size()) {
249
+ return _mm512_loadu_si512(reinterpret_cast<const __m512i*>(ptr));
250
+ } else {
251
+ __mmask16 mask = (1ULL << count) - 1;
252
+ return _mm512_maskz_loadu_epi32(mask, ptr);
253
+ }
254
+ }
255
+ void store(void* ptr, int count = size()) const {
256
+ if (count == size()) {
257
+ // ptr need not be aligned here. See
258
+ // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm512-storeu-si512.html
259
+ _mm512_storeu_si512(reinterpret_cast<__m512i*>(ptr), values);
260
+ } else if (count > 0) {
261
+ __mmask16 mask = (1ULL << count) - 1;
262
+ _mm512_mask_storeu_epi32(ptr, mask, values);
263
+ }
264
+ }
265
+ const int32_t& operator[](int idx) const = delete;
266
+ int32_t& operator[](int idx) = delete;
267
+ Vectorized<int32_t> abs() const {
268
+ return _mm512_abs_epi32(values);
269
+ }
270
+ Vectorized<int32_t> real() const {
271
+ return *this;
272
+ }
273
+ Vectorized<int32_t> imag() const {
274
+ return _mm512_set1_epi32(0);
275
+ }
276
+ Vectorized<int32_t> conj() const {
277
+ return *this;
278
+ }
279
+ Vectorized<int32_t> neg() const;
280
+ Vectorized<int32_t> operator==(const Vectorized<int32_t>& other) const {
281
+ auto mask = _mm512_cmpeq_epi32_mask(values, other.values);
282
+ return _mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF);
283
+ }
284
+ Vectorized<int32_t> operator!=(const Vectorized<int32_t>& other) const {
285
+ auto mask = _mm512_cmpneq_epi32_mask(values, other.values);
286
+ return _mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF);
287
+ }
288
+ Vectorized<int32_t> operator<(const Vectorized<int32_t>& other) const {
289
+ auto mask = _mm512_cmplt_epi32_mask(values, other.values);
290
+ return _mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF);
291
+ }
292
+ Vectorized<int32_t> operator<=(const Vectorized<int32_t>& other) const {
293
+ auto mask = _mm512_cmple_epi32_mask(values, other.values);
294
+ return _mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF);
295
+ }
296
+ Vectorized<int32_t> operator>(const Vectorized<int32_t>& other) const {
297
+ auto mask = _mm512_cmpgt_epi32_mask(values, other.values);
298
+ return _mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF);
299
+ }
300
+ Vectorized<int32_t> operator>=(const Vectorized<int32_t>& other) const {
301
+ auto mask = _mm512_cmpge_epi32_mask(values, other.values);
302
+ return _mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF);
303
+ }
304
+ Vectorized<int32_t> eq(const Vectorized<int32_t>& other) const;
305
+ Vectorized<int32_t> ne(const Vectorized<int32_t>& other) const;
306
+ Vectorized<int32_t> gt(const Vectorized<int32_t>& other) const;
307
+ Vectorized<int32_t> ge(const Vectorized<int32_t>& other) const;
308
+ Vectorized<int32_t> lt(const Vectorized<int32_t>& other) const;
309
+ Vectorized<int32_t> le(const Vectorized<int32_t>& other) const;
310
+ };
311
+
312
+ template <>
313
+ inline void convert(const int32_t *src, float *dst, int64_t n) {
314
+ int64_t i;
315
+ // int32_t and float have the same size
316
+ #ifndef _MSC_VER
317
+ # pragma unroll
318
+ #endif
319
+ for (i = 0; i <= (n - Vectorized<int32_t>::size()); i += Vectorized<int32_t>::size()) {
320
+ auto input_vec = _mm512_loadu_si512(reinterpret_cast<const __m512i*>(src + i));
321
+ auto output_vec = _mm512_cvtepi32_ps(input_vec);
322
+ _mm512_storeu_ps(reinterpret_cast<float*>(dst + i), output_vec);
323
+ }
324
+ #ifndef _MSC_VER
325
+ # pragma unroll
326
+ #endif
327
+ for (; i < n; i++) {
328
+ dst[i] = static_cast<float>(src[i]);
329
+ }
330
+ }
331
+
332
+ template <>
333
+ inline void convert(const int32_t *src, double *dst, int64_t n) {
334
+ int64_t i;
335
+ // int32_t has half the size of double
336
+ #ifndef _MSC_VER
337
+ # pragma unroll
338
+ #endif
339
+ for (i = 0; i <= (n - Vectorized<double>::size()); i += Vectorized<double>::size()) {
340
+ auto input_256_vec = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(src + i));
341
+ auto output_vec = _mm512_cvtepi32_pd(input_256_vec);
342
+ _mm512_storeu_pd(reinterpret_cast<double*>(dst + i), output_vec);
343
+ }
344
+ #ifndef _MSC_VER
345
+ # pragma unroll
346
+ #endif
347
+ for (; i < n; i++) {
348
+ dst[i] = static_cast<double>(src[i]);
349
+ }
350
+ }
351
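A small usage sketch (not part of the commit) of the vectorized convert specializations above; to_float is a hypothetical helper and assumes the dispatch header is reachable as ATen/cpu/vec/vec.h.

#include <ATen/cpu/vec/vec.h>
#include <cstdint>
#include <vector>

// Widen int32 values to float with the 16-wide main loop above;
// the remainder (n % 16) is handled by the scalar tail loop inside convert().
std::vector<float> to_float(const std::vector<int32_t>& src) {
  std::vector<float> out(src.size());
  at::vec::convert(src.data(), out.data(), static_cast<int64_t>(src.size()));
  return out;
}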
+
352
+ template <>
353
+ class Vectorized<int16_t> : public Vectorizedi {
354
+ private:
355
+ static const Vectorized<int16_t> ones;
356
+ static constexpr __m512i zero_vector {0, 0, 0, 0, 0, 0, 0, 0};
357
+ public:
358
+ using value_type = int16_t;
359
+ static constexpr int size() {
360
+ return 32;
361
+ }
362
+ using Vectorizedi::Vectorizedi;
363
+ Vectorized() {}
364
+ Vectorized(int16_t v) { values = _mm512_set1_epi16(v); }
365
+ Vectorized(int16_t val1, int16_t val2, int16_t val3, int16_t val4,
366
+ int16_t val5, int16_t val6, int16_t val7, int16_t val8,
367
+ int16_t val9, int16_t val10, int16_t val11, int16_t val12,
368
+ int16_t val13, int16_t val14, int16_t val15, int16_t val16,
369
+ int16_t val17, int16_t val18, int16_t val19, int16_t val20,
370
+ int16_t val21, int16_t val22, int16_t val23, int16_t val24,
371
+ int16_t val25, int16_t val26, int16_t val27, int16_t val28,
372
+ int16_t val29, int16_t val30, int16_t val31, int16_t val32) {
373
+ values = _mm512_set_epi16(val32, val31, val30, val29, val28, val27, val26, val25,
374
+ val24, val23, val22, val21, val20, val19, val18, val17,
375
+ val16, val15, val14, val13, val12, val11, val10, val9,
376
+ val8, val7, val6, val5, val4, val3, val2, val1);
377
+ }
378
+ template <int64_t mask>
379
+ static Vectorized<int16_t> blend(Vectorized<int16_t> a, Vectorized<int16_t> b) {
380
+ return _mm512_mask_blend_epi16(mask, a.values, b.values);
381
+ }
382
+ static Vectorized<int16_t> blendv(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b,
383
+ const Vectorized<int16_t>& mask) {
384
+ auto msb_one = _mm512_set1_epi16(0xFFFF);
385
+ auto mask_ = _mm512_cmp_epi16_mask(mask, msb_one, _MM_CMPINT_EQ);
386
+ return _mm512_mask_blend_epi16(mask_, a.values, b.values);
387
+ }
388
+ template <typename step_t>
389
+ static Vectorized<int16_t> arange(int16_t base = 0, step_t step = static_cast<step_t>(1)) {
390
+ return Vectorized<int16_t>(
391
+ base, base + step, base + 2 * step, base + 3 * step,
392
+ base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step,
393
+ base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step,
394
+ base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step,
395
+ base + 16 * step, base + 17 * step, base + 18 * step, base + 19 * step,
396
+ base + 20 * step, base + 21 * step, base + 22 * step, base + 23 * step,
397
+ base + 24 * step, base + 25 * step, base + 26 * step, base + 27 * step,
398
+ base + 28 * step, base + 29 * step, base + 30 * step, base + 31 * step
399
+ );
400
+ }
401
+ static Vectorized<int16_t>
402
+ set(Vectorized<int16_t> a, Vectorized<int16_t> b, int16_t count = size()) {
403
+ switch (count) {
404
+ case 0:
405
+ return a;
406
+ case 1:
407
+ return blend<0x1>(a, b);
408
+ case 2:
409
+ return blend<0x3>(a, b);
410
+ case 3:
411
+ return blend<0x7>(a, b);
412
+ case 4:
413
+ return blend<0xF>(a, b);
414
+ case 5:
415
+ return blend<0x1F>(a, b);
416
+ case 6:
417
+ return blend<0x3F>(a, b);
418
+ case 7:
419
+ return blend<0x7F>(a, b);
420
+ case 8:
421
+ return blend<0xFF>(a, b);
422
+ case 9:
423
+ return blend<0x1FF>(a, b);
424
+ case 10:
425
+ return blend<0x3FF>(a, b);
426
+ case 11:
427
+ return blend<0x7FF>(a, b);
428
+ case 12:
429
+ return blend<0xFFF>(a, b);
430
+ case 13:
431
+ return blend<0x1FFF>(a, b);
432
+ case 14:
433
+ return blend<0x3FFF>(a, b);
434
+ case 15:
435
+ return blend<0x7FFF>(a, b);
436
+ case 16:
437
+ return blend<0xFFFF>(a, b);
438
+ case 17:
439
+ return blend<0x1FFFF>(a, b);
440
+ case 18:
441
+ return blend<0x3FFFF>(a, b);
442
+ case 19:
443
+ return blend<0x7FFFF>(a, b);
444
+ case 20:
445
+ return blend<0xFFFFF>(a, b);
446
+ case 21:
447
+ return blend<0x1FFFFF>(a, b);
448
+ case 22:
449
+ return blend<0x3FFFFF>(a, b);
450
+ case 23:
451
+ return blend<0x7FFFFF>(a, b);
452
+ case 24:
453
+ return blend<0xFFFFFF>(a, b);
454
+ case 25:
455
+ return blend<0x1FFFFFF>(a, b);
456
+ case 26:
457
+ return blend<0x3FFFFFF>(a, b);
458
+ case 27:
459
+ return blend<0x7FFFFFF>(a, b);
460
+ case 28:
461
+ return blend<0xFFFFFFF>(a, b);
462
+ case 29:
463
+ return blend<0x1FFFFFFF>(a, b);
464
+ case 30:
465
+ return blend<0x3FFFFFFF>(a, b);
466
+ case 31:
467
+ return blend<0x7FFFFFFF>(a, b);
468
+ }
469
+ return b;
470
+ }
471
+ static Vectorized<int16_t> loadu(const void* ptr) {
472
+ return _mm512_loadu_si512(reinterpret_cast<const __m512i*>(ptr));
473
+ }
474
+ static Vectorized<int16_t> loadu(const void* ptr, int16_t count) {
475
+ if (count == size()) {
476
+ return _mm512_loadu_si512(reinterpret_cast<const __m512i*>(ptr));
477
+ } else {
478
+ __mmask32 mask = (1ULL << count) - 1;
479
+ return _mm512_maskz_loadu_epi16(mask, ptr);
480
+ }
481
+ }
482
+ void store(void* ptr, int count = size()) const {
483
+ if (count == size()) {
484
+ // ptr need not be aligned here. See
485
+ // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm512-storeu-si512.html
486
+ _mm512_storeu_si512(reinterpret_cast<__m512i*>(ptr), values);
487
+ } else if (count > 0) {
488
+ __mmask32 mask = (1ULL << count) - 1;
489
+ _mm512_mask_storeu_epi16(ptr, mask, values);
490
+ }
491
+ }
492
+ const int16_t& operator[](int idx) const = delete;
493
+ int16_t& operator[](int idx) = delete;
494
+ Vectorized<int16_t> abs() const {
495
+ return _mm512_abs_epi16(values);
496
+ }
497
+ Vectorized<int16_t> real() const {
498
+ return *this;
499
+ }
500
+ Vectorized<int16_t> imag() const {
501
+ return _mm512_set1_epi16(0);
502
+ }
503
+ Vectorized<int16_t> conj() const {
504
+ return *this;
505
+ }
506
+ Vectorized<int16_t> neg() const;
507
+ Vectorized<int16_t> operator==(const Vectorized<int16_t>& other) const {
508
+ auto mask = _mm512_cmpeq_epi16_mask(values, other.values);
509
+ return _mm512_mask_set1_epi16(zero_vector, mask, 0xFFFF);
510
+ }
511
+ Vectorized<int16_t> operator!=(const Vectorized<int16_t>& other) const {
512
+ auto mask = _mm512_cmpneq_epi16_mask(values, other.values);
513
+ return _mm512_mask_set1_epi16(zero_vector, mask, 0xFFFF);
514
+ }
515
+ Vectorized<int16_t> operator<(const Vectorized<int16_t>& other) const {
516
+ auto mask = _mm512_cmplt_epi16_mask(values, other.values);
517
+ return _mm512_mask_set1_epi16(zero_vector, mask, 0xFFFF);
518
+ }
519
+ Vectorized<int16_t> operator<=(const Vectorized<int16_t>& other) const {
520
+ auto mask = _mm512_cmple_epi16_mask(values, other.values);
521
+ return _mm512_mask_set1_epi16(zero_vector, mask, 0xFFFF);
522
+ }
523
+ Vectorized<int16_t> operator>(const Vectorized<int16_t>& other) const {
524
+ auto mask = _mm512_cmpgt_epi16_mask(values, other.values);
525
+ return _mm512_mask_set1_epi16(zero_vector, mask, 0xFFFF);
526
+ }
527
+ Vectorized<int16_t> operator>=(const Vectorized<int16_t>& other) const {
528
+ auto mask = _mm512_cmpge_epi16_mask(values, other.values);
529
+ return _mm512_mask_set1_epi16(zero_vector, mask, 0xFFFF);
530
+ }
531
+
532
+ Vectorized<int16_t> eq(const Vectorized<int16_t>& other) const;
533
+ Vectorized<int16_t> ne(const Vectorized<int16_t>& other) const;
534
+ Vectorized<int16_t> gt(const Vectorized<int16_t>& other) const;
535
+ Vectorized<int16_t> ge(const Vectorized<int16_t>& other) const;
536
+ Vectorized<int16_t> lt(const Vectorized<int16_t>& other) const;
537
+ Vectorized<int16_t> le(const Vectorized<int16_t>& other) const;
538
+ };
539
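A short sketch (not part of the commit) illustrating the mask convention used by blendv above: lanes whose bits are all ones select from the second operand. relu16 is a hypothetical helper.

#include <ATen/cpu/vec/vec.h>
#include <cstdint>

// Zero out negative lanes: (x < zero) produces all-ones lanes where x is negative,
// and blendv picks the second argument (zero) exactly in those lanes.
at::vec::Vectorized<int16_t> relu16(const at::vec::Vectorized<int16_t>& x) {
  using Vec = at::vec::Vectorized<int16_t>;
  Vec zero(0);
  return Vec::blendv(x, zero, x < zero);
}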
+
540
+ template <typename T>
541
+ class Vectorized8 : public Vectorizedi {
542
+ static_assert(
543
+ std::is_same<T, int8_t>::value || std::is_same<T, uint8_t>::value,
544
+ "Only int8_t/uint8_t are supported");
545
+ protected:
546
+ static constexpr __m512i zero_vector {0, 0, 0, 0, 0, 0, 0, 0};
547
+ static const Vectorized<T> ones;
548
+ public:
549
+ using value_type = T;
550
+ static constexpr int size() {
551
+ return 64;
552
+ }
553
+ using Vectorizedi::Vectorizedi;
554
+ Vectorized8() {}
555
+ Vectorized8(T v) { values = _mm512_set1_epi8(v); }
556
+ Vectorized8(T val1, T val2, T val3, T val4,
557
+ T val5, T val6, T val7, T val8,
558
+ T val9, T val10, T val11, T val12,
559
+ T val13, T val14, T val15, T val16,
560
+ T val17, T val18, T val19, T val20,
561
+ T val21, T val22, T val23, T val24,
562
+ T val25, T val26, T val27, T val28,
563
+ T val29, T val30, T val31, T val32,
564
+ T val33, T val34, T val35, T val36,
565
+ T val37, T val38, T val39, T val40,
566
+ T val41, T val42, T val43, T val44,
567
+ T val45, T val46, T val47, T val48,
568
+ T val49, T val50, T val51, T val52,
569
+ T val53, T val54, T val55, T val56,
570
+ T val57, T val58, T val59, T val60,
571
+ T val61, T val62, T val63, T val64) {
572
+ values = _mm512_set_epi8(val64, val63, val62, val61, val60, val59, val58, val57,
573
+ val56, val55, val54, val53, val52, val51, val50, val49,
574
+ val48, val47, val46, val45, val44, val43, val42, val41,
575
+ val40, val39, val38, val37, val36, val35, val34, val33,
576
+ val32, val31, val30, val29, val28, val27, val26, val25,
577
+ val24, val23, val22, val21, val20, val19, val18, val17,
578
+ val16, val15, val14, val13, val12, val11, val10, val9,
579
+ val8, val7, val6, val5, val4, val3, val2, val1);
580
+ }
581
+ template <int64_t mask>
582
+ static Vectorized<T> blend(Vectorized<T> a, Vectorized<T> b) {
583
+ return _mm512_mask_blend_epi8(mask, a.values, b.values);
584
+ }
585
+ template <typename step_t>
586
+ static Vectorized<T> arange(T base = 0, step_t step = static_cast<step_t>(1)) {
587
+ return Vectorized<T>(
588
+ base, base + step, base + 2 * step, base + 3 * step,
589
+ base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step,
590
+ base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step,
591
+ base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step,
592
+ base + 16 * step, base + 17 * step, base + 18 * step, base + 19 * step,
593
+ base + 20 * step, base + 21 * step, base + 22 * step, base + 23 * step,
594
+ base + 24 * step, base + 25 * step, base + 26 * step, base + 27 * step,
595
+ base + 28 * step, base + 29 * step, base + 30 * step, base + 31 * step,
596
+ base + 32 * step, base + 33 * step, base + 34 * step, base + 35 * step,
597
+ base + 36 * step, base + 37 * step, base + 38 * step, base + 39 * step,
598
+ base + 40 * step, base + 41 * step, base + 42 * step, base + 43 * step,
599
+ base + 44 * step, base + 45 * step, base + 46 * step, base + 47 * step,
600
+ base + 48 * step, base + 49 * step, base + 50 * step, base + 51 * step,
601
+ base + 52 * step, base + 53 * step, base + 54 * step, base + 55 * step,
602
+ base + 56 * step, base + 57 * step, base + 58 * step, base + 59 * step,
603
+ base + 60 * step, base + 61 * step, base + 62 * step, base + 63 * step);
604
+ }
605
+ static Vectorized<T>
606
+ set(Vectorized<T> a, Vectorized<T> b, T count = size()) {
607
+ switch (count) {
608
+ case 0:
609
+ return a;
610
+ case 1:
611
+ return blend<0x1>(a, b);
612
+ case 2:
613
+ return blend<0x3>(a, b);
614
+ case 3:
615
+ return blend<0x7>(a, b);
616
+ case 4:
617
+ return blend<0xF>(a, b);
618
+ case 5:
619
+ return blend<0x1F>(a, b);
620
+ case 6:
621
+ return blend<0x3F>(a, b);
622
+ case 7:
623
+ return blend<0x7F>(a, b);
624
+ case 8:
625
+ return blend<0xFF>(a, b);
626
+ case 9:
627
+ return blend<0x1FF>(a, b);
628
+ case 10:
629
+ return blend<0x3FF>(a, b);
630
+ case 11:
631
+ return blend<0x7FF>(a, b);
632
+ case 12:
633
+ return blend<0xFFF>(a, b);
634
+ case 13:
635
+ return blend<0x1FFF>(a, b);
636
+ case 14:
637
+ return blend<0x3FFF>(a, b);
638
+ case 15:
639
+ return blend<0x7FFF>(a, b);
640
+ case 16:
641
+ return blend<0xFFFF>(a, b);
642
+ case 17:
643
+ return blend<0x1FFFF>(a, b);
644
+ case 18:
645
+ return blend<0x3FFFF>(a, b);
646
+ case 19:
647
+ return blend<0x7FFFF>(a, b);
648
+ case 20:
649
+ return blend<0xFFFFF>(a, b);
650
+ case 21:
651
+ return blend<0x1FFFFF>(a, b);
652
+ case 22:
653
+ return blend<0x3FFFFF>(a, b);
654
+ case 23:
655
+ return blend<0x7FFFFF>(a, b);
656
+ case 24:
657
+ return blend<0xFFFFFF>(a, b);
658
+ case 25:
659
+ return blend<0x1FFFFFF>(a, b);
660
+ case 26:
661
+ return blend<0x3FFFFFF>(a, b);
662
+ case 27:
663
+ return blend<0x7FFFFFF>(a, b);
664
+ case 28:
665
+ return blend<0xFFFFFFF>(a, b);
666
+ case 29:
667
+ return blend<0x1FFFFFFF>(a, b);
668
+ case 30:
669
+ return blend<0x3FFFFFFF>(a, b);
670
+ case 31:
671
+ return blend<0x7FFFFFFF>(a, b);
672
+ case 32:
673
+ return blend<0xFFFFFFFF>(a, b);
674
+ case 33:
675
+ return blend<0x1FFFFFFFF>(a, b);
676
+ case 34:
677
+ return blend<0x3FFFFFFFF>(a, b);
678
+ case 35:
679
+ return blend<0x7FFFFFFFF>(a, b);
680
+ case 36:
681
+ return blend<0xFFFFFFFFF>(a, b);
682
+ case 37:
683
+ return blend<0x1FFFFFFFFF>(a, b);
684
+ case 38:
685
+ return blend<0x3FFFFFFFFF>(a, b);
686
+ case 39:
687
+ return blend<0x7FFFFFFFFF>(a, b);
688
+ case 40:
689
+ return blend<0xFFFFFFFFFF>(a, b);
690
+ case 41:
691
+ return blend<0x1FFFFFFFFFF>(a, b);
692
+ case 42:
693
+ return blend<0x3FFFFFFFFFF>(a, b);
694
+ case 43:
695
+ return blend<0x7FFFFFFFFFF>(a, b);
696
+ case 44:
697
+ return blend<0xFFFFFFFFFFF>(a, b);
698
+ case 45:
699
+ return blend<0x1FFFFFFFFFFF>(a, b);
700
+ case 46:
701
+ return blend<0x3FFFFFFFFFFF>(a, b);
702
+ case 47:
703
+ return blend<0x7FFFFFFFFFFF>(a, b);
704
+ case 48:
705
+ return blend<0xFFFFFFFFFFFF>(a, b);
706
+ case 49:
707
+ return blend<0x1FFFFFFFFFFFF>(a, b);
708
+ case 50:
709
+ return blend<0x3FFFFFFFFFFFF>(a, b);
710
+ case 51:
711
+ return blend<0x7FFFFFFFFFFFF>(a, b);
712
+ case 52:
713
+ return blend<0xFFFFFFFFFFFFF>(a, b);
714
+ case 53:
715
+ return blend<0x1FFFFFFFFFFFFF>(a, b);
716
+ case 54:
717
+ return blend<0x3FFFFFFFFFFFFF>(a, b);
718
+ case 55:
719
+ return blend<0x7FFFFFFFFFFFFF>(a, b);
720
+ case 56:
721
+ return blend<0xFFFFFFFFFFFFFF>(a, b);
722
+ case 57:
723
+ return blend<0x1FFFFFFFFFFFFFF>(a, b);
724
+ case 58:
725
+ return blend<0x3FFFFFFFFFFFFFF>(a, b);
726
+ case 59:
727
+ return blend<0x7FFFFFFFFFFFFFF>(a, b);
728
+ case 60:
729
+ return blend<0xFFFFFFFFFFFFFFF>(a, b);
730
+ case 61:
731
+ return blend<0x1FFFFFFFFFFFFFFF>(a, b);
732
+ case 62:
733
+ return blend<0x3FFFFFFFFFFFFFFF>(a, b);
734
+ case 63:
735
+ return blend<0x7FFFFFFFFFFFFFFF>(a, b);
736
+ }
737
+ return b;
738
+ }
739
+ static Vectorized<T> loadu(const void* ptr) {
740
+ return _mm512_loadu_si512(reinterpret_cast<const __m512i*>(ptr));
741
+ }
742
+ static Vectorized<T> loadu_one_fourth(const void* ptr) {
743
+ // Fast path when loading exactly 16 elements.
744
+ // Note: this is not merged into loadu(const void* ptr, T count) as a fast path
745
+ // because loadu(const void* ptr, T count) requires the upper 384 bits to be zero-initialized,
746
+ // whereas _mm512_castsi128_si512 leaves the upper 384 bits of the result undefined.
747
+ // TODO<leslie> We can use _mm512_zextsi128_si512 in the future,
748
+ // since gcc 9.3 doesn't support it yet.
749
+ __m128i input_128 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(ptr));
750
+ return _mm512_castsi128_si512(input_128);
751
+ }
752
+ static Vectorized<T> loadu(const void* ptr, T count) {
753
+ if (count == size()) {
754
+ return _mm512_loadu_si512(reinterpret_cast<const __m512i*>(ptr));
755
+ } else if (count == 16) {
756
+ // Fast path when loading exactly 16 elements
757
+ return loadu_one_fourth(ptr);
758
+ } else {
759
+ __mmask64 mask = (1ULL << count) - 1;
760
+ return _mm512_maskz_loadu_epi8(mask, ptr);
761
+ }
762
+ }
763
+ void store(void* ptr, int count = size()) const {
764
+ if (count == size()) {
765
+ // ptr need not be aligned here. See
766
+ // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm512-storeu-si512.html
767
+ _mm512_storeu_si512(reinterpret_cast<__m512i*>(ptr), values);
768
+ } else if (count > 0) {
769
+ if (count == 16) {
770
+ // Fast path when storing exactly 16 elements
771
+ _mm_storeu_si128(
772
+ reinterpret_cast<__m128i*>(ptr),
773
+ _mm512_castsi512_si128(values));
774
+ } else {
775
+ __mmask64 mask = (1ULL << count) - 1;
776
+ _mm512_mask_storeu_epi8(ptr, mask, values);
777
+ }
778
+ }
779
+ }
780
+ const T& operator[](int idx) const = delete;
781
+ T& operator[](int idx) = delete;
782
+ Vectorized<T> real() const {
783
+ return *this;
784
+ }
785
+ Vectorized<T> imag() const {
786
+ return _mm512_set1_epi8(0);
787
+ }
788
+ Vectorized<T> conj() const {
789
+ return *this;
790
+ }
791
+ };
792
+
793
+ template<>
794
+ class Vectorized<int8_t>: public Vectorized8<int8_t> {
795
+ public:
796
+ using Vectorized8::Vectorized8;
797
+
798
+ static Vectorized<int8_t> blendv(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b,
799
+ const Vectorized<int8_t>& mask) {
800
+ auto msb_one = _mm512_set1_epi8(0xFF);
801
+ auto mask_ = _mm512_cmp_epi8_mask(mask, msb_one, _MM_CMPINT_EQ);
802
+ return _mm512_mask_blend_epi8(mask_, a.values, b.values);
803
+ }
804
+
805
+ Vectorized<int8_t> neg() const;
806
+
807
+ Vectorized<int8_t> abs() const {
808
+ return _mm512_abs_epi8(values);
809
+ }
810
+
811
+ Vectorized<int8_t> operator==(const Vectorized<int8_t>& other) const {
812
+ auto mask = _mm512_cmpeq_epi8_mask(values, other.values);
813
+ return _mm512_mask_set1_epi8(zero_vector, mask, 0xFF);
814
+ }
815
+ Vectorized<int8_t> operator!=(const Vectorized<int8_t>& other) const {
816
+ auto mask = _mm512_cmpneq_epi8_mask(values, other.values);
817
+ return _mm512_mask_set1_epi8(zero_vector, mask, 0xFF);
818
+ }
819
+ Vectorized<int8_t> operator<(const Vectorized<int8_t>& other) const {
820
+ auto mask = _mm512_cmplt_epi8_mask(values, other.values);
821
+ return _mm512_mask_set1_epi8(zero_vector, mask, 0xFF);
822
+ }
823
+ Vectorized<int8_t> operator<=(const Vectorized<int8_t>& other) const {
824
+ auto mask = _mm512_cmple_epi8_mask(values, other.values);
825
+ return _mm512_mask_set1_epi8(zero_vector, mask, 0xFF);
826
+ }
827
+ Vectorized<int8_t> operator>(const Vectorized<int8_t>& other) const {
828
+ return other < *this;
829
+ }
830
+ Vectorized<int8_t> operator>=(const Vectorized<int8_t>& other) const {
831
+ return other <= *this;
832
+ }
833
+
834
+ Vectorized<int8_t> eq(const Vectorized<int8_t>& other) const;
835
+ Vectorized<int8_t> ne(const Vectorized<int8_t>& other) const;
836
+ Vectorized<int8_t> gt(const Vectorized<int8_t>& other) const;
837
+ Vectorized<int8_t> ge(const Vectorized<int8_t>& other) const;
838
+ Vectorized<int8_t> lt(const Vectorized<int8_t>& other) const;
839
+ Vectorized<int8_t> le(const Vectorized<int8_t>& other) const;
840
+ };
841
+
842
+ template<>
843
+ class Vectorized<uint8_t>: public Vectorized8<uint8_t> {
844
+ public:
845
+ using Vectorized8::Vectorized8;
846
+
847
+ static Vectorized<uint8_t> blendv(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b,
848
+ const Vectorized<uint8_t>& mask) {
849
+ auto msb_one = _mm512_set1_epi8(0xFF);
850
+ auto mask_ = _mm512_cmp_epu8_mask(mask, msb_one, _MM_CMPINT_EQ);
851
+ return _mm512_mask_blend_epi8(mask_, a.values, b.values);
852
+ }
853
+
854
+ Vectorized<uint8_t> neg() const;
855
+
856
+ Vectorized<uint8_t> abs() const {
857
+ return *this;
858
+ }
859
+
860
+ Vectorized<uint8_t> operator==(const Vectorized<uint8_t>& other) const {
861
+ auto mask = _mm512_cmpeq_epu8_mask(values, other.values);
862
+ return _mm512_mask_set1_epi8(zero_vector, mask, 0xFF);
863
+ }
864
+ Vectorized<uint8_t> operator!=(const Vectorized<uint8_t>& other) const {
865
+ auto mask = _mm512_cmpneq_epu8_mask(values, other.values);
866
+ return _mm512_mask_set1_epi8(zero_vector, mask, 0xFF);
867
+ }
868
+ Vectorized<uint8_t> operator<(const Vectorized<uint8_t>& other) const {
869
+ auto mask = _mm512_cmplt_epu8_mask(values, other.values);
870
+ return _mm512_mask_set1_epi8(zero_vector, mask, 0xFF);
871
+ }
872
+ Vectorized<uint8_t> operator<=(const Vectorized<uint8_t>& other) const {
873
+ auto mask = _mm512_cmple_epu8_mask(values, other.values);
874
+ return _mm512_mask_set1_epi8(zero_vector, mask, 0xFF);
875
+ }
876
+ Vectorized<uint8_t> operator>(const Vectorized<uint8_t>& other) const {
877
+ return other < *this;
878
+ }
879
+ Vectorized<uint8_t> operator>=(const Vectorized<uint8_t>& other) const {
880
+ return other <= *this;
881
+ }
882
+
883
+ Vectorized<uint8_t> eq(const Vectorized<uint8_t>& other) const;
884
+ Vectorized<uint8_t> ne(const Vectorized<uint8_t>& other) const;
885
+ Vectorized<uint8_t> gt(const Vectorized<uint8_t>& other) const;
886
+ Vectorized<uint8_t> ge(const Vectorized<uint8_t>& other) const;
887
+ Vectorized<uint8_t> lt(const Vectorized<uint8_t>& other) const;
888
+ Vectorized<uint8_t> le(const Vectorized<uint8_t>& other) const;
889
+ };
890
+
891
+ template <>
892
+ Vectorized<int64_t> inline operator+(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
893
+ return _mm512_add_epi64(a, b);
894
+ }
895
+
896
+ template <>
897
+ Vectorized<int32_t> inline operator+(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
898
+ return _mm512_add_epi32(a, b);
899
+ }
900
+
901
+ template <>
902
+ Vectorized<int16_t> inline operator+(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
903
+ return _mm512_add_epi16(a, b);
904
+ }
905
+
906
+ template <>
907
+ Vectorized<int8_t> inline operator+(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
908
+ return _mm512_add_epi8(a, b);
909
+ }
910
+
911
+ template <>
912
+ Vectorized<uint8_t> inline operator+(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
913
+ return _mm512_add_epi8(a, b);
914
+ }
915
+
916
+ template <>
917
+ Vectorized<int64_t> inline operator-(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
918
+ return _mm512_sub_epi64(a, b);
919
+ }
920
+
921
+ template <>
922
+ Vectorized<int32_t> inline operator-(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
923
+ return _mm512_sub_epi32(a, b);
924
+ }
925
+
926
+ template <>
927
+ Vectorized<int16_t> inline operator-(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
928
+ return _mm512_sub_epi16(a, b);
929
+ }
930
+
931
+ template <>
932
+ Vectorized<int8_t> inline operator-(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
933
+ return _mm512_sub_epi8(a, b);
934
+ }
935
+
936
+ template <>
937
+ Vectorized<uint8_t> inline operator-(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
938
+ return _mm512_sub_epi8(a, b);
939
+ }
940
+
941
+ // Negation. Defined here so we can utilize operator-
942
+ inline Vectorized<int64_t> Vectorized<int64_t>::neg() const {
943
+ return Vectorized<int64_t>(0) - *this;
944
+ }
945
+
946
+ inline Vectorized<int32_t> Vectorized<int32_t>::neg() const {
947
+ return Vectorized<int32_t>(0) - *this;
948
+ }
949
+
950
+ inline Vectorized<int16_t> Vectorized<int16_t>::neg() const {
951
+ return Vectorized<int16_t>(0) - *this;
952
+ }
953
+
954
+ inline Vectorized<int8_t> Vectorized<int8_t>::neg() const {
955
+ return Vectorized<int8_t>(0) - *this;
956
+ }
957
+
958
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::neg() const {
959
+ return Vectorized<uint8_t>(0) - *this;
960
+ }
961
+
962
+ template <>
963
+ Vectorized<int64_t> inline operator*(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
964
+ return _mm512_mullo_epi64(a, b);
965
+ }
966
+
967
+ template <>
968
+ Vectorized<int32_t> inline operator*(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
969
+ return _mm512_mullo_epi32(a, b);
970
+ }
971
+
972
+ template <>
973
+ Vectorized<int16_t> inline operator*(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
974
+ return _mm512_mullo_epi16(a, b);
975
+ }
976
+
977
+ template <typename T, typename Op>
978
+ Vectorized<T> inline int_elementwise_binary_512(const Vectorized<T>& a, const Vectorized<T>& b, Op op) {
979
+ T values_a[Vectorized<T>::size()];
980
+ T values_b[Vectorized<T>::size()];
981
+ a.store(values_a);
982
+ b.store(values_b);
983
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
984
+ values_a[i] = op(values_a[i], values_b[i]);
985
+ }
986
+ return Vectorized<T>::loadu(values_a);
987
+ }
988
+
989
+ template <>
990
+ Vectorized<int8_t> inline operator*(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
991
+ // We don't have an instruction for multiplying int8_t
992
+ #ifndef CPU_CAPABILITY_AVX512
993
+ return int_elementwise_binary_512(a, b, std::multiplies<int8_t>());
994
+ #else
995
+ __m512i mask00FF = _mm512_set1_epi16(0x00FF);
996
+ __m512i a_lo = _mm512_srai_epi16(_mm512_slli_epi16(a, 8), 8);
997
+ __m512i b_lo = _mm512_srai_epi16(_mm512_slli_epi16(b, 8), 8);
998
+ __m512i a_hi = _mm512_srai_epi16(a, 8);
999
+ __m512i b_hi = _mm512_srai_epi16(b, 8);
1000
+ __m512i res_lo = _mm512_and_si512(_mm512_mullo_epi16(a_lo, b_lo), mask00FF);
1001
+ __m512i res_hi = _mm512_slli_epi16(_mm512_mullo_epi16(a_hi, b_hi), 8);
1002
+ __m512i res = _mm512_or_si512(res_hi, res_lo);
1003
+ return res;
1004
+ #endif
1005
+ }
1006
+
1007
+ template <>
1008
+ Vectorized<uint8_t> inline operator*(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
1009
+ // We don't have an instruction for multiplying uint8_t
1010
+ #ifndef CPU_CAPABILITY_AVX512
1011
+ return int_elementwise_binary_512(a, b, std::multiplies<uint8_t>());
1012
+ #else
1013
+ __m512i mask00FF = _mm512_set1_epi16(0x00FF);
1014
+ __m512i a_lo = _mm512_and_si512(a, mask00FF);
1014
+ __m512i b_lo = _mm512_and_si512(b, mask00FF);
1016
+ __m512i a_hi = _mm512_srli_epi16(a, 8);
1017
+ __m512i b_hi = _mm512_srli_epi16(b, 8);
1018
+ __m512i res_lo = _mm512_and_si512(_mm512_mullo_epi16(a_lo, b_lo), mask00FF);
1019
+ __m512i res_hi = _mm512_slli_epi16(_mm512_mullo_epi16(a_hi, b_hi), 8);
1020
+ __m512i res = _mm512_or_si512(res_hi, res_lo);
1021
+ return res;
1022
+ #endif
1023
+ }
1024
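A scalar sketch (not part of the commit) of the invariant that the even/odd 16-bit multiply trick above relies on: keeping only the low 8 bits of the widened product matches ordinary wrapping 8-bit multiplication. check_low_byte_product is a hypothetical helper.

#include <cassert>
#include <cstdint>

// For any int8_t a, b: the low byte of the exact 16-bit product equals
// the modulo-256 product, so per-lane truncation in the kernel above is safe.
inline void check_low_byte_product(int8_t a, int8_t b) {
  int16_t wide = static_cast<int16_t>(a) * static_cast<int16_t>(b);
  uint8_t low_byte = static_cast<uint8_t>(wide);
  uint8_t wrapped = static_cast<uint8_t>(static_cast<uint8_t>(a) * static_cast<uint8_t>(b));
  assert(low_byte == wrapped);
}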
+
1025
+ template <>
1026
+ Vectorized<int64_t> inline minimum(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
1027
+ return _mm512_min_epi64(a, b);
1028
+ }
1029
+
1030
+ template <>
1031
+ Vectorized<int32_t> inline minimum(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
1032
+ return _mm512_min_epi32(a, b);
1033
+ }
1034
+
1035
+ template <>
1036
+ Vectorized<int16_t> inline minimum(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
1037
+ return _mm512_min_epi16(a, b);
1038
+ }
1039
+
1040
+ template <>
1041
+ Vectorized<int8_t> inline minimum(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
1042
+ return _mm512_min_epi8(a, b);
1043
+ }
1044
+
1045
+ template <>
1046
+ Vectorized<uint8_t> inline minimum(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
1047
+ return _mm512_min_epu8(a, b);
1048
+ }
1049
+
1050
+ template <>
1051
+ Vectorized<int64_t> inline maximum(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
1052
+ return _mm512_max_epi64(a, b);
1053
+ }
1054
+
1055
+ template <>
1056
+ Vectorized<int32_t> inline maximum(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
1057
+ return _mm512_max_epi32(a, b);
1058
+ }
1059
+
1060
+ template <>
1061
+ Vectorized<int16_t> inline maximum(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
1062
+ return _mm512_max_epi16(a, b);
1063
+ }
1064
+
1065
+ template <>
1066
+ Vectorized<int8_t> inline maximum(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
1067
+ return _mm512_max_epi8(a, b);
1068
+ }
1069
+
1070
+ template <>
1071
+ Vectorized<uint8_t> inline maximum(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
1072
+ return _mm512_max_epu8(a, b);
1073
+ }
1074
+
1075
+ template <>
1076
+ Vectorized<int64_t> inline clamp(const Vectorized<int64_t>& a, const Vectorized<int64_t>& min_val, const Vectorized<int64_t>& max_val) {
1077
+ return _mm512_min_epi64(max_val, _mm512_max_epi64(a, min_val));
1078
+ }
1079
+
1080
+ template <>
1081
+ Vectorized<int32_t> inline clamp(const Vectorized<int32_t>& a, const Vectorized<int32_t>& min_val, const Vectorized<int32_t>& max_val) {
1082
+ return _mm512_min_epi32(max_val, _mm512_max_epi32(a, min_val));
1083
+ }
1084
+
1085
+ template <>
1086
+ Vectorized<int16_t> inline clamp(const Vectorized<int16_t>& a, const Vectorized<int16_t>& min_val, const Vectorized<int16_t>& max_val) {
1087
+ return _mm512_min_epi16(max_val, _mm512_max_epi16(a, min_val));
1088
+ }
1089
+
1090
+ template <>
1091
+ Vectorized<int8_t> inline clamp(const Vectorized<int8_t>& a, const Vectorized<int8_t>& min_val, const Vectorized<int8_t>& max_val) {
1092
+ return _mm512_min_epi8(max_val, _mm512_max_epi8(a, min_val));
1093
+ }
1094
+
1095
+ template <>
1096
+ Vectorized<uint8_t> inline clamp(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& min_val, const Vectorized<uint8_t>& max_val) {
1097
+ return _mm512_min_epu8(max_val, _mm512_max_epu8(a, min_val));
1098
+ }
1099
+
1100
+ template <>
1101
+ Vectorized<int64_t> inline clamp_max(const Vectorized<int64_t>& a, const Vectorized<int64_t>& max_val) {
1102
+ return _mm512_min_epi64(max_val, a);
1103
+ }
1104
+
1105
+ template <>
1106
+ Vectorized<int32_t> inline clamp_max(const Vectorized<int32_t>& a, const Vectorized<int32_t>& max_val) {
1107
+ return _mm512_min_epi32(max_val, a);
1108
+ }
1109
+
1110
+ template <>
1111
+ Vectorized<int16_t> inline clamp_max(const Vectorized<int16_t>& a, const Vectorized<int16_t>& max_val) {
1112
+ return _mm512_min_epi16(max_val, a);
1113
+ }
1114
+
1115
+ template <>
1116
+ Vectorized<int8_t> inline clamp_max(const Vectorized<int8_t>& a, const Vectorized<int8_t>& max_val) {
1117
+ return _mm512_min_epi8(max_val, a);
1118
+ }
1119
+
1120
+ template <>
1121
+ Vectorized<uint8_t> inline clamp_max(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& max_val) {
1122
+ return _mm512_min_epu8(max_val, a);
1123
+ }
1124
+
1125
+ template <>
1126
+ Vectorized<int64_t> inline clamp_min(const Vectorized<int64_t>& a, const Vectorized<int64_t>& min_val) {
1127
+ return _mm512_max_epi64(min_val, a);
1128
+ }
1129
+
1130
+ template <>
1131
+ Vectorized<int32_t> inline clamp_min(const Vectorized<int32_t>& a, const Vectorized<int32_t>& min_val) {
1132
+ return _mm512_max_epi32(min_val, a);
1133
+ }
1134
+
1135
+ template <>
1136
+ Vectorized<int16_t> inline clamp_min(const Vectorized<int16_t>& a, const Vectorized<int16_t>& min_val) {
1137
+ return _mm512_max_epi16(min_val, a);
1138
+ }
1139
+
1140
+ template <>
1141
+ Vectorized<int8_t> inline clamp_min(const Vectorized<int8_t>& a, const Vectorized<int8_t>& min_val) {
1142
+ return _mm512_max_epi8(min_val, a);
1143
+ }
1144
+
1145
+ template <>
1146
+ Vectorized<uint8_t> inline clamp_min(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& min_val) {
1147
+ return _mm512_max_epu8(min_val, a);
1148
+ }
1149
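A minimal usage sketch (not part of the commit) of the clamp specializations above; clamp_block is a hypothetical helper that operates on one full 16-lane vector.

#include <ATen/cpu/vec/vec.h>
#include <cstdint>

// Clamp 16 contiguous int32 values into [lo, hi] using the AVX-512 clamp above.
void clamp_block(int32_t* data, int32_t lo, int32_t hi) {
  using Vec = at::vec::Vectorized<int32_t>;
  Vec v = Vec::loadu(data);
  at::vec::clamp(v, Vec(lo), Vec(hi)).store(data);
}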
+
1150
+ template<typename T>
1151
+ Vectorized<int32_t> inline convert_to_int32(const T* ptr) {
1152
+ return Vectorized<int32_t>::loadu(ptr);
1153
+ }
1154
+
1155
+ template<>
1156
+ Vectorized<int32_t> inline convert_to_int32<int8_t>(const int8_t* ptr) {
1157
+ return _mm512_cvtepi8_epi32(_mm_loadu_si128(reinterpret_cast<const __m128i*>(ptr)));
1158
+ }
1159
+
1160
+ template<>
1161
+ Vectorized<int32_t> inline convert_to_int32<uint8_t>(const uint8_t* ptr) {
1162
+ return _mm512_cvtepu8_epi32(_mm_loadu_si128(reinterpret_cast<const __m128i*>(ptr)));
1163
+ }
1164
+
1165
+ template <>
1166
+ Vectorized<int64_t> inline operator/(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
1167
+ return int_elementwise_binary_512(a, b, std::divides<int64_t>());
1168
+ }
1169
+ template <>
1170
+ Vectorized<int32_t> inline operator/(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
1171
+ return int_elementwise_binary_512(a, b, std::divides<int32_t>());
1172
+ }
1173
+ template <>
1174
+ Vectorized<int16_t> inline operator/(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
1175
+ return int_elementwise_binary_512(a, b, std::divides<int16_t>());
1176
+ }
1177
+ template <>
1178
+ Vectorized<int8_t> inline operator/(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
1179
+ return int_elementwise_binary_512(a, b, std::divides<int8_t>());
1180
+ }
1181
+ template <>
1182
+ Vectorized<uint8_t> inline operator/(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
1183
+ return int_elementwise_binary_512(a, b, std::divides<uint8_t>());
1184
+ }
1185
+
1186
+ template<class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
1187
+ inline Vectorized<T> operator&(const Vectorized<T>& a, const Vectorized<T>& b) {
1188
+ return _mm512_and_si512(a, b);
1189
+ }
1190
+ template<class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
1191
+ inline Vectorized<T> operator|(const Vectorized<T>& a, const Vectorized<T>& b) {
1192
+ return _mm512_or_si512(a, b);
1193
+ }
1194
+ template<class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
1195
+ inline Vectorized<T> operator^(const Vectorized<T>& a, const Vectorized<T>& b) {
1196
+ return _mm512_xor_si512(a, b);
1197
+ }
1198
+ template<class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
1199
+ inline Vectorized<T> operator~(const Vectorized<T>& a) {
1200
+ return _mm512_xor_si512(a, _mm512_set1_epi32(-1));
1201
+ }
1202
+
1203
+ inline Vectorized<int64_t> Vectorized<int64_t>::eq(const Vectorized<int64_t>& other) const {
1204
+ return (*this == other) & Vectorized<int64_t>(1);
1205
+ }
1206
+
1207
+ inline Vectorized<int64_t> Vectorized<int64_t>::ne(const Vectorized<int64_t>& other) const {
1208
+ return (*this != other) & Vectorized<int64_t>(1);
1209
+ }
1210
+
1211
+ inline Vectorized<int64_t> Vectorized<int64_t>::gt(const Vectorized<int64_t>& other) const {
1212
+ return (*this > other) & Vectorized<int64_t>(1);
1213
+ }
1214
+
1215
+ inline Vectorized<int64_t> Vectorized<int64_t>::ge(const Vectorized<int64_t>& other) const {
1216
+ return (*this >= other) & Vectorized<int64_t>(1);
1217
+ }
1218
+
1219
+ inline Vectorized<int64_t> Vectorized<int64_t>::lt(const Vectorized<int64_t>& other) const {
1220
+ return (*this < other) & Vectorized<int64_t>(1);
1221
+ }
1222
+
1223
+ inline Vectorized<int64_t> Vectorized<int64_t>::le(const Vectorized<int64_t>& other) const {
1224
+ return (*this <= other) & Vectorized<int64_t>(1);
1225
+ }
1226
+
1227
+ inline Vectorized<int32_t> Vectorized<int32_t>::eq(const Vectorized<int32_t>& other) const {
1228
+ return (*this == other) & Vectorized<int32_t>(1);
1229
+ }
1230
+
1231
+ inline Vectorized<int32_t> Vectorized<int32_t>::ne(const Vectorized<int32_t>& other) const {
1232
+ return (*this != other) & Vectorized<int32_t>(1);
1233
+ }
1234
+
1235
+ inline Vectorized<int32_t> Vectorized<int32_t>::gt(const Vectorized<int32_t>& other) const {
1236
+ return (*this > other) & Vectorized<int32_t>(1);
1237
+ }
1238
+
1239
+ inline Vectorized<int32_t> Vectorized<int32_t>::ge(const Vectorized<int32_t>& other) const {
1240
+ return (*this >= other) & Vectorized<int32_t>(1);
1241
+ }
1242
+
1243
+ inline Vectorized<int32_t> Vectorized<int32_t>::lt(const Vectorized<int32_t>& other) const {
1244
+ return (*this < other) & Vectorized<int32_t>(1);
1245
+ }
1246
+
1247
+ inline Vectorized<int32_t> Vectorized<int32_t>::le(const Vectorized<int32_t>& other) const {
1248
+ return (*this <= other) & Vectorized<int32_t>(1);
1249
+ }
1250
+
1251
+ inline Vectorized<int16_t> Vectorized<int16_t>::eq(const Vectorized<int16_t>& other) const {
1252
+ return (*this == other) & Vectorized<int16_t>(1);
1253
+ }
1254
+
1255
+ inline Vectorized<int16_t> Vectorized<int16_t>::ne(const Vectorized<int16_t>& other) const {
1256
+ return (*this != other) & Vectorized<int16_t>(1);
1257
+ }
1258
+
1259
+ inline Vectorized<int16_t> Vectorized<int16_t>::gt(const Vectorized<int16_t>& other) const {
1260
+ return (*this > other) & Vectorized<int16_t>(1);
1261
+ }
1262
+
1263
+ inline Vectorized<int16_t> Vectorized<int16_t>::ge(const Vectorized<int16_t>& other) const {
1264
+ return (*this >= other) & Vectorized<int16_t>(1);
1265
+ }
1266
+
1267
+ inline Vectorized<int16_t> Vectorized<int16_t>::lt(const Vectorized<int16_t>& other) const {
1268
+ return (*this < other) & Vectorized<int16_t>(1);
1269
+ }
1270
+
1271
+ inline Vectorized<int16_t> Vectorized<int16_t>::le(const Vectorized<int16_t>& other) const {
1272
+ return (*this <= other) & Vectorized<int16_t>(1);
1273
+ }
1274
+
1275
+ inline Vectorized<int8_t> Vectorized<int8_t>::eq(const Vectorized<int8_t>& other) const {
1276
+ return (*this == other) & Vectorized<int8_t>(1);
1277
+ }
1278
+
1279
+ inline Vectorized<int8_t> Vectorized<int8_t>::ne(const Vectorized<int8_t>& other) const {
1280
+ return (*this != other) & Vectorized<int8_t>(1);
1281
+ }
1282
+
1283
+ inline Vectorized<int8_t> Vectorized<int8_t>::gt(const Vectorized<int8_t>& other) const {
1284
+ return (*this > other) & Vectorized<int8_t>(1);
1285
+ }
1286
+
1287
+ inline Vectorized<int8_t> Vectorized<int8_t>::ge(const Vectorized<int8_t>& other) const {
1288
+ return (*this >= other) & Vectorized<int8_t>(1);
1289
+ }
1290
+
1291
+ inline Vectorized<int8_t> Vectorized<int8_t>::lt(const Vectorized<int8_t>& other) const {
1292
+ return (*this < other) & Vectorized<int8_t>(1);
1293
+ }
1294
+
1295
+ inline Vectorized<int8_t> Vectorized<int8_t>::le(const Vectorized<int8_t>& other) const {
1296
+ return (*this <= other) & Vectorized<int8_t>(1);
1297
+ }
1298
+
1299
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::eq(const Vectorized<uint8_t>& other) const {
1300
+ return (*this == other) & Vectorized<uint8_t>(1);
1301
+ }
1302
+
1303
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::ne(const Vectorized<uint8_t>& other) const {
1304
+ return (*this != other) & Vectorized<uint8_t>(1);
1305
+ }
1306
+
1307
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::gt(const Vectorized<uint8_t>& other) const {
1308
+ return (*this > other) & Vectorized<uint8_t>(1);
1309
+ }
1310
+
1311
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::ge(const Vectorized<uint8_t>& other) const {
1312
+ return (*this >= other) & Vectorized<uint8_t>(1);
1313
+ }
1314
+
1315
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::lt(const Vectorized<uint8_t>& other) const {
1316
+ return (*this < other) & Vectorized<uint8_t>(1);
1317
+ }
1318
+
1319
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::le(const Vectorized<uint8_t>& other) const {
1320
+ return (*this <= other) & Vectorized<uint8_t>(1);
1321
+ }
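+ // Editorial note (added for clarity, not part of the upstream header): unlike
+ // operator== / operator< etc., which yield an all-ones mask per lane, the
+ // eq/ne/gt/ge/lt/le helpers above AND the comparison mask with 1, so each lane
+ // holds 0 or 1. For example, Vectorized<int32_t>(3).eq(Vectorized<int32_t>(3))
+ // is a vector of 1s.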
1322
+
1323
+ template <bool left_shift, typename T, typename std::enable_if_t<std::is_same<T, int8_t>::value || std::is_same<T, uint8_t>::value, int> = 0>
1324
+ Vectorized<T> inline shift_512_8(const Vectorized<T>& a, const Vectorized<T>& b) {
1325
+ // There is no vector instruction for shifting int8_t/uint8_t, so we emulate
1326
+ // it instead.
1327
+
1328
+ // Control masks for shuffle operation, treating 512 bits as an
1329
+ // array of 8-bit elements, and considering pairs of neighboring
1330
+ // elements. Specifically, a mask named "ctl_M_N" (M,N in [0,1], and
1331
+ // M!=N) is set so that shuffle will move element with index M from
1332
+ // input pair into element with index N in output pair, and element
1333
+ // with index M in output pair will be set to all 0s.
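+ // Worked example (editorial note, not in the upstream source): in ctl_0_1 below,
+ // the control byte for output index 0 of each pair is 0x80 (high bit set, so that
+ // output byte becomes 0), while the control byte for output index 1 selects the
+ // even-indexed input byte, moving it into the odd output slot.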
1334
+ __m512i ctl_0_1 = _mm512_set_epi8(62, 0x80, 60, 0x80, 58, 0x80, 56, 0x80,
1335
+ 54, 0x80, 52, 0x80, 50, 0x80, 48, 0x80,
1336
+ 46, 0x80, 44, 0x80, 42, 0x80, 40, 0x80,
1337
+ 38, 0x80, 36, 0x80, 34, 0x80, 32, 0x80,
1338
+ 30, 0x80, 28, 0x80, 26, 0x80, 24, 0x80,
1339
+ 22, 0x80, 20, 0x80, 18, 0x80, 16, 0x80,
1340
+ 14, 0x80, 12, 0x80, 10, 0x80, 8, 0x80,
1341
+ 6, 0x80, 4, 0x80, 2, 0x80, 0, 0x80);
1342
+ __m512i ctl_1_0 = _mm512_set_epi8(0x80, 63, 0x80, 61, 0x80, 59, 0x80, 57,
1343
+ 0x80, 55, 0x80, 53, 0x80, 51, 0x80, 49,
1344
+ 0x80, 47, 0x80, 45, 0x80, 43, 0x80, 41,
1345
+ 0x80, 39, 0x80, 37, 0x80, 35, 0x80, 33,
1346
+ 0x80, 31, 0x80, 29, 0x80, 27, 0x80, 25,
1347
+ 0x80, 23, 0x80, 21, 0x80, 19, 0x80, 17,
1348
+ 0x80, 15, 0x80, 13, 0x80, 11, 0x80, 9,
1349
+ 0x80, 7, 0x80, 5, 0x80, 3, 0x80, 1);
1350
+
1351
+ // Masks for bitwise and operation, treating 512 bits as an array of
1352
+ // 8-bit elements, and considering them in pairs of neighboring
1353
+ // elements. A mask named "keep_M" (M in [0,1]) is set so that
1354
+ // bitwise and will copy element with index M from input pair into
1355
+ // element with the same index in output pair, while the other
1356
+ // element in output pair will be set to all 0s.
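+ // For instance (editorial note): keep_0 = 0x00FF repeated keeps the even-indexed
+ // (low) byte of each 16-bit pair, and keep_1 = 0xFF00 keeps the odd-indexed byte.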
1357
+ __m512i keep_0 = _mm512_set1_epi16(0xFF);
1358
+ __m512i keep_1 = _mm512_set1_epi16(0xFF00);
1359
+
1360
+ // Take each 8-bit element with idx%2==0 from input array to be
1361
+ // shifted and extend it to 16 bits so that 0s are added to the
1362
+ // right. Then, perform shifting on this 16-bit number. Upper 8
1363
+ // bits will be proper result of shifting original 8-bit number, so
1364
+ // write them to result array, into the same position from which
1365
+ // corresponding input element is taken. Also, make sure that
1366
+ // result array elements with idx%2!=0 are set to all 0s.
1367
+ //
1368
+ // Note that number of bits to shift for is extended to 16 bits by
1369
+ // adding 0s to the left. That means this number is not properly
1370
+ // sign-extended for negative values. However, number of bits to
1371
+ // shift is treated as an unsigned integer by respective shift
1372
+ // intrinsics anyway so if negative then either with or without
1373
+ // proper sign extension, it will be interpreted as a number greater
1374
+ // than 32, and the shifting result will be the same.
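+ //
+ // Numeric example (editorial note, not in the upstream source): for a left shift
+ // of an even-indexed byte 0x16 by 3, the shuffled 16-bit lane holds 0x1600;
+ // 0x1600 << 3 = 0xB000, whose upper byte 0xB0 equals 0x16 << 3.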
1375
+ __m512i a0 = _mm512_shuffle_epi8(a, ctl_0_1);
1376
+ __m512i b0 = _mm512_and_si512(b, keep_0);
1377
+ __m512i c0;
1378
+ if (left_shift)
1379
+ c0 = _mm512_sllv_epi16(a0, b0);
1380
+ else
1381
+ if constexpr (std::is_same_v<T, int8_t>)
1382
+ c0 = _mm512_srav_epi16(a0, b0);
1383
+ else
1384
+ c0 = _mm512_srlv_epi16(a0, b0);
1385
+ c0 = _mm512_shuffle_epi8(c0, ctl_1_0);
1386
+
1387
+ // Perform shifting the same way for input array elements with
1388
+ // idx%2==1.
1389
+ __m512i a1 = _mm512_and_si512(a, keep_1);
1390
+ __m512i b1 = _mm512_shuffle_epi8(b, ctl_1_0);
1391
+ __m512i c1;
1392
+ if (left_shift)
1393
+ c1 = _mm512_sllv_epi16(a1, b1);
1394
+ else
1395
+ if constexpr (std::is_same_v<T, int8_t>)
1396
+ c1 = _mm512_srav_epi16(a1, b1);
1397
+ else
1398
+ c1 = _mm512_srlv_epi16(a1, b1);
1399
+ c1 = _mm512_and_si512(c1, keep_1);
1400
+
1401
+ // Merge partial results into the final result.
1402
+ __m512i c = _mm512_or_si512(c0, c1);
1403
+
1404
+ return c;
1405
+ }
1406
+
1407
+ template <>
1408
+ Vectorized<int64_t> inline operator<<(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
1409
+ return _mm512_sllv_epi64(a, b);
1410
+ }
1411
+
1412
+ template <>
1413
+ Vectorized<int32_t> inline operator<<(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
1414
+ return _mm512_sllv_epi32(a, b);
1415
+ }
1416
+
1417
+ template <>
1418
+ Vectorized<int16_t> inline operator<<(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
1419
+ return _mm512_sllv_epi16(a, b);
1420
+ }
1421
+
1422
+ template <>
1423
+ Vectorized<int8_t> inline operator<<(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
1424
+ return shift_512_8<true>(a, b);
1425
+ }
1426
+
1427
+ template <>
1428
+ Vectorized<uint8_t> inline operator<<(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
1429
+ return shift_512_8<true>(a, b);
1430
+ }
1431
+
1432
+ template <>
1433
+ Vectorized<int64_t> inline operator>>(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
1434
+ return _mm512_srav_epi64(a, b);
1435
+ }
1436
+
1437
+ template <>
1438
+ Vectorized<int32_t> inline operator>>(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
1439
+ return _mm512_srav_epi32(a, b);
1440
+ }
1441
+
1442
+ template <>
1443
+ Vectorized<int16_t> inline operator>>(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
1444
+ return _mm512_srav_epi16(a, b);
1445
+ }
1446
+
1447
+ template <>
1448
+ Vectorized<int8_t> inline operator>>(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
1449
+ return shift_512_8<false>(a, b);
1450
+ }
1451
+
1452
+ template <>
1453
+ Vectorized<uint8_t> inline operator>>(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
1454
+ return shift_512_8<false>(a, b);
1455
+ }
1456
+
1457
+ #endif
1458
+
1459
+ }}}
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_qint.h ADDED
@@ -0,0 +1,1346 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+ #include <ATen/cpu/vec/vec_base.h>
8
+ #include <ATen/native/quantized/AffineQuantizerBase.h>
9
+
10
+ #include <c10/util/irange.h>
11
+ #include <c10/util/qint32.h>
12
+ #include <c10/util/qint8.h>
13
+ #include <c10/util/quint8.h>
14
+
15
+ #include <array>
16
+ #include <cmath>
17
+
18
+ // This file defines Vectorized<> for the quantized types.
19
+ //
20
+ //
21
+ // Currently, we simply use these classes as efficient converters between
22
+ // the quantized types and Vectorized<float>, usually in bandwidth-bound cases
23
+ // where doing the arithmetic in full-precision is acceptable (e.g.
24
+ // elementwise operators).
25
+ //
26
+ //
27
+ // Conversions are as follows:
28
+ // Vectorized<qint8> -> 4x Vectorized<float>
29
+ // Vectorized<quint8> -> 4x Vectorized<float>
30
+ // Vectorized<qint32> -> 1x Vectorized<float>
31
+ //
32
+ // The size of the returned float vector is specified by the special
33
+ // constexpr function float_num_vecs. The type of the value returned
34
+ // from dequantize (and expected as an argument to quantize) is
35
+ // specified by float_vec_return_type.
36
+ //
37
+ // When writing kernels with these vectors, it is expected that floating-
38
+ // point operations will be carried out in a loop over Vectorized<T>::float_num_vecs
39
+ // iterations.
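+ //
+ // Illustrative sketch of that pattern (editorial addition, not part of the
+ // upstream header; `qx`, `scale`, `zp`, `premul`, `scale_f`, `zp_i` and
+ // `inv_scale` are hypothetical names):
+ //
+ //   auto dq = qx.dequantize(scale, zp, premul);            // float_vec_return_type
+ //   for (int i = 0; i < Vectorized<c10::qint8>::float_num_vecs(); ++i) {
+ //     dq[i] = dq[i] * dq[i];                                // arbitrary float math
+ //   }
+ //   auto qy = Vectorized<c10::qint8>::quantize(dq, scale_f, zp_i, inv_scale);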
40
+
41
+ namespace at {
42
+ namespace vec {
43
+ inline namespace CPU_CAPABILITY {
44
+
45
+ #if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
46
+
47
+ struct Vectorizedqi {
48
+ protected:
49
+ __m512i vals __attribute__((aligned(64)));
50
+
51
+ public:
52
+ Vectorizedqi() {}
53
+ Vectorizedqi(__m512i v) : vals(v) {}
54
+ operator __m512i() const {
55
+ return vals;
56
+ }
57
+ };
58
+
59
+
60
+ template <typename T>
61
+ __m512i pack_saturate_and_clamp(
62
+ __m512i first,
63
+ __m512i second,
64
+ T min_val,
65
+ T max_val);
66
+
67
+ template <>
68
+ inline __m512i pack_saturate_and_clamp<int32_t>(
69
+ __m512i first,
70
+ __m512i second,
71
+ int32_t min_val,
72
+ int32_t max_val) {
73
+ // This function is for linkage only, will not be used
74
+ AT_ERROR("pack_saturate_and_clamp<int32_t> is not supported");
75
+ }
76
+
77
+ template <>
78
+ inline __m512i pack_saturate_and_clamp<int8_t>(
79
+ __m512i first,
80
+ __m512i second,
81
+ int8_t min_val,
82
+ int8_t max_val) {
83
+ __m512i packed_and_sat = _mm512_packs_epi16(first, second);
84
+ return _mm512_max_epi8(
85
+ _mm512_set1_epi8(min_val),
86
+ _mm512_min_epi8(packed_and_sat, _mm512_set1_epi8(max_val)));
87
+ }
88
+
89
+ template <>
90
+ inline __m512i pack_saturate_and_clamp<uint8_t>(
91
+ __m512i first,
92
+ __m512i second,
93
+ uint8_t min_val,
94
+ uint8_t max_val) {
95
+ __m512i packed_and_sat = _mm512_packus_epi16(first, second);
96
+ return _mm512_max_epu8(
97
+ _mm512_set1_epi8(min_val),
98
+ _mm512_min_epu8(packed_and_sat, _mm512_set1_epi8(max_val)));
99
+ }
100
+
101
+ template <typename T>
102
+ typename std::enable_if<std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, at::vec::Vectorized<float>>::type
103
+ inline convert_int8_to_float(at::vec::Vectorized<T> src) {
104
+ // Note: this function only converts a number of input elements equal to at::vec::Vectorized<float>::size()
105
+ // Only handle first 16*8 bits
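+ // (Editorial note: only the low 128 bits, i.e. 16 bytes, of `src` are widened;
+ // converting a full 64-byte Vectorized<int8_t> takes four such 16-element calls
+ // on suitably shuffled inputs.)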
106
+ __m128i input_128 = _mm512_castsi512_si128(src);
107
+ // Convert from 16*uint8/int8 to 16*int32
108
+ __m512i input_512_extended;
109
+ if constexpr (std::is_same_v<T, uint8_t>)
110
+ input_512_extended = _mm512_cvtepu8_epi32(input_128);
111
+ else
112
+ input_512_extended = _mm512_cvtepi8_epi32(input_128);
113
+ // Convert from 16*int32 to 16*float32
114
+ return _mm512_cvtepi32_ps(input_512_extended);
115
+ }
116
+
117
+ template <typename T>
118
+ typename std::enable_if<std::is_same<T, uint8_t>::value || std::is_same<T, int8_t>::value, at::vec::Vectorized<T>>::type
119
+ inline convert_float_to_int8(at::vec::Vectorized<float> src) {
120
+ // Convert from float32 to int32 with truncation
121
+ __m512i x_values_int32 = _mm512_cvttps_epi32(src);
122
+
123
+ // Convert from int32 to int16 using signed saturation
124
+ __m512i xy_packed_v = _mm512_packs_epi32(x_values_int32, x_values_int32);
125
+
126
+ constexpr auto min_val = std::numeric_limits<T>::min();
127
+ constexpr auto max_val = std::numeric_limits<T>::max();
128
+
129
+ // Convert from int16 to uint8/int8 using unsigned saturation
130
+ __m512i xyzw_clamped_v = pack_saturate_and_clamp<T>(
131
+ xy_packed_v, xy_packed_v, min_val, max_val);
132
+ __m512i permute_mask_v =
133
+ _mm512_set_epi32(0x0f, 0x0b, 0x07, 0x03, 0x0e, 0x0a, 0x06, 0x02,
134
+ 0x0d, 0x09, 0x05, 0x01, 0x0c, 0x08, 0x04, 0x00);
135
+ return _mm512_permutexvar_epi32(permute_mask_v, xyzw_clamped_v);
136
+ }
137
+
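+ // Editorial summary (added, not part of the upstream header): QuantizeAvx512
+ // writes dst[i] = clamp(round(src[i] * inverse_scale) + zero_point) for `len`
+ // floats, processing 4*16 elements per main-loop pass, then 16 at a time, then
+ // a scalar tail.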
138
+ template <typename T>
139
+ inline void __attribute__((always_inline)) QuantizeAvx512(
140
+ const float* src,
141
+ T* dst,
142
+ int len,
143
+ float inverse_scale,
144
+ int64_t zero_point) {
145
+ constexpr int VLEN = 16;
146
+ constexpr auto min_val = std::numeric_limits<T>::min();
147
+ constexpr auto max_val = std::numeric_limits<T>::max();
148
+ const __m512i min_v = _mm512_set1_epi32(min_val);
149
+ const __m512i max_v = _mm512_set1_epi32(max_val);
150
+ // This is the largest int32 value < int32_max exactly representable in float
151
+ constexpr int32_t int32_float_max_val =
152
+ std::numeric_limits<int32_t>::max() - 127;
153
+ int i = 0;
154
+ __m512 inverse_scale_v = _mm512_set1_ps(inverse_scale);
155
+ // clang-format off
156
+ static const __m512i shuffle_mask_v = _mm512_set_epi8(
157
+ 0xff, 0xff, 0xff, 0xff,
158
+ 0xff, 0xff, 0xff, 0xff,
159
+ 0xff, 0xff, 0xff, 0xff,
160
+ 0x0c, 0x08, 0x04, 0x00,
161
+ 0xff, 0xff, 0xff, 0xff,
162
+ 0xff, 0xff, 0xff, 0xff,
163
+ 0xff, 0xff, 0xff, 0xff,
164
+ 0x0c, 0x08, 0x04, 0x00,
165
+ 0xff, 0xff, 0xff, 0xff,
166
+ 0xff, 0xff, 0xff, 0xff,
167
+ 0xff, 0xff, 0xff, 0xff,
168
+ 0x0c, 0x08, 0x04, 0x00,
169
+ 0xff, 0xff, 0xff, 0xff,
170
+ 0xff, 0xff, 0xff, 0xff,
171
+ 0xff, 0xff, 0xff, 0xff,
172
+ 0x0c, 0x08, 0x04, 0x00);
173
+ // clang-format on
174
+ __m512i permute_mask_v =
175
+ _mm512_set_epi32(0x0f, 0x0b, 0x07, 0x03, 0x0e, 0x0a, 0x06, 0x02,
176
+ 0x0d, 0x09, 0x05, 0x01, 0x0c, 0x08, 0x04, 0x00);
177
+ __m512i permute_mask_l8_v =
178
+ _mm512_set_epi32(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
179
+ 0x00, 0x00, 0x00, 0x00, 0x0c, 0x08, 0x04, 0x00);
180
+ int len_aligned = len / (VLEN * 4) * (VLEN * 4);
181
+ for (; i < len_aligned; i += 4 * VLEN) {
182
+ // x
183
+ __m512 x_vals = _mm512_load_ps(src + i);
184
+ __m512 x_transformed_v = _mm512_mul_ps(x_vals, inverse_scale_v);
185
+ // If the floating point value is greater than int32_max,
186
+ // _mm512_cvtps_epi32 converts it to a negative value. Clip at
187
+ // int32_float_max_val to avoid this.
188
+ x_transformed_v =
189
+ _mm512_min_ps(x_transformed_v, _mm512_set1_ps(int32_float_max_val));
190
+ // y
191
+ __m512 y_vals = _mm512_load_ps(src + i + VLEN);
192
+ __m512 y_transformed_v = _mm512_mul_ps(y_vals, inverse_scale_v);
193
+ y_transformed_v =
194
+ _mm512_min_ps(y_transformed_v, _mm512_set1_ps(int32_float_max_val));
195
+ // z
196
+ __m512 z_vals = _mm512_load_ps(src + i + 2 * VLEN);
197
+ __m512 z_transformed_v = _mm512_mul_ps(z_vals, inverse_scale_v);
198
+ z_transformed_v =
199
+ _mm512_min_ps(z_transformed_v, _mm512_set1_ps(int32_float_max_val));
200
+ // w
201
+ __m512 w_vals = _mm512_load_ps(src + i + 3 * VLEN);
202
+ __m512 w_transformed_v = _mm512_mul_ps(w_vals, inverse_scale_v);
203
+ w_transformed_v =
204
+ _mm512_min_ps(w_transformed_v, _mm512_set1_ps(int32_float_max_val));
205
+
206
+ __m512i x_rounded_v = _mm512_cvtps_epi32(x_transformed_v);
207
+ __m512i y_rounded_v = _mm512_cvtps_epi32(y_transformed_v);
208
+ __m512i z_rounded_v = _mm512_cvtps_epi32(z_transformed_v);
209
+ __m512i w_rounded_v = _mm512_cvtps_epi32(w_transformed_v);
210
+
211
+ // add zero point
212
+ x_rounded_v = _mm512_add_epi32(x_rounded_v, _mm512_set1_epi32(zero_point));
213
+ y_rounded_v = _mm512_add_epi32(y_rounded_v, _mm512_set1_epi32(zero_point));
214
+ z_rounded_v = _mm512_add_epi32(z_rounded_v, _mm512_set1_epi32(zero_point));
215
+ w_rounded_v = _mm512_add_epi32(w_rounded_v, _mm512_set1_epi32(zero_point));
216
+
217
+ __m512i xy_packed_v = _mm512_packs_epi32(x_rounded_v, y_rounded_v);
218
+ __m512i zw_packed_v = _mm512_packs_epi32(z_rounded_v, w_rounded_v);
219
+ __m512i xyzw_clamped_v =
220
+ pack_saturate_and_clamp<T>(xy_packed_v, zw_packed_v, min_val, max_val);
221
+
222
+ xyzw_clamped_v =
223
+ _mm512_permutexvar_epi32(permute_mask_v, xyzw_clamped_v);
224
+ _mm512_storeu_si512(reinterpret_cast<__m512i*>(dst + i), xyzw_clamped_v);
225
+ }
226
+
227
+ // Additional single-vector (16-lane) AVX512 loop to take advantage when len is smaller,
228
+ // based on fbgemm::QuantizeAvx2 (https://github.com/pytorch/FBGEMM)
229
+ for (; i < len / VLEN * VLEN; i += VLEN) {
230
+ __m512 x_vals = _mm512_load_ps(src + i);
231
+ __m512 x_transformed_v = _mm512_mul_ps(x_vals, inverse_scale_v);
232
+ x_transformed_v =
233
+ _mm512_min_ps(x_transformed_v, _mm512_set1_ps(int32_float_max_val));
234
+ __m512i x_rounded_v = _mm512_cvtps_epi32(x_transformed_v);
235
+ x_rounded_v = _mm512_add_epi32(x_rounded_v, _mm512_set1_epi32(zero_point));
236
+ __m512i x_clipped_v =
237
+ _mm512_max_epi32(min_v, _mm512_min_epi32(max_v, x_rounded_v));
238
+
239
+ x_clipped_v = _mm512_shuffle_epi8(x_clipped_v, shuffle_mask_v);
240
+ x_clipped_v = _mm512_permutexvar_epi32(permute_mask_l8_v, x_clipped_v);
241
+ _mm_storeu_si128(
242
+ reinterpret_cast<__m128i*>(dst + i),
243
+ _mm512_castsi512_si128(x_clipped_v));
244
+ }
245
+
246
+ for (; i < len; ++i) {
247
+ float transformed = src[i] * inverse_scale;
248
+
249
+ // Not exactly the same behavior as the vectorized code.
250
+ // The vectorized code above always rounds to even in halfway cases
251
+ // (https://software.intel.com/en-us/node/523819), but std::nearbyint
252
+ // does the same only when the current rounding mode is FE_TONEAREST.
253
+ // However, in practice, this should not be a problem because most cases
254
+ // use the default rounding mode FE_TONEAREST.
255
+ // Note that we cannot implement the same behavior as the vectorized code
256
+ // using std::round because it does rounding away from zero in halfway
257
+ // cases.
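+ // (Editorial example: under FE_TONEAREST, std::nearbyint(2.5f) == 2.0f, matching
+ // the vectorized round-to-even path, whereas std::round(2.5f) == 3.0f.)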
258
+ transformed = zero_point + std::nearbyint(transformed);
259
+ float clipped =
260
+ std::min(std::max(transformed, float(min_val)), float(max_val));
261
+ dst[i] = clipped;
262
+ }
263
+ }
264
+
265
+ template<>
266
+ struct Vectorized<c10::qint32> : public Vectorizedqi {
267
+ using size_type = int;
268
+ static constexpr size_type size() {
269
+ return 16;
270
+ }
271
+
272
+ static constexpr int float_num_vecs() {
273
+ return 1;
274
+ }
275
+
276
+ static constexpr int int_num_vecs() {
277
+ return 1;
278
+ }
279
+
280
+ using float_vec_return_type = std::array<Vectorized<float>, 1>;
281
+ using int_vec_return_type = std::array<Vectorized<c10::qint32>, 1>;
282
+ using value_type = c10::qint32::underlying;
283
+
284
+ public:
285
+ using Vectorizedqi::Vectorizedqi;
286
+ Vectorized() {}
287
+
288
+ Vectorized(__m512i vals_) { vals = vals_;}
289
+
290
+ // Broadcast constructor
291
+ Vectorized(const c10::qint32& val) {
292
+ value_type uw = val.val_;
293
+ vals = _mm512_set1_epi32(uw);
294
+ }
295
+
296
+ void store(void* ptr, int count = size()) const {
297
+ if (count != size()) {
298
+ memcpy(ptr, &vals, count * sizeof(value_type));
299
+ } else {
300
+ _mm512_storeu_si512((__m512i*)ptr, vals);
301
+ }
302
+ }
303
+
304
+ static Vectorized<c10::qint32> loadu(const void* ptr) {
305
+ return Vectorized<c10::qint32>(ptr);
306
+ }
307
+
308
+ static Vectorized<c10::qint32> loadu(const void* ptr, int64_t count) {
309
+ __at_align__ value_type tmp_values[size()];
310
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
311
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
312
+ // instructions while a loop would be compiled to one instruction.
313
+ for (const auto i : c10::irange(size())) {
314
+ tmp_values[i] = 0;
315
+ }
316
+ std::memcpy(tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
317
+ return loadu(tmp_values);
318
+ }
319
+
320
+ float_vec_return_type dequantize(
321
+ Vectorized<float> scale,
322
+ Vectorized<float> zero_point,
323
+ Vectorized<float> scale_zp_premul) const {
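+ // Editorial note (not in the upstream header): scale_zp_premul is expected to
+ // hold -zero_point * scale, so the fmadd below evaluates to
+ // (float(vals) - zero_point) * scale, matching the two-argument overload below.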
324
+ __m512 float_vals = _mm512_cvtepi32_ps(vals);
325
+ return {vec::fmadd(scale, Vectorized<float>(float_vals), scale_zp_premul)};
326
+ }
327
+
328
+ float_vec_return_type dequantize(
329
+ Vectorized<float> scale,
330
+ Vectorized<float> zero_point) const {
331
+ __m512 float_vals = _mm512_cvtepi32_ps(vals);
332
+ return {(Vectorized<float>(float_vals) - zero_point) * scale};
333
+ }
334
+
335
+ static Vectorized<c10::qint32> quantize(
336
+ const float_vec_return_type& rhs,
337
+ float scale,
338
+ int32_t zero_point,
339
+ float inverse_scale) {
340
+ Vectorized<c10::qint32> retval;
341
+ auto rhs_data = (__m512)rhs[0];
342
+ at::native::quantize_vec<c10::qint32, /*precision=*/32>(
343
+ scale, zero_point, (float*)&rhs_data, (c10::qint32*)&retval.vals, 16);
344
+ return retval;
345
+ }
346
+
347
+ Vectorized<c10::qint32> maximum(Vectorized<c10::qint32> b) const {
348
+ return _mm512_max_epi32(vals, b.vals);
349
+ }
350
+
351
+ Vectorized<c10::qint32> minimum(Vectorized<c10::qint32> b) const {
352
+ return _mm512_min_epi32(vals, b.vals);
353
+ }
354
+
355
+ Vectorized<c10::qint32> relu(Vectorized<c10::qint32> zero_point) const {
356
+ return maximum(zero_point);
357
+ }
358
+
359
+ Vectorized<c10::qint32> relu6(
360
+ Vectorized<c10::qint32> zero_point,
361
+ Vectorized<c10::qint32> q_six) {
362
+ return _mm512_min_epi32(
363
+ _mm512_max_epi32(vals, zero_point.vals), q_six.vals);
364
+ }
365
+
366
+ int_vec_return_type widening_subtract(Vectorized<c10::qint32> b) const {
367
+ return {_mm512_sub_epi32(vals, b)};
368
+ }
369
+
370
+ static Vectorized<c10::qint32> requantize_from_int(
371
+ const int_vec_return_type& inp,
372
+ float multiplier,
373
+ int32_t zero_point) {
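+ // Editorial note (added): per lane this computes
+ // round(int32_value * multiplier) + zero_point, where `multiplier` is typically
+ // input_scale / output_scale in a requantization step.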
374
+ __m512 multiplier_v = _mm512_set1_ps(multiplier);
375
+ __m512i zero_point_v = _mm512_set1_epi32(zero_point);
376
+
377
+ __m512 scaled = _mm512_mul_ps(_mm512_cvtepi32_ps(inp[0]), multiplier_v);
378
+ __m512i rounded = _mm512_cvtps_epi32(scaled);
379
+ return _mm512_add_epi32(rounded, zero_point_v);
380
+ }
381
+
382
+ private:
383
+ // Load from memory constructor
384
+ Vectorized(const void* ptr) {
385
+ vals = _mm512_loadu_si512((const __m512i*)ptr);
386
+ }
387
+ };
388
+
389
+ template <>
390
+ Vectorized<c10::qint32> inline maximum(const Vectorized<c10::qint32>& a, const Vectorized<c10::qint32>& b) {
391
+ return a.maximum(b);
392
+ }
393
+
394
+ template <>
395
+ Vectorized<c10::qint32> inline operator*(
396
+ const Vectorized<c10::qint32>& a,
397
+ const Vectorized<c10::qint32>& b) {
398
+ return _mm512_mullo_epi32(a, b);
399
+ }
400
+
401
+ template <>
402
+ Vectorized<c10::qint32> inline operator+(
403
+ const Vectorized<c10::qint32>& a,
404
+ const Vectorized<c10::qint32>& b) {
405
+ return _mm512_add_epi32(a, b);
406
+ }
407
+
408
+ /*
409
+ * Convert values from int32 back to int8/uint8
410
+ */
411
+ template <typename T>
412
+ __m512i RequantizeAvx512(
413
+ const std::array<Vectorized<c10::qint32>, 4>& inp,
414
+ __m512 multiplier,
415
+ __m512i zp) {
416
+ static_assert(
417
+ std::is_same<T, int8_t>::value || std::is_same<T, uint8_t>::value,
418
+ "Only int8_t/uint8_t are supported");
419
+ constexpr auto min_val = std::numeric_limits<T>::min();
420
+ constexpr auto max_val = std::numeric_limits<T>::max();
421
+ __m512i permute_mask_v =
422
+ _mm512_set_epi32(0x0f, 0x0b, 0x07, 0x03, 0x0e, 0x0a, 0x06, 0x02,
423
+ 0x0d, 0x09, 0x05, 0x01, 0x0c, 0x08, 0x04, 0x00);
424
+ __m512 x_scaled_v = _mm512_mul_ps(_mm512_cvtepi32_ps(inp[0]), multiplier);
425
+ __m512 y_scaled_v = _mm512_mul_ps(_mm512_cvtepi32_ps(inp[1]), multiplier);
426
+ __m512 z_scaled_v = _mm512_mul_ps(_mm512_cvtepi32_ps(inp[2]), multiplier);
427
+ __m512 w_scaled_v = _mm512_mul_ps(_mm512_cvtepi32_ps(inp[3]), multiplier);
428
+
429
+ __m512i x_rounded_v = _mm512_cvtps_epi32(x_scaled_v);
430
+ __m512i y_rounded_v = _mm512_cvtps_epi32(y_scaled_v);
431
+ __m512i z_rounded_v = _mm512_cvtps_epi32(z_scaled_v);
432
+ __m512i w_rounded_v = _mm512_cvtps_epi32(w_scaled_v);
433
+
434
+ /* Add zero point */
435
+ __m512i x_v = _mm512_add_epi32(x_rounded_v, zp);
436
+ __m512i y_v = _mm512_add_epi32(y_rounded_v, zp);
437
+ __m512i z_v = _mm512_add_epi32(z_rounded_v, zp);
438
+ __m512i w_v = _mm512_add_epi32(w_rounded_v, zp);
439
+
440
+ /* Pack to int16_t and saturate */
441
+ __m512i xy_packed_v = _mm512_packs_epi32(x_v, y_v);
442
+ __m512i zw_packed_v = _mm512_packs_epi32(z_v, w_v);
443
+
444
+ __m512i xyzw_clamped_v =
445
+ pack_saturate_and_clamp<T>(xy_packed_v, zw_packed_v, min_val, max_val);
446
+
447
+ /*
448
+ * xyzw_clamped_v has results in the following layout so we need to
449
+ * permute: x0-3 y0-3 z0-3 w0-3 x4-7 y4-7 z4-7 w4-7 x8-11 y8-11 z8-11 w8-11 x12-15 y12-15 z12-15 w12-15
450
+ */
451
+ xyzw_clamped_v = _mm512_permutexvar_epi32(permute_mask_v, xyzw_clamped_v);
452
+ return xyzw_clamped_v;
453
+ }
454
+
455
+ template<>
456
+ struct Vectorized<c10::qint8> : public Vectorizedqi {
457
+ static constexpr int size() {
458
+ return 64;
459
+ }
460
+
461
+ static constexpr int float_num_vecs() {
462
+ return 4;
463
+ }
464
+
465
+ static constexpr int int_num_vecs() {
466
+ return 4;
467
+ }
468
+
469
+ using float_vec_return_type = std::array<Vectorized<float>, 4>;
470
+ using int_vec_return_type = std::array<Vectorized<c10::qint32>, 4>;
471
+ using value_type = typename c10::qint8::underlying;
472
+
473
+ public:
474
+ using Vectorizedqi::Vectorizedqi;
475
+
476
+ Vectorized() {}
477
+ Vectorized(__m512i vals_) { vals = vals_;}
478
+
479
+ // Broadcast constructor
480
+ Vectorized(const c10::qint8& val) {
481
+ value_type uw = val.val_;
482
+ vals = _mm512_set1_epi8(uw);
483
+ }
484
+
485
+ // This is needed because the compiler emits awful code for the default
486
+ // constructor for moving the enum
487
+ Vectorized(const Vectorized<c10::qint8>& other) : Vectorizedqi(other.vals) { }
488
+
489
+ // This is added to avoid error: definition of implicit copy assignment operator
490
+ // for 'Vectorized<c10::qint8>' is deprecated because it has a user-declared
491
+ // copy constructor [-Werror,-Wdeprecated-copy]
492
+ Vectorized& operator=(const Vectorized<c10::qint8>&) = default;
493
+
494
+ void store(void* ptr, int count = size()) const {
495
+ if (count != size()) {
496
+ memcpy(ptr, &vals, count * sizeof(value_type));
497
+ } else {
498
+ _mm512_storeu_si512((__m512i*)ptr, vals);
499
+ }
500
+ }
501
+
502
+ static Vectorized<c10::qint8> loadu(const void* ptr) {
503
+ return Vectorized<c10::qint8>(ptr);
504
+ }
505
+
506
+ static Vectorized<c10::qint8> loadu(const void* ptr, int64_t count) {
507
+ __at_align__ value_type tmp_values[size()];
508
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
509
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
510
+ // instructions while a loop would be compiled to one instruction.
511
+ for (const auto i : c10::irange(size())) {
512
+ tmp_values[i] = 0;
513
+ }
514
+ std::memcpy(tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
515
+ return loadu(tmp_values);
516
+ }
517
+
518
+ private:
519
+ __m512i cvtepi8_epi32(__m128i epi8_vals) const {
520
+ return _mm512_cvtepi8_epi32(epi8_vals);
521
+ }
522
+
523
+ public:
524
+ float_vec_return_type dequantize(
525
+ Vectorized<float> scale,
526
+ Vectorized<float> zero_point,
527
+ Vectorized<float> scale_neg_zp_premul) const {
528
+ __m128i int_val0 = _mm_set_epi64x(vals[1], vals[0]);
529
+ __m128i int_val1 = _mm_set_epi64x(vals[3], vals[2]);
530
+ __m128i int_val2 = _mm_set_epi64x(vals[5], vals[4]);
531
+ __m128i int_val3 = _mm_set_epi64x(vals[7], vals[6]);
532
+
533
+ __m512 float_val0 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val0));
534
+ __m512 float_val1 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val1));
535
+ __m512 float_val2 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val2));
536
+ __m512 float_val3 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val3));
537
+
538
+ auto val0 =
539
+ vec::fmadd(scale, Vectorized<float>(float_val0), scale_neg_zp_premul);
540
+ auto val1 =
541
+ vec::fmadd(scale, Vectorized<float>(float_val1), scale_neg_zp_premul);
542
+ auto val2 =
543
+ vec::fmadd(scale, Vectorized<float>(float_val2), scale_neg_zp_premul);
544
+ auto val3 =
545
+ vec::fmadd(scale, Vectorized<float>(float_val3), scale_neg_zp_premul);
546
+ return {val0, val1, val2, val3};
547
+ }
548
+
549
+ float_vec_return_type dequantize(
550
+ Vectorized<float> scale,
551
+ Vectorized<float> zero_point) const {
552
+ __m128i int_val0 = _mm_set_epi64x(vals[1], vals[0]);
553
+ __m128i int_val1 = _mm_set_epi64x(vals[3], vals[2]);
554
+ __m128i int_val2 = _mm_set_epi64x(vals[5], vals[4]);
555
+ __m128i int_val3 = _mm_set_epi64x(vals[7], vals[6]);
556
+
557
+ __m512 float_val0 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val0));
558
+ __m512 float_val1 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val1));
559
+ __m512 float_val2 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val2));
560
+ __m512 float_val3 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val3));
561
+
562
+ auto val0 = (Vectorized<float>(float_val0) - zero_point) * scale;
563
+ auto val1 = (Vectorized<float>(float_val1) - zero_point) * scale;
564
+ auto val2 = (Vectorized<float>(float_val2) - zero_point) * scale;
565
+ auto val3 = (Vectorized<float>(float_val3) - zero_point) * scale;
566
+ return {val0, val1, val2, val3};
567
+ }
568
+
569
+ static Vectorized<c10::qint8> quantize(
570
+ const float_vec_return_type& rhs,
571
+ float scale,
572
+ int32_t zero_point,
573
+ float inverse_scale) {
574
+ auto* rhs_data = (float*)rhs.data();
575
+ int8_t quantized_values[64];
576
+ QuantizeAvx512<value_type>(
577
+ rhs_data, quantized_values, 64, inverse_scale, zero_point);
578
+ return Vectorized<c10::qint8>::loadu(quantized_values);
579
+ }
580
+
581
+ Vectorized<c10::qint8> maximum(Vectorized<c10::qint8> b) const {
582
+ return _mm512_max_epi8(vals, b.vals);
583
+ }
584
+
585
+ Vectorized<c10::qint8> minimum(Vectorized<c10::qint8> b) const {
586
+ return _mm512_min_epi8(vals, b.vals);
587
+ }
588
+
589
+ Vectorized<c10::qint8> relu(Vectorized<c10::qint8> zero_point) const {
590
+ return maximum(zero_point);
591
+ }
592
+
593
+ Vectorized<c10::qint8> relu6(
594
+ Vectorized<c10::qint8> zero_point,
595
+ Vectorized<c10::qint8> q_six) {
596
+ return _mm512_min_epi8(
597
+ _mm512_max_epi8(vals, zero_point.vals), q_six.vals);
598
+ }
599
+
600
+ int_vec_return_type widening_subtract(Vectorized<c10::qint8> b) const {
601
+ __m128i int_val0 = _mm_set_epi64x(vals[1], vals[0]);
602
+ __m128i int_val1 = _mm_set_epi64x(vals[3], vals[2]);
603
+ __m128i int_val2 = _mm_set_epi64x(vals[5], vals[4]);
604
+ __m128i int_val3 = _mm_set_epi64x(vals[7], vals[6]);
605
+
606
+ __m512i int32_val0 = cvtepi8_epi32(int_val0);
607
+ __m512i int32_val1 = cvtepi8_epi32(int_val1);
608
+ __m512i int32_val2 = cvtepi8_epi32(int_val2);
609
+ __m512i int32_val3 = cvtepi8_epi32(int_val3);
610
+
611
+ __m128i int_b0 = _mm_set_epi64x(b.vals[1], b.vals[0]);
612
+ __m128i int_b1 = _mm_set_epi64x(b.vals[3], b.vals[2]);
613
+ __m128i int_b2 = _mm_set_epi64x(b.vals[5], b.vals[4]);
614
+ __m128i int_b3 = _mm_set_epi64x(b.vals[7], b.vals[6]);
615
+
616
+ __m512i int32_b0 = cvtepi8_epi32(int_b0);
617
+ __m512i int32_b1 = cvtepi8_epi32(int_b1);
618
+ __m512i int32_b2 = cvtepi8_epi32(int_b2);
619
+ __m512i int32_b3 = cvtepi8_epi32(int_b3);
620
+
621
+ __m512i res_0 = _mm512_sub_epi32(int32_val0, int32_b0);
622
+ __m512i res_1 = _mm512_sub_epi32(int32_val1, int32_b1);
623
+ __m512i res_2 = _mm512_sub_epi32(int32_val2, int32_b2);
624
+ __m512i res_3 = _mm512_sub_epi32(int32_val3, int32_b3);
625
+
626
+ return {Vectorized<c10::qint32>(res_0),
627
+ Vectorized<c10::qint32>(res_1),
628
+ Vectorized<c10::qint32>(res_2),
629
+ Vectorized<c10::qint32>(res_3)};
630
+ }
631
+
632
+ static Vectorized<c10::qint8> requantize_from_int(
633
+ const int_vec_return_type& inp,
634
+ float multiplier,
635
+ int32_t zero_point) {
636
+ __m512 multiplier_v = _mm512_set1_ps(multiplier);
637
+ __m512i zero_point_v = _mm512_set1_epi32(zero_point);
638
+ return RequantizeAvx512<value_type>(inp, multiplier_v, zero_point_v);
639
+ }
640
+
641
+ private:
642
+ // Load from memory constructor
643
+ Vectorized(const void* ptr) {
644
+ vals = _mm512_loadu_si512((const __m512i*)ptr);
645
+ }
646
+ };
647
+
648
+ template <>
649
+ Vectorized<c10::qint8> inline maximum(const Vectorized<c10::qint8>& a, const Vectorized<c10::qint8>& b) {
650
+ return a.maximum(b);
651
+ }
652
+
653
+ template<>
654
+ struct Vectorized<c10::quint8> : public Vectorizedqi {
655
+ static constexpr int size() {
656
+ return 64;
657
+ }
658
+
659
+ static constexpr int float_num_vecs() {
660
+ return 4;
661
+ }
662
+
663
+ static constexpr int int_num_vecs() {
664
+ return 4;
665
+ }
666
+
667
+ using float_vec_return_type = std::array<Vectorized<float>, 4>;
668
+ using int_vec_return_type = std::array<Vectorized<c10::qint32>, 4>;
669
+ using value_type = typename c10::quint8::underlying;
670
+
671
+ public:
672
+ using Vectorizedqi::Vectorizedqi;
673
+ Vectorized() {}
674
+
675
+ Vectorized(__m512i vals_) { vals = vals_;}
676
+
677
+ // Broadcast constructor
678
+ Vectorized(const c10::quint8& val) {
679
+ value_type uw = val.val_;
680
+ vals = _mm512_set1_epi8(uw);
681
+ }
682
+
683
+ Vectorized(const Vectorized<c10::quint8>& other) : Vectorizedqi(other.vals) { }
684
+
685
+ // This is added to avoid error: definition of implicit copy assignment operator
686
+ // for 'Vectorized<c10::quint8>' is deprecated because it has a user-declared
687
+ // copy constructor [-Werror,-Wdeprecated-copy]
688
+ Vectorized& operator=(const Vectorized<c10::quint8>&) = default;
689
+
690
+ void store(void* ptr, int count = size()) const {
691
+ if (count != size()) {
692
+ memcpy(ptr, &vals, count * sizeof(value_type));
693
+ } else {
694
+ _mm512_storeu_si512((__m512i*)ptr, vals);
695
+ }
696
+ }
697
+
698
+ static Vectorized<c10::quint8> loadu(const void* ptr) {
699
+ return Vectorized<c10::quint8>(ptr);
700
+ }
701
+
702
+ static Vectorized<c10::quint8> loadu(const void* ptr, int64_t count) {
703
+ __at_align__ value_type tmp_values[size()];
704
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
705
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
706
+ // instructions while a loop would be compiled to one instruction.
707
+ for (const auto i : c10::irange(size())) {
708
+ tmp_values[i] = 0;
709
+ }
710
+ std::memcpy(tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
711
+ return loadu(tmp_values);
712
+ }
713
+
714
+ private:
715
+ __m512i cvtepu8_epi32(__m128i epu8_vals) const {
716
+ return _mm512_cvtepu8_epi32(epu8_vals);
717
+ }
718
+
719
+ public:
720
+ float_vec_return_type dequantize(
721
+ Vectorized<float> scale,
722
+ Vectorized<float> zero_point,
723
+ Vectorized<float> scale_zp_premul) const {
724
+ __m128i int_val0 = _mm_set_epi64x(vals[1], vals[0]);
725
+ __m128i int_val1 = _mm_set_epi64x(vals[3], vals[2]);
726
+ __m128i int_val2 = _mm_set_epi64x(vals[5], vals[4]);
727
+ __m128i int_val3 = _mm_set_epi64x(vals[7], vals[6]);
728
+
729
+ __m512 float_val0 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val0));
730
+ __m512 float_val1 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val1));
731
+ __m512 float_val2 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val2));
732
+ __m512 float_val3 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val3));
733
+
734
+ auto val0 =
735
+ vec::fmadd(scale, Vectorized<float>(float_val0), scale_zp_premul);
736
+ auto val1 =
737
+ vec::fmadd(scale, Vectorized<float>(float_val1), scale_zp_premul);
738
+ auto val2 =
739
+ vec::fmadd(scale, Vectorized<float>(float_val2), scale_zp_premul);
740
+ auto val3 =
741
+ vec::fmadd(scale, Vectorized<float>(float_val3), scale_zp_premul);
742
+
743
+ return {val0, val1, val2, val3};
744
+ }
745
+
746
+ float_vec_return_type dequantize(
747
+ Vectorized<float> scale,
748
+ Vectorized<float> zero_point) const {
749
+ __m128i int_val0 = _mm_set_epi64x(vals[1], vals[0]);
750
+ __m128i int_val1 = _mm_set_epi64x(vals[3], vals[2]);
751
+ __m128i int_val2 = _mm_set_epi64x(vals[5], vals[4]);
752
+ __m128i int_val3 = _mm_set_epi64x(vals[7], vals[6]);
753
+
754
+ __m512 float_val0 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val0));
755
+ __m512 float_val1 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val1));
756
+ __m512 float_val2 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val2));
757
+ __m512 float_val3 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val3));
758
+
759
+ auto val0 = (Vectorized<float>(float_val0) - zero_point) * scale;
760
+ auto val1 = (Vectorized<float>(float_val1) - zero_point) * scale;
761
+ auto val2 = (Vectorized<float>(float_val2) - zero_point) * scale;
762
+ auto val3 = (Vectorized<float>(float_val3) - zero_point) * scale;
763
+
764
+ return {val0, val1, val2, val3};
765
+ }
766
+
767
+ static Vectorized<c10::quint8> quantize(
768
+ const float_vec_return_type& rhs,
769
+ float scale,
770
+ int32_t zero_point,
771
+ float inverse_scale) {
772
+ auto* rhs_data = (float*)rhs.data();
773
+ uint8_t quantized_values[64];
774
+ QuantizeAvx512<value_type>(
775
+ rhs_data, quantized_values, 64, inverse_scale, zero_point);
776
+ return Vectorized<c10::quint8>::loadu(quantized_values);
777
+ }
778
+
779
+ Vectorized<c10::quint8> maximum(Vectorized<c10::quint8> b) const {
780
+ return _mm512_max_epu8(vals, b.vals);
781
+ }
782
+
783
+ Vectorized<c10::quint8> minimum(Vectorized<c10::quint8> b) const {
784
+ return _mm512_min_epu8(vals, b.vals);
785
+ }
786
+
787
+ Vectorized<c10::quint8> relu(Vectorized<c10::quint8> zero_point) const {
788
+ return maximum(zero_point);
789
+ }
790
+
791
+ Vectorized<c10::quint8> relu6(
792
+ Vectorized<c10::quint8> zero_point,
793
+ Vectorized<c10::quint8> q_six) {
794
+ return _mm512_min_epu8(
795
+ _mm512_max_epu8(vals, zero_point.vals), q_six.vals);
796
+ }
797
+
798
+ int_vec_return_type widening_subtract(Vectorized<c10::quint8> b) const {
799
+ __m128i int_val0 = _mm_set_epi64x(vals[1], vals[0]);
800
+ __m128i int_val1 = _mm_set_epi64x(vals[3], vals[2]);
801
+ __m128i int_val2 = _mm_set_epi64x(vals[5], vals[4]);
802
+ __m128i int_val3 = _mm_set_epi64x(vals[7], vals[6]);
803
+
804
+ __m512i int32_val0 = cvtepu8_epi32(int_val0);
805
+ __m512i int32_val1 = cvtepu8_epi32(int_val1);
806
+ __m512i int32_val2 = cvtepu8_epi32(int_val2);
807
+ __m512i int32_val3 = cvtepu8_epi32(int_val3);
808
+
809
+ __m128i int_b0 = _mm_set_epi64x(b.vals[1], b.vals[0]);
810
+ __m128i int_b1 = _mm_set_epi64x(b.vals[3], b.vals[2]);
811
+ __m128i int_b2 = _mm_set_epi64x(b.vals[5], b.vals[4]);
812
+ __m128i int_b3 = _mm_set_epi64x(b.vals[7], b.vals[6]);
813
+
814
+ __m512i int32_b0 = cvtepu8_epi32(int_b0);
815
+ __m512i int32_b1 = cvtepu8_epi32(int_b1);
816
+ __m512i int32_b2 = cvtepu8_epi32(int_b2);
817
+ __m512i int32_b3 = cvtepu8_epi32(int_b3);
818
+
819
+ __m512i res_0 = _mm512_sub_epi32(int32_val0, int32_b0);
820
+ __m512i res_1 = _mm512_sub_epi32(int32_val1, int32_b1);
821
+ __m512i res_2 = _mm512_sub_epi32(int32_val2, int32_b2);
822
+ __m512i res_3 = _mm512_sub_epi32(int32_val3, int32_b3);
823
+ return {Vectorized<c10::qint32>(res_0),
824
+ Vectorized<c10::qint32>(res_1),
825
+ Vectorized<c10::qint32>(res_2),
826
+ Vectorized<c10::qint32>(res_3)};
827
+ }
828
+
829
+ static Vectorized<c10::quint8> requantize_from_int(
830
+ const int_vec_return_type& inp,
831
+ float multiplier,
832
+ int32_t zero_point) {
833
+ __m512 multiplier_v = _mm512_set1_ps(multiplier);
834
+ __m512i zero_point_v = _mm512_set1_epi32(zero_point);
835
+ return RequantizeAvx512<value_type>(inp, multiplier_v, zero_point_v);
836
+ }
837
+
838
+ private:
839
+
840
+ // Load from memory constructor
841
+ Vectorized(const void* ptr) {
842
+ vals = _mm512_loadu_si512((const __m512i*)ptr);
843
+ }
844
+ };
845
+
846
+ template <>
847
+ Vectorized<c10::quint8> inline maximum(const Vectorized<c10::quint8>& a, const Vectorized<c10::quint8>& b) {
848
+ return a.maximum(b);
849
+ }
850
+
851
+ #else
852
+
853
+ // NOTE: These are low-performance implementations that we fall back on.
854
+
855
+ template <
856
+ typename T,
857
+ typename float_vec_return_type_,
858
+ typename int_vec_return_type_,
859
+ int size_>
860
+ struct VectorizedQuantizedConverter {
861
+ static constexpr int size() {
862
+ return size_;
863
+ }
864
+
865
+ static constexpr int float_num_vecs() {
866
+ return size() / 16; // 16 floats per Vectorized<float> sub-vector here, so /16 rather than /8
867
+ }
868
+
869
+ static constexpr int int_num_vecs() {
870
+ return size() / 16; // 16 lanes per Vectorized<c10::qint32> sub-vector here, so /16 rather than /8
871
+ }
872
+
873
+ using float_vec_return_type = float_vec_return_type_;
874
+ using int_vec_return_type = int_vec_return_type_;
875
+
876
+ using value_type = typename T::underlying;
877
+ std::array<value_type, size_> vals;
878
+
879
+ VectorizedQuantizedConverter(T val) {
880
+ for (const auto i : c10::irange(size())) {
881
+ vals[i] = val.val_;
882
+ }
883
+ }
884
+
885
+ VectorizedQuantizedConverter(const void* ptr) {
886
+ memcpy(vals.data(), ptr, sizeof(value_type) * size());
887
+ }
888
+
889
+ void store(void* ptr, int count = size()) const {
890
+ memcpy(ptr, vals.data(), count * sizeof(value_type));
891
+ }
892
+
893
+ float_vec_return_type dequantize(
894
+ Vectorized<float> scale,
895
+ Vectorized<float> zero_point,
896
+ Vectorized<float> scale_zp_premul) const {
897
+ float_vec_return_type rv;
898
+ for (const auto i : c10::irange(float_num_vecs())) {
899
+ float tmp_vals[16];
900
+ for (const auto j : c10::irange(16)) {
901
+ tmp_vals[j] = at::native::dequantize_val<T>(
902
+ scale[j], zero_point[j], T(vals[16 * i + j]));
903
+ }
904
+ rv[i] = Vectorized<float>(tmp_vals[0],
905
+ tmp_vals[1],
906
+ tmp_vals[2],
907
+ tmp_vals[3],
908
+ tmp_vals[4],
909
+ tmp_vals[5],
910
+ tmp_vals[6],
911
+ tmp_vals[7],
912
+ tmp_vals[8],
913
+ tmp_vals[9],
914
+ tmp_vals[10],
915
+ tmp_vals[11],
916
+ tmp_vals[12],
917
+ tmp_vals[13],
918
+ tmp_vals[14],
919
+ tmp_vals[15]);
920
+ }
921
+ return rv;
922
+ }
923
+
924
+ float_vec_return_type dequantize(
925
+ Vectorized<float> scale,
926
+ Vectorized<float> zero_point) const {
927
+ Vectorized<float> scale_zp_premul;
928
+ return dequantize(scale, zero_point, scale_zp_premul);
929
+ }
930
+
931
+ protected:
932
+ VectorizedQuantizedConverter() {}
933
+ };
934
+
935
+ template <>
936
+ struct Vectorized<c10::qint32> : public VectorizedQuantizedConverter<
937
+ c10::qint32,
938
+ std::array<Vectorized<float>, 1>,
939
+ std::array<Vectorized<c10::qint32>, 1>,
940
+ 16> {
941
+ Vectorized()
942
+ : VectorizedQuantizedConverter<
943
+ c10::qint32,
944
+ std::array<Vectorized<float>, 1>,
945
+ std::array<Vectorized<c10::qint32>, 1>,
946
+ 16>() {}
947
+ Vectorized(c10::qint32 val)
948
+ : VectorizedQuantizedConverter<
949
+ c10::qint32,
950
+ std::array<Vectorized<float>, 1>,
951
+ std::array<Vectorized<c10::qint32>, 1>,
952
+ 16>(val) {}
953
+ Vectorized(const void* ptr)
954
+ : VectorizedQuantizedConverter<
955
+ c10::qint32,
956
+ std::array<Vectorized<float>, 1>,
957
+ std::array<Vectorized<c10::qint32>, 1>,
958
+ 16>(ptr) {}
959
+
960
+ static Vectorized<c10::qint32> loadu(const void* ptr) {
961
+ return Vectorized<c10::qint32>(ptr);
962
+ }
963
+
964
+ static Vectorized<c10::qint32> loadu(const void* ptr, int64_t count) {
965
+ __at_align__ value_type tmp_values[size()];
966
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
967
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
968
+ // instructions while a loop would be compiled to one instruction.
969
+ for (const auto i : c10::irange(size())) {
970
+ tmp_values[i] = 0;
971
+ }
972
+ std::memcpy(tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
973
+ return loadu(tmp_values);
974
+ }
975
+
976
+ static Vectorized<c10::qint32> quantize(
977
+ const float_vec_return_type& rhs,
978
+ float scale,
979
+ int32_t zero_point,
980
+ float inverse_scale) {
981
+ std::array<value_type, size()> qvals;
982
+ std::array<float, float_num_vecs() * 16> float_vals;
983
+
984
+ for (const auto i : c10::irange(float_num_vecs())) {
985
+ rhs[i].store(&float_vals[i * 16], 16);
986
+ }
987
+
988
+ at::native::quantize_vec<c10::qint32, /*precision=*/32>(
989
+ scale,
990
+ zero_point,
991
+ float_vals.data(),
992
+ (c10::qint32*)qvals.data(),
993
+ 16 * float_num_vecs());
994
+
995
+ return Vectorized<c10::qint32>::loadu(qvals.data());
996
+ }
997
+
998
+ Vectorized<c10::qint32> maximum(Vectorized<c10::qint32> b) const {
999
+ Vectorized<c10::qint32> retval;
1000
+ for (const auto i : c10::irange(size())) {
1001
+ retval.vals[i] = std::max<value_type>(vals[i], b.vals[i]);
1002
+ }
1003
+ return retval;
1004
+ }
1005
+
1006
+ Vectorized<c10::qint32> minimum(Vectorized<c10::qint32> b) const {
1007
+ Vectorized<c10::qint32> retval;
1008
+ for (const auto i : c10::irange(size())) {
1009
+ retval.vals[i] = std::min<value_type>(vals[i], b.vals[i]);
1010
+ }
1011
+ return retval;
1012
+ }
1013
+
1014
+ Vectorized<c10::qint32> relu(Vectorized<c10::qint32> zero_point) const {
1015
+ return maximum(zero_point);
1016
+ }
1017
+
1018
+
1019
+ Vectorized<c10::qint32> relu6(
1020
+ Vectorized<c10::qint32> zero_point,
1021
+ Vectorized<c10::qint32> q_six) {
1022
+ Vectorized<c10::qint32> retval;
1023
+ for (const auto i : c10::irange(size())) {
1024
+ retval.vals[i] = std::min<value_type>(
1025
+ std::max<value_type>(vals[i], zero_point.vals[i]), q_six.vals[i]);
1026
+ }
1027
+ return retval;
1028
+ }
1029
+
1030
+ int_vec_return_type widening_subtract(Vectorized<c10::qint32> b) const {
1031
+ int_vec_return_type retval;
1032
+ for (const auto i : c10::irange(size())) {
1033
+ retval[0].vals[i] = vals[i] - b.vals[i];
1034
+ }
1035
+ return retval;
1036
+ }
1037
+
1038
+ static Vectorized<c10::qint32> requantize_from_int(
1039
+ const int_vec_return_type& inp,
1040
+ float multiplier,
1041
+ int32_t zero_point) {
1042
+ Vectorized<c10::qint32> retval;
1043
+ for (const auto i : c10::irange(size())) {
1044
+ retval.vals[i] =
1045
+ std::nearbyint(static_cast<float>(inp[0].vals[i]) * multiplier) +
1046
+ zero_point;
1047
+ }
1048
+ return retval;
1049
+ }
1050
+ };
1051
+
1052
+ template <>
1053
+ Vectorized<c10::qint32> inline maximum(const Vectorized<c10::qint32>& a, const Vectorized<c10::qint32>& b) {
1054
+ return a.maximum(b);
1055
+ }
1056
+
1057
+ template <>
1058
+ Vectorized<c10::qint32> inline operator*(
1059
+ const Vectorized<c10::qint32>& a,
1060
+ const Vectorized<c10::qint32>& b) {
1061
+ Vectorized<c10::qint32> retval;
1062
+ for (const auto i : c10::irange(std::decay_t<decltype(a)>::size())) {
1063
+ retval.vals[i] = a.vals[i] * b.vals[i];
1064
+ }
1065
+ return retval;
1066
+ }
1067
+
1068
+ template <>
1069
+ Vectorized<c10::qint32> inline operator+(
1070
+ const Vectorized<c10::qint32>& a,
1071
+ const Vectorized<c10::qint32>& b) {
1072
+ Vectorized<c10::qint32> retval;
1073
+ for (const auto i : c10::irange(std::decay_t<decltype(a)>::size())) {
1074
+ retval.vals[i] = a.vals[i] + b.vals[i];
1075
+ }
1076
+ return retval;
1077
+ }
1078
+
1079
+ template <>
1080
+ struct Vectorized<c10::qint8> : public VectorizedQuantizedConverter<
1081
+ c10::qint8,
1082
+ std::array<Vectorized<float>, 4>,
1083
+ std::array<Vectorized<c10::qint32>, 4>,
1084
+ 64> {
1085
+ Vectorized()
1086
+ : VectorizedQuantizedConverter<
1087
+ c10::qint8,
1088
+ std::array<Vectorized<float>, 4>,
1089
+ std::array<Vectorized<c10::qint32>, 4>,
1090
+ 64>() {}
1091
+ Vectorized(c10::qint8 val)
1092
+ : VectorizedQuantizedConverter<
1093
+ c10::qint8,
1094
+ std::array<Vectorized<float>, 4>,
1095
+ std::array<Vectorized<c10::qint32>, 4>,
1096
+ 64>(val) {}
1097
+ Vectorized(const void* ptr)
1098
+ : VectorizedQuantizedConverter<
1099
+ c10::qint8,
1100
+ std::array<Vectorized<float>, 4>,
1101
+ std::array<Vectorized<c10::qint32>, 4>,
1102
+ 64>(ptr) {}
1103
+
1104
+ static Vectorized<c10::qint8> loadu(const void* ptr) {
1105
+ return Vectorized<c10::qint8>(ptr);
1106
+ }
1107
+
1108
+ static Vectorized<c10::qint8> loadu(const void* ptr, int64_t count) {
1109
+ __at_align__ value_type tmp_values[size()];
1110
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
1111
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
1112
+ // instructions while a loop would be compiled to one instruction.
1113
+ for (const auto i : c10::irange(size())) {
1114
+ tmp_values[i] = 0;
1115
+ }
1116
+ std::memcpy(tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
1117
+ return loadu(tmp_values);
1118
+ }
1119
+
1120
+ static Vectorized<c10::qint8> quantize(
1121
+ const float_vec_return_type& rhs,
1122
+ float scale,
1123
+ int32_t zero_point,
1124
+ float inverse_scale) {
1125
+ std::array<value_type, size()> qvals;
1126
+ std::array<float, float_num_vecs() * 16> float_vals;
1127
+
1128
+ for (const auto i : c10::irange(float_num_vecs())) {
1129
+ rhs[i].store(&float_vals[i * 16], 16);
1130
+ }
1131
+
1132
+ at::native::quantize_vec<c10::qint8>(
1133
+ scale,
1134
+ zero_point,
1135
+ float_vals.data(),
1136
+ (c10::qint8*)qvals.data(),
1137
+ 16 * float_num_vecs());
1138
+
1139
+ return Vectorized<c10::qint8>::loadu(qvals.data());
1140
+ }
1141
+
1142
+ Vectorized<c10::qint8> maximum(Vectorized<c10::qint8> b) const {
1143
+ Vectorized<c10::qint8> retval;
1144
+ for (const auto i : c10::irange(size())) {
1145
+ retval.vals[i] = std::max<value_type>(vals[i], b.vals[i]);
1146
+ }
1147
+ return retval;
1148
+ }
1149
+
1150
+ Vectorized<c10::qint8> minimum(Vectorized<c10::qint8> b) const {
1151
+ Vectorized<c10::qint8> retval;
1152
+ for (const auto i : c10::irange(size())) {
1153
+ retval.vals[i] = std::min<value_type>(vals[i], b.vals[i]);
1154
+ }
1155
+ return retval;
1156
+ }
1157
+
1158
+ Vectorized<c10::qint8> relu(Vectorized<c10::qint8> zero_point) const {
1159
+ return maximum(zero_point);
1160
+ }
1161
+
1162
+ Vectorized<c10::qint8> relu6(
1163
+ Vectorized<c10::qint8> zero_point,
1164
+ Vectorized<c10::qint8> q_six) {
1165
+ Vectorized<c10::qint8> retval;
1166
+ for (const auto i : c10::irange(size())) {
1167
+ retval.vals[i] = std::min<value_type>(
1168
+ std::max<value_type>(vals[i], zero_point.vals[i]), q_six.vals[i]);
1169
+ }
1170
+ return retval;
1171
+ }
1172
+
1173
+ int_vec_return_type widening_subtract(Vectorized<c10::qint8> b) const {
1174
+ int_vec_return_type retval;
1175
+ constexpr int elem_per_int_vec = size() / int_num_vecs();
1176
+ for (const auto i : c10::irange(int_num_vecs())) {
1177
+ for (const auto j : c10::irange(elem_per_int_vec)) {
1178
+ retval[i].vals[j] =
1179
+ static_cast<int32_t>(vals[i * elem_per_int_vec + j]) -
1180
+ static_cast<int32_t>(b.vals[i * elem_per_int_vec + j]);
1181
+ }
1182
+ }
1183
+ return retval;
1184
+ }
1185
+ static Vectorized<c10::qint8> requantize_from_int(
1186
+ const int_vec_return_type& inp,
1187
+ float multiplier,
1188
+ int32_t zero_point) {
1189
+ constexpr int elem_per_int_vec = size() / int_num_vecs();
1190
+ constexpr auto min_val = std::numeric_limits<value_type>::min();
1191
+ constexpr auto max_val = std::numeric_limits<value_type>::max();
1192
+ Vectorized<c10::qint8> retval;
1193
+ for (const auto i : c10::irange(int_num_vecs())) {
1194
+ for (const auto j : c10::irange(elem_per_int_vec)) {
1195
+ int32_t rounded =
1196
+ std::nearbyint(static_cast<float>(inp[i].vals[j]) * multiplier) +
1197
+ zero_point;
1198
+ retval.vals[i * elem_per_int_vec + j] =
1199
+ std::min<int32_t>(std::max<int32_t>(rounded, min_val), max_val);
1200
+ }
1201
+ }
1202
+ return retval;
1203
+ }
1204
+ };
1205
+
1206
+ template <>
1207
+ Vectorized<c10::qint8> inline maximum(const Vectorized<c10::qint8>& a, const Vectorized<c10::qint8>& b) {
1208
+ return a.maximum(b);
1209
+ }
1210
+
1211
+ template <>
1212
+ struct Vectorized<c10::quint8> : public VectorizedQuantizedConverter<
1213
+ c10::quint8,
1214
+ std::array<Vectorized<float>, 4>,
1215
+ std::array<Vectorized<c10::qint32>, 4>,
1216
+ 64> {
1217
+ Vectorized()
1218
+ : VectorizedQuantizedConverter<
1219
+ c10::quint8,
1220
+ std::array<Vectorized<float>, 4>,
1221
+ std::array<Vectorized<c10::qint32>, 4>,
1222
+ 64>() {}
1223
+ Vectorized(c10::quint8 val)
1224
+ : VectorizedQuantizedConverter<
1225
+ c10::quint8,
1226
+ std::array<Vectorized<float>, 4>,
1227
+ std::array<Vectorized<c10::qint32>, 4>,
1228
+ 64>(val) {}
1229
+ Vectorized(const void* ptr)
1230
+ : VectorizedQuantizedConverter<
1231
+ c10::quint8,
1232
+ std::array<Vectorized<float>, 4>,
1233
+ std::array<Vectorized<c10::qint32>, 4>,
1234
+ 64>(ptr) {}
1235
+
1236
+ static Vectorized<c10::quint8> loadu(const void* ptr) {
1237
+ return Vectorized<c10::quint8>(ptr);
1238
+ }
1239
+
1240
+ static Vectorized<c10::quint8> loadu(const void* ptr, int64_t count) {
1241
+ __at_align__ value_type tmp_values[size()];
1242
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
1243
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
1244
+ // instructions while a loop would be compiled to one instruction.
1245
+ for (const auto i : c10::irange(size())) {
1246
+ tmp_values[i] = 0;
1247
+ }
1248
+ std::memcpy(tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
1249
+ return loadu(tmp_values);
1250
+ }
1251
+
1252
+ static Vectorized<c10::quint8> quantize(
1253
+ const float_vec_return_type& rhs,
1254
+ float scale,
1255
+ int32_t zero_point,
1256
+ float inverse_scale) {
1257
+ std::array<value_type, size()> qvals;
1258
+ std::array<float, float_num_vecs() * 16> float_vals;
1259
+
1260
+ for (const auto i : c10::irange(float_num_vecs())) {
1261
+ rhs[i].store(&float_vals[i * 16], 16);
1262
+ }
1263
+
1264
+ at::native::quantize_vec<c10::quint8>(
1265
+ scale,
1266
+ zero_point,
1267
+ float_vals.data(),
1268
+ (c10::quint8*)qvals.data(),
1269
+ 16 * float_num_vecs());
1270
+
1271
+ return Vectorized<c10::quint8>::loadu(qvals.data());
1272
+ }
1273
+
1274
+ Vectorized<c10::quint8> maximum(Vectorized<c10::quint8> b) const {
1275
+ Vectorized<c10::quint8> retval;
1276
+ for (const auto i : c10::irange(size())) {
1277
+ retval.vals[i] = std::max<value_type>(vals[i], b.vals[i]);
1278
+ }
1279
+ return retval;
1280
+ }
1281
+
1282
+ Vectorized<c10::quint8> minimum(Vectorized<c10::quint8> b) const {
1283
+ Vectorized<c10::quint8> retval;
1284
+ for (const auto i : c10::irange(size())) {
1285
+ retval.vals[i] = std::min<value_type>(vals[i], b.vals[i]);
1286
+ }
1287
+ return retval;
1288
+ }
1289
+
1290
+ Vectorized<c10::quint8> relu(Vectorized<c10::quint8> zero_point) const {
1291
+ return maximum(zero_point);
1292
+ }
1293
+
1294
+
1295
+ Vectorized<c10::quint8> relu6(
1296
+ Vectorized<c10::quint8> zero_point,
1297
+ Vectorized<c10::quint8> q_six) {
1298
+ Vectorized<c10::quint8> retval;
1299
+ for (const auto i : c10::irange(size())) {
1300
+ retval.vals[i] = std::min<value_type>(
1301
+ std::max<value_type>(vals[i], zero_point.vals[i]), q_six.vals[i]);
1302
+ }
1303
+ return retval;
1304
+ }
1305
+
1306
+ int_vec_return_type widening_subtract(Vectorized<c10::quint8> b) const {
1307
+ int_vec_return_type retval;
1308
+ constexpr int elem_per_int_vec = size() / int_num_vecs();
1309
+ for (const auto i : c10::irange(int_num_vecs())) {
1310
+ for (const auto j : c10::irange(elem_per_int_vec)) {
1311
+ retval[i].vals[j] =
1312
+ static_cast<int32_t>(vals[i * elem_per_int_vec + j]) -
1313
+ static_cast<int32_t>(b.vals[i * elem_per_int_vec + j]);
1314
+ }
1315
+ }
1316
+ return retval;
1317
+ }
1318
+ static Vectorized<c10::quint8> requantize_from_int(
1319
+ const int_vec_return_type& inp,
1320
+ float multiplier,
1321
+ int32_t zero_point) {
1322
+ constexpr int elem_per_int_vec = size() / int_num_vecs();
1323
+ constexpr auto min_val = std::numeric_limits<value_type>::min();
1324
+ constexpr auto max_val = std::numeric_limits<value_type>::max();
1325
+ Vectorized<c10::quint8> retval;
1326
+ for (const auto i : c10::irange(int_num_vecs())) {
1327
+ for (const auto j : c10::irange(elem_per_int_vec)) {
1328
+ int32_t rounded =
1329
+ std::nearbyint(static_cast<float>(inp[i].vals[j]) * multiplier) +
1330
+ zero_point;
1331
+ retval.vals[i * elem_per_int_vec + j] =
1332
+ std::min<int32_t>(std::max<int32_t>(rounded, min_val), max_val);
1333
+ }
1334
+ }
1335
+ return retval;
1336
+ }
1337
+ };
1338
+
1339
+ template <>
1340
+ Vectorized<c10::quint8> inline maximum(const Vectorized<c10::quint8>& a, const Vectorized<c10::quint8>& b) {
1341
+ return a.maximum(b);
1342
+ }
1343
+
1344
+ #endif // defined(CPU_CAPABILITY_AVX512) && !defined(MSVC)
1345
+
1346
+ }}}
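The quantized specializations above expose the same element-wise surface (maximum, minimum, relu, relu6) as the float vectors, plus widening_subtract and requantize_from_int for int32 intermediate arithmetic. A minimal usage sketch, not taken from the library's own tests, assuming a translation unit built for the active CPU_CAPABILITY with the usual ATen headers; the buffer contents, multiplier and zero point are illustrative only:

#include <array>
#include <ATen/cpu/vec/vec.h>

void quint8_relu_requantize_demo() {
  using QVec = at::vec::Vectorized<c10::quint8>;
  std::array<uint8_t, QVec::size()> raw{};       // zero-initialized input block (hypothetical data)
  QVec x = QVec::loadu(raw.data());              // load one full vector
  QVec zp(c10::quint8(10));                      // assumed zero point
  QVec y = x.relu(zp);                           // element-wise max against the zero point
  auto wide = y.widening_subtract(zp);           // int32 lanes for intermediate math
  QVec out = QVec::requantize_from_int(wide, /*multiplier=*/0.5f, /*zero_point=*/10);
  (void)out;                                     // result unused in this sketch
}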
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_base.h ADDED
@@ -0,0 +1,1108 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+ //
6
+ // Note [Do not compile initializers with AVX]
7
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8
+ // If you define a static initializer in this file, the initialization will use
9
+ // AVX instructions because these object files are compiled with AVX enabled.
10
+ // We need to avoid non-trivial global data in these architecture specific files
11
+ // because there's no way to guard the global initializers with CPU capability
12
+ // detection.
13
+ //
14
+ // See https://github.com/pytorch/pytorch/issues/37577 for an instance
15
+ // of this bug in the past.
16
+
17
+ #include <array>
18
+ #include <algorithm>
19
+ #include <cassert>
20
+ #include <cstring>
21
+ #include <functional>
22
+ #include <cmath>
23
+ #include <type_traits>
24
+ #include <climits>
25
+
26
+ #include <ATen/cpu/vec/intrinsics.h>
27
+ #include <ATen/native/Math.h>
28
+ #include <ATen/NumericUtils.h>
29
+ #include <c10/util/Half.h>
30
+ #include <c10/util/BFloat16.h>
31
+ #include <c10/util/BFloat16-math.h>
32
+ #include <c10/util/copysign.h>
33
+ #include <ATen/native/cpu/zmath.h>
34
+ #include <c10/util/TypeCast.h>
35
+ #include <c10/macros/Macros.h>
36
+ #include <c10/util/irange.h>
37
+ #include <c10/util/Load.h>
38
+
39
+ // These macros helped us unify vec_base.h
40
+ #ifdef CPU_CAPABILITY_AVX512
41
+ #if defined(__GNUC__)
42
+ #define __at_align__ __attribute__((aligned(64)))
43
+ #elif defined(_WIN32)
44
+ #define __at_align__ __declspec(align(64))
45
+ #else
46
+ #define __at_align__
47
+ #endif
48
+ #define VECTOR_WIDTH 64
49
+ #define int_vector __m512i
50
+ #else // CPU_CAPABILITY_AVX512
51
+ #if defined(__GNUC__)
52
+ #define __at_align__ __attribute__((aligned(32)))
53
+ #elif defined(_WIN32)
54
+ #define __at_align__ __declspec(align(32))
55
+ #else
56
+ #define __at_align__
57
+ #endif
58
+ #define VECTOR_WIDTH 32
59
+ #define int_vector __m256i
60
+ #endif // CPU_CAPABILITY_AVX512
61
+
62
+ namespace at::vec {
63
+ // See Note [CPU_CAPABILITY namespace]
64
+ inline namespace CPU_CAPABILITY {
65
+ // at::Half and at::BFloat16 should be treated as floating point
66
+ template <typename T>
67
+ struct is_floating_point:
68
+ std::integral_constant<bool,
69
+ std::is_floating_point<T>::value ||
70
+ std::is_same<T, at::Half>::value ||
71
+ std::is_same<T, at::BFloat16>::value> {
72
+ };
73
+
74
+ template<typename T>
75
+ constexpr bool is_floating_point_v = is_floating_point<T>::value;
76
+
77
+ template <typename T>
78
+ struct is_reduced_floating_point:
79
+ std::integral_constant<bool,
80
+ std::is_same<T, at::Half>::value ||
81
+ std::is_same<T, at::BFloat16>::value> {
82
+ };
83
+
84
+ template <typename T>
85
+ constexpr bool is_reduced_floating_point_v = is_reduced_floating_point<T>::value;
86
+
87
+ template<size_t n> struct int_of_size;
88
+
89
+ #define DEFINE_INT_OF_SIZE(int_t) \
90
+ template<> struct int_of_size<sizeof(int_t)> { using type = int_t; }
91
+
92
+ DEFINE_INT_OF_SIZE(int64_t);
93
+ DEFINE_INT_OF_SIZE(int32_t);
94
+ DEFINE_INT_OF_SIZE(int16_t);
95
+ DEFINE_INT_OF_SIZE(int8_t);
96
+
97
+ #undef DEFINE_INT_OF_SIZE
98
+
99
+ template <typename T>
100
+ using int_same_size_t = typename int_of_size<sizeof(T)>::type;
101
+
102
+ // NOTE: If you specialize on a type, you must define all operations!
103
+
104
+ // emulates Vectorized types
105
+ #if defined(__s390x__)
106
+ template <class T, class TEMP=void>
107
+ #else
108
+ template <class T>
109
+ #endif
110
+ struct Vectorized {
111
+ private:
112
+ __at_align__ T values[VECTOR_WIDTH / sizeof(T)];
113
+ public:
114
+ using value_type = T;
115
+ using size_type = int;
116
+ // Note [constexpr static function to avoid odr-usage compiler bug]
117
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
118
+ // Why, you might ask, is size defined to be a static constexpr function,
119
+ // rather than a more ordinary 'static constexpr int size;' variable?
120
+ // The problem lies within ODR rules for static constexpr members versus
121
+ // static constexpr functions. First, recall that this class (along with all
122
+ // of its derivations) live in an anonymous namespace: they are intended to be
123
+ // *completely* inlined at their use-sites, because we need to compile it
124
+ // multiple times for different instruction sets.
125
+ //
126
+ // Because of this constraint, we CANNOT provide a single definition for
127
+ // any static members in this class; since we want to compile the class
128
+ // multiple times, there wouldn't actually be any good place to put the
129
+ // definition. Now here is the problem: if we ODR-use a static constexpr
130
+ // member, we are *obligated* to provide a definition. Without the
131
+ // definition, you get a compile error like:
132
+ //
133
+ // relocation R_X86_64_PC32 against undefined symbol
134
+ // `_ZN2at6vec25612_GLOBAL__N_16VectorizedIdE4sizeE' can not be used when making
135
+ // a shared object; recompile with -fPIC
136
+ //
137
+ // If this were C++17, we could replace a static constexpr variable with
138
+ // an inline variable which doesn't require one definition. But we are not
139
+ // C++17. So the next best thing is to replace the member with a static
140
+ // constexpr (and therefore inline) function, which does not require ODR
141
+ // either.
142
+ //
143
+ // Also, technically according to the C++ standard, we don't have to define
144
+ // a constexpr variable if we never odr-use it. But it seems that some
145
+ // versions of GCC/Clang have buggy determinations on whether or not an
146
+ // identifier is odr-used or not, and in any case it's hard to tell if
147
+ // a variable is odr-used or not. So best to just cut the problem at the root.
148
+ static constexpr size_type size() {
149
+ return VECTOR_WIDTH / sizeof(T);
150
+ }
151
+ Vectorized() : values{static_cast<T>(0)} {}
152
+ Vectorized(T val) {
153
+ for (int i = 0; i != size(); i++) {
154
+ values[i] = val;
155
+ }
156
+ }
157
+ template<typename... Args,
158
+ typename = std::enable_if_t<(sizeof...(Args) == size())>>
159
+ Vectorized(Args... vals) : values{vals...}{
160
+ }
161
+ // This also implies const T& operator[](int idx) const
162
+ inline operator const T*() const {
163
+ return values;
164
+ }
165
+ // This also implies T& operator[](int idx)
166
+ inline operator T*() {
167
+ return values;
168
+ }
169
+ // Return the values as char* for type punning
170
+ auto as_bytes() const -> const char* {
171
+ return reinterpret_cast<const char*>(values);
172
+ }
173
+ template <int64_t mask_>
174
+ static Vectorized<T> blend(const Vectorized<T>& a, const Vectorized<T>& b) {
175
+ int64_t mask = mask_;
176
+ Vectorized vector;
177
+ for (const auto i : c10::irange(size())) {
178
+ if (mask & 0x01) {
179
+ vector[i] = b[i];
180
+ } else {
181
+ vector[i] = a[i];
182
+ }
183
+ mask = mask >> 1;
184
+ }
185
+ return vector;
186
+ }
187
+ static Vectorized<T> blendv(const Vectorized<T>& a, const Vectorized<T>& b,
188
+ const Vectorized<T>& mask) {
189
+ Vectorized vector;
190
+ int_same_size_t<T> buffer[size()];
191
+ mask.store(buffer);
192
+ for (const auto i : c10::irange(size())) {
193
+ if (buffer[i] & 0x01)
194
+ {
195
+ vector[i] = b[i];
196
+ } else {
197
+ vector[i] = a[i];
198
+ }
199
+ }
200
+ return vector;
201
+ }
202
+ template<typename step_t> // step sometimes requires a higher precision type (e.g., T=int, step_t=double)
203
+ static Vectorized<T> arange(T base = static_cast<T>(0), step_t step = static_cast<step_t>(1)) {
204
+ Vectorized vector;
205
+ for (const auto i : c10::irange(size())) {
206
+ vector.values[i] = base + i * step;
207
+ }
208
+ return vector;
209
+ }
210
+ static Vectorized<T> set(const Vectorized<T>& a, const Vectorized<T>& b, int64_t count = size()) {
211
+ Vectorized vector;
212
+ for (const auto i : c10::irange(size())) {
213
+ if (i < count) {
214
+ vector[i] = b[i];
215
+ } else {
216
+ vector[i] = a[i];
217
+ }
218
+ }
219
+ return vector;
220
+ }
221
+ static Vectorized<T> loadu(const void* ptr) {
222
+ Vectorized vector;
223
+ std::memcpy(vector.values, ptr, VECTOR_WIDTH);
224
+ return vector;
225
+ }
226
+ static Vectorized<T> loadu(const void* ptr, int64_t count) {
227
+ Vectorized vector;
228
+ std::memcpy(vector.values, ptr, count * sizeof(T));
229
+ return vector;
230
+ }
231
+ void store(void* ptr, int count = size()) const {
232
+ std::memcpy(ptr, values, count * sizeof(T));
233
+ }
234
+ int zero_mask() const {
235
+ // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit
236
+ int mask = 0;
237
+ for (int i = 0; i < size(); ++ i) {
238
+ if (values[i] == static_cast<T>(0)) {
239
+ mask |= (1 << i);
240
+ }
241
+ }
242
+ return mask;
243
+ }
244
+ Vectorized<T> isnan() const {
245
+ Vectorized<T> vector;
246
+ for (int64_t i = 0; i != size(); i++) {
247
+ if (_isnan(values[i])) {
248
+ std::memset(static_cast<void*>(vector.values + i), 0xFF, sizeof(T));
249
+ } else {
250
+ std::memset(static_cast<void*>(vector.values + i), 0, sizeof(T));
251
+ }
252
+ }
253
+ return vector;
254
+ }
255
+ bool has_inf_nan() const {
256
+ for (int64_t i = 0; i != size(); i++) {
257
+ if(_isnan(values[i]) || _isinf(values[i])) {
258
+ return true;
259
+ }
260
+ }
261
+ return false;
262
+ }
263
+ Vectorized<T> map(T (*const f)(T)) const {
264
+ Vectorized<T> ret;
265
+ for (int64_t i = 0; i != size(); i++) {
266
+ ret[i] = f(values[i]);
267
+ }
268
+ return ret;
269
+ }
270
+ Vectorized<T> map(T (*const f)(const T &)) const {
271
+ Vectorized<T> ret;
272
+ for (int64_t i = 0; i != size(); i++) {
273
+ ret[i] = f(values[i]);
274
+ }
275
+ return ret;
276
+ }
277
+ template <typename other_t_abs = T,
278
+ typename std::enable_if<!is_floating_point_v<other_t_abs> && !c10::is_complex<other_t_abs>::value, int>::type = 0>
279
+ Vectorized<T> abs() const {
280
+ // other_t_abs is for SFINAE and clarity. Make sure it is not changed.
281
+ static_assert(std::is_same<other_t_abs, T>::value, "other_t_abs must be T");
282
+ return map([](T x) -> T { return x < static_cast<T>(0) ? -x : x; });
283
+ }
284
+ template <typename float_t_abs = T,
285
+ typename std::enable_if<is_floating_point_v<float_t_abs>, int>::type = 0>
286
+ Vectorized<T> abs() const {
287
+ // float_t_abs is for SFINAE and clarity. Make sure it is not changed.
288
+ static_assert(std::is_same<float_t_abs, T>::value, "float_t_abs must be T");
289
+ // Specifically deal with floating-point because the generic code above won't handle -0.0 (which should result in
290
+ // 0.0) properly.
291
+ return map([](T x) -> T { return std::abs(x); });
292
+ }
293
+ template <typename complex_t_abs = T,
294
+ typename std::enable_if<c10::is_complex<complex_t_abs>::value, int>::type = 0>
295
+ Vectorized<T> abs() const {
296
+ // complex_t_abs is for SFINAE and clarity. Make sure it is not changed.
297
+ static_assert(std::is_same<complex_t_abs, T>::value, "complex_t_abs must be T");
298
+ // Specifically map() does not perform the type conversion needed by abs.
299
+ return map([](T x) { return static_cast<T>(std::abs(x)); });
300
+ }
301
+
302
+ template <typename other_t_sgn = T,
303
+ typename std::enable_if<c10::is_complex<other_t_sgn>::value, int>::type = 0>
304
+ Vectorized<T> sgn() const {
305
+ return map(at::native::sgn_impl);
306
+ }
307
+
308
+ template <typename other_t_angle = T,
309
+ typename std::enable_if<!c10::is_complex<other_t_angle>::value, int>::type = 0>
310
+ Vectorized<T> angle() const {
311
+ // other_t_angle is for SFINAE and clarity. Make sure it is not changed.
312
+ static_assert(std::is_same<other_t_angle, T>::value, "other_t_angle must be T");
313
+ return map(at::native::angle_impl<T>); // compiler is unable to resolve the overload without <T>
314
+ }
315
+ template <typename complex_t_angle = T,
316
+ typename std::enable_if<c10::is_complex<complex_t_angle>::value, int>::type = 0>
317
+ Vectorized<T> angle() const {
318
+ // complex_t_angle is for SFINAE and clarity. Make sure it is not changed.
319
+ static_assert(std::is_same<complex_t_angle, T>::value, "complex_t_angle must be T");
320
+ return map([](T x) { return static_cast<T>(std::arg(x)); });
321
+ }
322
+ template <typename other_t_real = T,
323
+ typename std::enable_if<!c10::is_complex<other_t_real>::value, int>::type = 0>
324
+ Vectorized<T> real() const {
325
+ // other_t_real is for SFINAE and clarity. Make sure it is not changed.
326
+ static_assert(std::is_same<other_t_real, T>::value, "other_t_real must be T");
327
+ return *this;
328
+ }
329
+ template <typename complex_t_real = T,
330
+ typename std::enable_if<c10::is_complex<complex_t_real>::value, int>::type = 0>
331
+ Vectorized<T> real() const {
332
+ // complex_t_real is for SFINAE and clarity. Make sure it is not changed.
333
+ static_assert(std::is_same<complex_t_real, T>::value, "complex_t_real must be T");
334
+ return map([](T x) { return static_cast<T>(x.real()); });
335
+ }
336
+ template <typename other_t_imag = T,
337
+ typename std::enable_if<!c10::is_complex<other_t_imag>::value, int>::type = 0>
338
+ Vectorized<T> imag() const {
339
+ // other_t_imag is for SFINAE and clarity. Make sure it is not changed.
340
+ static_assert(std::is_same<other_t_imag, T>::value, "other_t_imag must be T");
341
+ return Vectorized(0);
342
+ }
343
+ template <typename complex_t_imag = T,
344
+ typename std::enable_if<c10::is_complex<complex_t_imag>::value, int>::type = 0>
345
+ Vectorized<T> imag() const {
346
+ // complex_t_imag is for SFINAE and clarity. Make sure it is not changed.
347
+ static_assert(std::is_same<complex_t_imag, T>::value, "complex_t_imag must be T");
348
+ return map([](T x) { return static_cast<T>(x.imag()); });
349
+ }
350
+ template <typename other_t_conj = T,
351
+ typename std::enable_if<!c10::is_complex<other_t_conj>::value, int>::type = 0>
352
+ Vectorized<T> conj() const {
353
+ // other_t_conj is for SFINAE and clarity. Make sure it is not changed.
354
+ static_assert(std::is_same<other_t_conj, T>::value, "other_t_conj must be T");
355
+ return *this;
356
+ }
357
+ template <typename complex_t_conj = T,
358
+ typename std::enable_if<c10::is_complex<complex_t_conj>::value, int>::type = 0>
359
+ Vectorized<T> conj() const {
360
+ // complex_t_conj is for SFINAE and clarity. Make sure it is not changed.
361
+ static_assert(std::is_same<complex_t_conj, T>::value, "complex_t_conj must be T");
362
+ return map([](T x) { return static_cast<T>(std::conj(x)); });
363
+ }
364
+ Vectorized<T> acos() const {
365
+ return map(std::acos);
366
+ }
367
+ Vectorized<T> acosh() const {
368
+ return map(std::acosh);
369
+ }
370
+ Vectorized<T> asin() const {
371
+ return map(std::asin);
372
+ }
373
+ Vectorized<T> atan() const {
374
+ return map(std::atan);
375
+ }
376
+ Vectorized<T> atanh() const {
377
+ return map(std::atanh);
378
+ }
379
+ Vectorized<T> atan2(const Vectorized<T> &exp) const {
380
+ Vectorized<T> ret;
381
+ for (const auto i : c10::irange(size())) {
382
+ ret[i] = std::atan2(values[i], exp[i]);
383
+ }
384
+ return ret;
385
+ }
386
+ template <
387
+ typename U = T,
388
+ typename std::enable_if_t<is_floating_point_v<U>, int> = 0>
389
+ Vectorized<T> copysign(const Vectorized<T> &sign) const {
390
+ Vectorized<T> ret;
391
+ for (size_type i = 0; i < size(); i++) {
392
+ ret[i] = c10::copysign(values[i], sign[i]);
393
+ }
394
+ return ret;
395
+ }
396
+ Vectorized<T> erf() const {
397
+ return map(std::erf);
398
+ }
399
+ Vectorized<T> erfc() const {
400
+ return map(std::erfc);
401
+ }
402
+ Vectorized<T> erfinv() const {
403
+ return map(calc_erfinv);
404
+ }
405
+ Vectorized<T> exp() const {
406
+ return map(std::exp);
407
+ }
408
+ Vectorized<T> exp2() const {
409
+ return map(exp2_impl);
410
+ }
411
+ Vectorized<T> expm1() const {
412
+ return map(std::expm1);
413
+ }
414
+ Vectorized<T> exp_u20() const {
415
+ return map(std::exp);
416
+ }
417
+ Vectorized<T> frac() const {
418
+ return *this - this->trunc();
419
+ }
420
+ template <
421
+ typename U = T,
422
+ typename std::enable_if_t<is_floating_point_v<U>, int> = 0>
423
+ Vectorized<T> fmod(const Vectorized<T>& q) const {
424
+ // U is for SFINAE purposes only. Make sure it is not changed.
425
+ static_assert(std::is_same<U, T>::value, "U must be T");
426
+ Vectorized<T> ret;
427
+ for (const auto i : c10::irange(size())) {
428
+ ret[i] = std::fmod(values[i], q[i]);
429
+ }
430
+ return ret;
431
+ }
432
+ Vectorized<T> log() const {
433
+ return map(std::log);
434
+ }
435
+ Vectorized<T> log10() const {
436
+ return map(std::log10);
437
+ }
438
+ Vectorized<T> log1p() const {
439
+ return map(std::log1p);
440
+ }
441
+ template <typename other_t_log2 = T,
442
+ typename std::enable_if<!c10::is_complex<other_t_log2>::value, int>::type = 0>
443
+ Vectorized<T> log2() const {
444
+ // other_t_log2 is for SFINAE and clarity. Make sure it is not changed.
445
+ static_assert(std::is_same<other_t_log2, T>::value, "other_t_log2 must be T");
446
+ return map(std::log2);
447
+ }
448
+ template <typename complex_t_log2 = T,
449
+ typename std::enable_if<c10::is_complex<complex_t_log2>::value, int>::type = 0>
450
+ Vectorized<T> log2() const {
451
+ // complex_t_log2 is for SFINAE and clarity. Make sure it is not changed.
452
+ static_assert(std::is_same<complex_t_log2, T>::value, "complex_t_log2 must be T");
453
+ const T log_2 = T(std::log(2.0));
454
+ return Vectorized(map(std::log))/Vectorized(log_2);
455
+ }
456
+ Vectorized<T> ceil() const {
457
+ return map(at::native::ceil_impl);
458
+ }
459
+ Vectorized<T> cos() const {
460
+ return map(std::cos);
461
+ }
462
+ Vectorized<T> cosh() const {
463
+ return map(std::cosh);
464
+ }
465
+ Vectorized<T> floor() const {
466
+ return map(at::native::floor_impl);
467
+ }
468
+ Vectorized<T> hypot(const Vectorized<T> &b) const {
469
+ Vectorized<T> ret;
470
+ for (const auto i : c10::irange(size())) {
471
+ ret[i] = std::hypot(values[i], b[i]);
472
+ }
473
+ return ret;
474
+ }
475
+ Vectorized<T> i0() const {
476
+ return map(calc_i0);
477
+ }
478
+ Vectorized<T> i0e() const {
479
+ return map(calc_i0e);
480
+ }
481
+ Vectorized<T> digamma() const {
482
+ return map(calc_digamma);
483
+ }
484
+ Vectorized<T> igamma(const Vectorized<T> &x) const {
485
+ Vectorized<T> ret;
486
+ for (const auto i : c10::irange(size())) {
487
+ ret[i] = calc_igamma(values[i], x[i]);
488
+ }
489
+ return ret;
490
+ }
491
+ Vectorized<T> igammac(const Vectorized<T> &x) const {
492
+ Vectorized<T> ret;
493
+ for (const auto i : c10::irange(size())) {
494
+ ret[i] = calc_igammac(values[i], x[i]);
495
+ }
496
+ return ret;
497
+ }
498
+ Vectorized<T> neg() const {
499
+ // NB: the trailing return type is needed because we need to coerce the
500
+ // return value back to T in the case of unary operator- incuring a
501
+ // promotion
502
+ return map([](T x) -> T { return -x; });
503
+ }
504
+ Vectorized<T> nextafter(const Vectorized<T> &b) const {
505
+ Vectorized<T> ret;
506
+ for (const auto i : c10::irange(size())) {
507
+ ret[i] = std::nextafter(values[i], b[i]);
508
+ }
509
+ return ret;
510
+ }
511
+ Vectorized<T> round() const {
512
+ // We do not use std::round because we would like to round midway numbers to the nearest even integer.
513
+ return map(at::native::round_impl);
514
+ }
515
+ Vectorized<T> sin() const {
516
+ return map(std::sin);
517
+ }
518
+ Vectorized<T> sinh() const {
519
+ return map(std::sinh);
520
+ }
521
+ Vectorized<T> tan() const {
522
+ return map(std::tan);
523
+ }
524
+ Vectorized<T> tanh() const {
525
+ return map(std::tanh);
526
+ }
527
+ Vectorized<T> trunc() const {
528
+ return map(at::native::trunc_impl);
529
+ }
530
+ Vectorized<T> lgamma() const {
531
+ return map(std::lgamma);
532
+ }
533
+ Vectorized<T> sqrt() const {
534
+ return map(std::sqrt);
535
+ }
536
+ Vectorized<T> reciprocal() const {
537
+ return map([](T x) { return (T)(1) / x; });
538
+ }
539
+ Vectorized<T> rsqrt() const {
540
+ return map([](T x) { return (T)1 / std::sqrt(x); });
541
+ }
542
+ Vectorized<T> pow(const Vectorized<T> &exp) const {
543
+ Vectorized<T> ret;
544
+ for (const auto i : c10::irange(size())) {
545
+ ret[i] = std::pow(values[i], exp[i]);
546
+ }
547
+ return ret;
548
+ }
549
+ private:
550
+ template <typename Op>
551
+ inline Vectorized<T> binary_pred(const Vectorized<T>& other, Op op) const {
552
+ // All bits are set to 1 if the pred is true, otherwise 0.
553
+ Vectorized<T> vector;
554
+ for (int64_t i = 0; i != size(); i++) {
555
+ if (op(values[i], other.values[i])) {
556
+ std::memset(static_cast<void*>(vector.values + i), 0xFF, sizeof(T));
557
+ } else {
558
+ std::memset(static_cast<void*>(vector.values + i), 0, sizeof(T));
559
+ }
560
+ }
561
+ return vector;
562
+ }
563
+
564
+ public:
565
+ Vectorized<T> operator==(const Vectorized<T>& other) const { return binary_pred(other, std::equal_to<T>()); }
566
+ Vectorized<T> operator!=(const Vectorized<T>& other) const { return binary_pred(other, std::not_equal_to<T>()); }
567
+ Vectorized<T> operator>=(const Vectorized<T>& other) const { return binary_pred(other, std::greater_equal<T>()); }
568
+ Vectorized<T> operator<=(const Vectorized<T>& other) const { return binary_pred(other, std::less_equal<T>()); }
569
+ Vectorized<T> operator>(const Vectorized<T>& other) const { return binary_pred(other, std::greater<T>()); }
570
+ Vectorized<T> operator<(const Vectorized<T>& other) const { return binary_pred(other, std::less<T>()); }
571
+
572
+ private:
573
+ template <typename Op>
574
+ inline Vectorized<T> binary_pred_bool(const Vectorized<T>& other, Op op) const {
575
+ // 1 if the pred is true, otherwise 0.
576
+ Vectorized<T> vector;
577
+ for (int i = 0; i != size(); ++ i) {
578
+ vector[i] = static_cast<T>(op(values[i], other.values[i]));
579
+ }
580
+ return vector;
581
+ }
582
+
583
+ public:
584
+ Vectorized<T> eq(const Vectorized<T>& other) const { return binary_pred_bool(other, std::equal_to<T>()); }
585
+ Vectorized<T> ne(const Vectorized<T>& other) const { return binary_pred_bool(other, std::not_equal_to<T>()); }
586
+ Vectorized<T> gt(const Vectorized<T>& other) const { return binary_pred_bool(other, std::greater<T>()); }
587
+ Vectorized<T> ge(const Vectorized<T>& other) const { return binary_pred_bool(other, std::greater_equal<T>()); }
588
+ Vectorized<T> lt(const Vectorized<T>& other) const { return binary_pred_bool(other, std::less<T>()); }
589
+ Vectorized<T> le(const Vectorized<T>& other) const { return binary_pred_bool(other, std::less_equal<T>()); }
590
+ };
591
+
592
+ template <class T> Vectorized<T> inline operator+(const Vectorized<T> &a, const Vectorized<T> &b) {
593
+ Vectorized<T> c;
594
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
595
+ c[i] = a[i] + b[i];
596
+ }
597
+ return c;
598
+ }
599
+
600
+ template <class T> Vectorized<T> inline operator-(const Vectorized<T> &a, const Vectorized<T> &b) {
601
+ Vectorized<T> c;
602
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
603
+ c[i] = a[i] - b[i];
604
+ }
605
+ return c;
606
+ }
607
+
608
+ template <class T> Vectorized<T> inline operator*(const Vectorized<T> &a, const Vectorized<T> &b) {
609
+ Vectorized<T> c;
610
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
611
+ c[i] = a[i] * b[i];
612
+ }
613
+ return c;
614
+ }
615
+
616
+ template <class T> Vectorized<T> inline operator/(const Vectorized<T> &a, const Vectorized<T> &b) __ubsan_ignore_float_divide_by_zero__ {
617
+ Vectorized<T> c;
618
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
619
+ c[i] = a[i] / b[i];
620
+ }
621
+ return c;
622
+ }
623
+
624
+ template <class T,
625
+ typename std::enable_if<!is_floating_point_v<T>, int>::type = 0>
626
+ Vectorized<T> inline operator%(const Vectorized<T> &a, const Vectorized<T> &b) __ubsan_ignore_float_divide_by_zero__ {
627
+ return a - a / b * b;
628
+ }
629
+
630
+ template <class T> Vectorized<T> inline operator||(
631
+ const Vectorized<T> &a, const Vectorized<T> &b) {
632
+ Vectorized<T> c;
633
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
634
+ c[i] = a[i] || b[i];
635
+ }
636
+ return c;
637
+ }
638
+
639
+ // Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
640
+ // either input is a NaN.
641
+ template <class T,
642
+ typename std::enable_if<!c10::is_complex<T>::value, int>::type = 0>
643
+ Vectorized<T> inline maximum(const Vectorized<T> &a, const Vectorized<T> &b) {
644
+ Vectorized<T> c;
645
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
646
+ c[i] = (a[i] > b[i]) ? a[i] : b[i];
647
+ if (_isnan(a[i])) {
648
+ // If either input is NaN, propagate a NaN.
649
+ // NOTE: The case where b[i] was NaN is handled correctly by the naive
650
+ // ternary operator above.
651
+ c[i] = a[i];
652
+ }
653
+ }
654
+ return c;
655
+ }
656
+
657
+ template <class T,
658
+ typename std::enable_if<c10::is_complex<T>::value, int>::type = 0>
659
+ Vectorized<T> inline maximum(const Vectorized<T> &a, const Vectorized<T> &b) {
660
+ Vectorized<T> c;
661
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
662
+ c[i] = (std::abs(a[i]) > std::abs(b[i])) ? a[i] : b[i];
663
+ if (_isnan(a[i])) {
664
+ // If either input is NaN, propagate a NaN.
665
+ // NOTE: The case where b[i] was NaN is handled correctly by the naive
666
+ // ternary operator above.
667
+ c[i] = a[i];
668
+ }
669
+ }
670
+ return c;
671
+ }
672
+
673
+ // Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
674
+ // either input is a NaN.
675
+ template <class T,
676
+ typename std::enable_if<!c10::is_complex<T>::value, int>::type = 0>
677
+ Vectorized<T> inline minimum(const Vectorized<T> &a, const Vectorized<T> &b) {
678
+ Vectorized<T> c;
679
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
680
+ c[i] = (a[i] < b[i]) ? a[i] : b[i];
681
+ if (_isnan(a[i])) {
682
+ // If either input is NaN, propagate a NaN.
683
+ // NOTE: The case where b[i] was NaN is handled correctly by the naive
684
+ // ternary operator above.
685
+ c[i] = a[i];
686
+ }
687
+ }
688
+ return c;
689
+ }
690
+
691
+ template <class T,
692
+ typename std::enable_if<c10::is_complex<T>::value, int>::type = 0>
693
+ Vectorized<T> inline minimum(const Vectorized<T> &a, const Vectorized<T> &b) {
694
+ Vectorized<T> c;
695
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
696
+ c[i] = (std::abs(a[i]) < std::abs(b[i])) ? a[i] : b[i];
697
+ if (_isnan(a[i])) {
698
+ // If either input is NaN, propagate a NaN.
699
+ // NOTE: The case where b[i] was NaN is handled correctly by the naive
700
+ // ternary operator above.
701
+ c[i] = a[i];
702
+ }
703
+ }
704
+ return c;
705
+ }
706
+
707
+ template <class T,
708
+ typename std::enable_if<!c10::is_complex<T>::value, int>::type = 0>
709
+ Vectorized<T> inline clamp(const Vectorized<T> &a, const Vectorized<T> &min_vec, const Vectorized<T> &max_vec) {
710
+ Vectorized<T> c;
711
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
712
+ c[i] = std::min(std::max(a[i], min_vec[i]), max_vec[i]);
713
+ }
714
+ return c;
715
+ }
716
+
717
+ template <class T,
718
+ typename std::enable_if<!c10::is_complex<T>::value, int>::type = 0>
719
+ Vectorized<T> inline clamp_max(const Vectorized<T> &a, const Vectorized<T> &max_vec) {
720
+ Vectorized<T> c;
721
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
722
+ c[i] = a[i] > max_vec[i] ? max_vec[i] : a[i];
723
+ }
724
+ return c;
725
+ }
726
+
727
+ template <class T,
728
+ typename std::enable_if<!c10::is_complex<T>::value, int>::type = 0>
729
+ Vectorized<T> inline clamp_min(const Vectorized<T> &a, const Vectorized<T> &min_vec) {
730
+ Vectorized<T> c;
731
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
732
+ c[i] = a[i] < min_vec[i] ? min_vec[i] : a[i];
733
+ }
734
+ return c;
735
+ }
736
+
737
+ struct Vectorizedi;
738
+
739
+ #if defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_AVX512)
740
+ template <class T, typename Op>
741
+ static inline Vectorized<T> bitwise_binary_op(const Vectorized<T> &a, const Vectorized<T> &b, Op op) {
742
+ int_vector buffer;
743
+ #if defined(CPU_CAPABILITY_AVX2)
744
+ int_vector a_buffer = _mm256_load_si256(reinterpret_cast<const int_vector*>((const T*)a));
745
+ int_vector b_buffer = _mm256_load_si256(reinterpret_cast<const int_vector*>((const T*)b));
746
+ #elif defined(CPU_CAPABILITY_AVX512)
747
+ int_vector a_buffer = _mm512_load_si512(reinterpret_cast<const int_vector*>((const T*)a));
748
+ int_vector b_buffer = _mm512_load_si512(reinterpret_cast<const int_vector*>((const T*)b));
749
+ #endif
750
+ buffer = op(a_buffer, b_buffer);
751
+ __at_align__ T results[Vectorized<T>::size()];
752
+
753
+ #if defined(CPU_CAPABILITY_AVX2)
754
+ _mm256_store_si256(reinterpret_cast<int_vector*>(results), buffer);
755
+ #elif defined(CPU_CAPABILITY_AVX512)
756
+ _mm512_store_si512(reinterpret_cast<int_vector*>(results), buffer);
757
+ #endif
758
+ return Vectorized<T>::loadu(results);
759
+ }
760
+
761
+ template<class T, typename std::enable_if_t<!std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
762
+ inline Vectorized<T> operator&(const Vectorized<T>& a, const Vectorized<T>& b) {
763
+ // We enclose _mm512_and_si512 or _mm256_and_si256 with lambda because it is always_inline
764
+ #if defined(CPU_CAPABILITY_AVX2)
765
+ return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm256_and_si256(a, b); });
766
+ #elif defined(CPU_CAPABILITY_AVX512)
767
+ return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm512_and_si512(a, b); });
768
+ #endif
769
+ }
770
+ template<class T, typename std::enable_if_t<!std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
771
+ inline Vectorized<T> operator|(const Vectorized<T>& a, const Vectorized<T>& b) {
772
+ // We enclose _mm512_or_si512 or _mm256_or_si256 with lambda because it is always_inline
773
+ #if defined(CPU_CAPABILITY_AVX2)
774
+ return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm256_or_si256(a, b); });
775
+ #elif defined(CPU_CAPABILITY_AVX512)
776
+ return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm512_or_si512(a, b); });
777
+ #endif
778
+ }
779
+ template<class T, typename std::enable_if_t<!std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
780
+ inline Vectorized<T> operator^(const Vectorized<T>& a, const Vectorized<T>& b) {
781
+ // We enclose _mm512_xor_si512 or _mm256_xor_si256 with lambda because it is always_inline
782
+ #if defined(CPU_CAPABILITY_AVX2)
783
+ return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm256_xor_si256(a, b); });
784
+ #elif defined(CPU_CAPABILITY_AVX512)
785
+ return bitwise_binary_op(a, b, [](int_vector a, int_vector b) { return _mm512_xor_si512(a, b); });
786
+ #endif
787
+ }
788
+
789
+ #else
790
+
791
+ template <typename T>
792
+ auto load(char const* data) -> T {
793
+ T ret;
794
+ std::memcpy(&ret, data, sizeof(ret));
795
+ return ret;
796
+ }
797
+
798
+ template<class T, typename Op>
799
+ static inline Vectorized<T> bitwise_binary_op(const Vectorized<T> &a, const Vectorized<T> &b, Op op) {
800
+ static constexpr uint32_t element_no = VECTOR_WIDTH / sizeof(intmax_t);
801
+ __at_align__ intmax_t buffer[element_no];
802
+ static_assert(VECTOR_WIDTH % sizeof(intmax_t) == 0, "VECTOR_WIDTH not a multiple of sizeof(intmax_t)");
803
+ static_assert(sizeof(buffer) == sizeof(Vectorized<T>), "sizeof(buffer) must match sizeof(Vectorized<T>)");
804
+ // We should be using memcpy in order to respect the strict aliasing rule
805
+ // see: https://github.com/pytorch/pytorch/issues/66119
806
+ // Using char* is defined in the C11 standard 6.5 Expression paragraph 7
807
+ // (http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf)
808
+ const auto* a_data = a.as_bytes();
809
+ const auto* b_data = b.as_bytes();
810
+ // load each intmax_t chunk and process; increase pointers by sizeof(intmax_t)
811
+ for (auto& out : buffer) {
812
+ out = op(load<intmax_t>(a_data), load<intmax_t>(b_data));
813
+ a_data += sizeof(intmax_t);
814
+ b_data += sizeof(intmax_t);
815
+ }
816
+ assert(a_data == a.as_bytes() + sizeof(a));
817
+ assert(b_data == b.as_bytes() + sizeof(b));
818
+ return Vectorized<T>::loadu(buffer);
819
+ }
820
+
821
+ template<class T, typename std::enable_if_t<!std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
822
+ inline Vectorized<T> operator&(const Vectorized<T>& a, const Vectorized<T>& b) {
823
+ return bitwise_binary_op(a, b, std::bit_and<intmax_t>());
824
+ }
825
+ template<class T, typename std::enable_if_t<!std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
826
+ inline Vectorized<T> operator|(const Vectorized<T>& a, const Vectorized<T>& b) {
827
+ return bitwise_binary_op(a, b, std::bit_or<intmax_t>());
828
+ }
829
+ template<class T, typename std::enable_if_t<!std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
830
+ inline Vectorized<T> operator^(const Vectorized<T>& a, const Vectorized<T>& b) {
831
+ return bitwise_binary_op(a, b, std::bit_xor<intmax_t>());
832
+ }
833
+
834
+ #endif // defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_AVX512)
835
+
836
+ template<class T, typename std::enable_if_t<!std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
837
+ inline Vectorized<T> operator~(const Vectorized<T>& a) {
838
+ Vectorized<T> ones; // All bits are 1
839
+ memset((T*) ones, 0xFF, VECTOR_WIDTH);
840
+ return a ^ ones;
841
+ }
842
+
843
+ template <class T> Vectorized<T> inline operator<<(const Vectorized<T> &a, const Vectorized<T> &b) {
844
+ constexpr T max_shift = sizeof(T) * CHAR_BIT;
845
+ Vectorized<T> c;
846
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
847
+ T shift = b[i];
848
+ if ((static_cast<std::make_signed_t<T>>(shift) < 0) || (shift >= max_shift)) {
849
+ c[i] = 0;
850
+ } else {
851
+ c[i] = static_cast<std::make_unsigned_t<T>>(a[i]) << shift;
852
+ }
853
+ }
854
+ return c;
855
+ }
856
+
857
+ template <class T> Vectorized<T> inline operator>>(const Vectorized<T> &a, const Vectorized<T> &b) {
858
+ // right shift value to retain sign bit for signed and no bits for unsigned
859
+ constexpr T max_shift = sizeof(T) * CHAR_BIT - std::is_signed_v<T>;
860
+ Vectorized<T> c;
861
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
862
+ T shift = b[i];
863
+ if ((static_cast<std::make_signed_t<T>>(shift) < 0) || (shift >= max_shift)) {
864
+ c[i] = a[i] >> max_shift;
865
+ } else {
866
+ c[i] = a[i] >> shift;
867
+ }
868
+ }
869
+ return c;
870
+ }
871
+
872
+ template <typename T>
873
+ inline Vectorized<T>& operator += (Vectorized<T>& a, const Vectorized<T>& b) {
874
+ a = a + b;
875
+ return a;
876
+ }
877
+ template <typename T>
878
+ inline Vectorized<T>& operator -= (Vectorized<T>& a, const Vectorized<T>& b) {
879
+ a = a - b;
880
+ return a;
881
+ }
882
+ template <typename T>
883
+ inline Vectorized<T>& operator /= (Vectorized<T>& a, const Vectorized<T>& b) {
884
+ a = a / b;
885
+ return a;
886
+ }
887
+ template <typename T>
888
+ inline Vectorized<T>& operator %= (Vectorized<T>& a, const Vectorized<T>& b) {
889
+ a = a % b;
890
+ return a;
891
+ }
892
+ template <typename T>
893
+ inline Vectorized<T>& operator *= (Vectorized<T>& a, const Vectorized<T>& b) {
894
+ a = a * b;
895
+ return a;
896
+ }
897
+
898
+ template <typename T>
899
+ inline Vectorized<T>& operator <<= (Vectorized<T>& a, const Vectorized<T>& b) {
900
+ a = a << b;
901
+ return a;
902
+ }
903
+
904
+ template <typename T>
905
+ inline Vectorized<T>& operator >>= (Vectorized<T>& a, const Vectorized<T>& b) {
906
+ a = a >> b;
907
+ return a;
908
+ }
909
+
910
+ template <typename T>
911
+ inline Vectorized<T> fmadd(const Vectorized<T>& a, const Vectorized<T>& b, const Vectorized<T>& c) {
912
+ return a * b + c;
913
+ }
914
+
915
+ template <typename T>
916
+ inline Vectorized<T> fmsub(const Vectorized<T>& a, const Vectorized<T>& b, const Vectorized<T>& c) {
917
+ return a * b - c;
918
+ }
919
+
920
+ template <int64_t scale = 1, typename T = void>
921
+ std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<T>>
922
+ inline gather(T const* base_addr, const Vectorized<int_same_size_t<T>>& vindex) {
923
+ static constexpr int size = Vectorized<T>::size();
924
+ int_same_size_t<T> index_arr[size];
925
+ vindex.store(static_cast<void*>(index_arr));
926
+ T buffer[size];
927
+ for (const auto i : c10::irange(size)) {
928
+ buffer[i] = base_addr[index_arr[i] * scale / sizeof(T)];
929
+ }
930
+ return Vectorized<T>::loadu(static_cast<void*>(buffer));
931
+ }
932
+
933
+ template <int64_t scale = 1, typename T = void>
934
+ std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<T>>
935
+ inline mask_gather(const Vectorized<T>& src, T const* base_addr,
936
+ const Vectorized<int_same_size_t<T>>& vindex, Vectorized<T>& mask) {
937
+ static constexpr int size = Vectorized<T>::size();
938
+ T src_arr[size];
939
+ int_same_size_t<T> mask_arr[size]; // use int type so we can logical and
940
+ int_same_size_t<T> index_arr[size];
941
+ src.store(static_cast<void*>(src_arr));
942
+ mask.store(static_cast<void*>(mask_arr));
943
+ vindex.store(static_cast<void*>(index_arr));
944
+ T buffer[size];
945
+ for (const auto i : c10::irange(size)) {
946
+ if (mask_arr[i] & 0x01) { // check the lowest bit (mask lanes are all-ones or all-zeros)
947
+ buffer[i] = base_addr[index_arr[i] * scale / sizeof(T)];
948
+ } else {
949
+ buffer[i] = src_arr[i];
950
+ }
951
+ }
952
+ mask = Vectorized<T>(); // "zero out" mask
953
+ return Vectorized<T>::loadu(static_cast<void*>(buffer));
954
+ }
955
+
956
+ // Cast a given vector to another type without changing the bits representation.
957
+ // So a Vectorized<double> of 512 bits containing all ones can be cast to a
958
+ // Vectorized<int64_t> of 512 bits containing all ones (i.e., eight negative 1s).
959
+ // A Vec<double> of 256 bits containing all ones can be cast to a
960
+ // Vec<int64_t> of 256 bits containing all ones (i.e., four negative 1s).
961
+ // There is a struct here because we don't have static_if and I can't
962
+ // partially specialize a templated function.
963
+ template<typename dst_t, typename src_t>
964
+ struct CastImpl {
965
+ static inline Vectorized<dst_t> apply(const Vectorized<src_t>& src) {
966
+ src_t src_arr[Vectorized<src_t>::size()];
967
+ src.store(static_cast<void*>(src_arr));
968
+ return Vectorized<dst_t>::loadu(static_cast<const void*>(src_arr));
969
+ }
970
+ };
971
+
972
+ template<typename scalar_t>
973
+ struct CastImpl<scalar_t, scalar_t> {
974
+ static inline Vectorized<scalar_t> apply(const Vectorized<scalar_t>& src) {
975
+ return src;
976
+ }
977
+ };
978
+
979
+ template<typename dst_t, typename src_t>
980
+ inline Vectorized<dst_t> cast(const Vectorized<src_t>& src) {
981
+ return CastImpl<dst_t, src_t>::apply(src);
982
+ }
983
+
984
+ template <typename T, typename IntType = int_same_size_t<T>>
985
+ inline Vectorized<IntType> convert_to_int_of_same_size(const Vectorized<T>& src) {
986
+ static_assert(sizeof(T) == sizeof(IntType));
987
+ static constexpr int size = Vectorized<T>::size();
988
+
989
+ std::array<T, size> src_arr;
990
+ src.store(static_cast<void*>(src_arr.data()));
991
+ std::array<IntType, size> buffer;
992
+ std::transform(src_arr.cbegin(), src_arr.cend(), buffer.begin(),
993
+ [](const T& x) { return static_cast<IntType>(x); });
994
+ return Vectorized<IntType>::loadu(static_cast<const void*>(buffer.data()));
995
+ }
996
+
997
+ template <typename T, typename IntType = int_same_size_t<T>>
998
+ inline Vectorized<T> convert_to_fp_of_same_size(const Vectorized<IntType>& src) {
999
+ static_assert(sizeof(T) == sizeof(IntType));
1000
+ static constexpr int size = Vectorized<T>::size();
1001
+
1002
+ std::array<IntType, size> src_arr;
1003
+ src.store(static_cast<void*>(src_arr.data()));
1004
+ std::array<T, size> buffer;
1005
+ std::transform(src_arr.cbegin(), src_arr.cend(), buffer.begin(),
1006
+ [](const IntType& x) { return static_cast<T>(x); });
1007
+ return Vectorized<T>::loadu(static_cast<const void*>(buffer.data()));
1008
+ }
1009
+
1010
+ // Example inputs for AVX512:
1011
+ // a Vectorized<float> = {a0, b0, a1, b1, a2, b2, a3, b3, a4, b4, a5, b5, a6, b6, a7, b7}
1012
+ // b Vectorized<float> = {a8, b8, a9, b9, a10, b10, a11, b11, a12, b12, a13, b13, a14, b14, a15, b15}
1013
+ // returns:
1014
+ // Vectorized<float> = {a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15}
1015
+ // Vectorized<float> = {b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15}
1016
+ // Example inputs for AVX2: a Vectorized<float> = {a0, b0, a1, b1, a2, b2, a3, b3}
1017
+ // b Vectorized<float> = {a4, b4, a5, b5, a6, b6, a7, b7}
1018
+ // returns: Vectorized<float> = {a0, a1, a2, a3, a4, a5, a6, a7}
1019
+ // Vectorized<float> = {b0, b1, b2, b3, b4, b5, b6, b7}
1020
+ template <typename T>
1021
+ inline std::enable_if_t<Vectorized<T>::size() % 2 == 0, std::pair<Vectorized<T>, Vectorized<T>>>
1022
+ deinterleave2(const Vectorized<T>& a, const Vectorized<T>& b) {
1023
+ static constexpr int size = Vectorized<T>::size();
1024
+ static constexpr int half_size = size / 2;
1025
+ T a_arr[size];
1026
+ T b_arr[size];
1027
+ T buffer1[size];
1028
+ T buffer2[size];
1029
+ a.store(static_cast<void*>(a_arr));
1030
+ b.store(static_cast<void*>(b_arr));
1031
+ for (const auto i : c10::irange(half_size)) {
1032
+ buffer1[i] = a_arr[i * 2];
1033
+ buffer1[half_size + i] = b_arr[i * 2];
1034
+ buffer2[i] = a_arr[i * 2 + 1];
1035
+ buffer2[half_size + i] = b_arr[i * 2 + 1];
1036
+ }
1037
+ return std::make_pair(Vectorized<T>::loadu(static_cast<void*>(buffer1)),
1038
+ Vectorized<T>::loadu(static_cast<void*>(buffer2)));
1039
+ }
1040
+
1041
+ // inverse operation of deinterleave2
1042
+ // Example inputs for AVX512:
1043
+ // a Vectorized<float> = {a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15}
1044
+ // b Vectorized<float> = {b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15}
1045
+ // returns, for AVX512:
1046
+ // Vectorized<float> = {a0, b0, a1, b1, a2, b2, a3, b3, a4, b4, a5, b5, a6, b6, a7, b7}
1047
+ // Vectorized<float> = {a8, b8, a9, b9, a10, b10, a11, b11, a12, b12, a13, b13, a14, b14, a15, b15}
1048
+ // Example inputs for AVX2 : a Vectorized<float> = {a0, a1, a2, a3, a4, a5, a6, a7}
1049
+ // b Vectorized<float> = {b0, b1, b2, b3, b4, b5, b6, b7}
1050
+ // returns: Vectorized<float> = {a0, b0, a1, b1, a2, b2, a3, b3}
1051
+ // Vectorized<float> = {a4, b4, a5, b5, a6, b6, a7, b7}
1052
+ template <typename T>
1053
+ inline std::enable_if_t<Vectorized<T>::size() % 2 == 0, std::pair<Vectorized<T>, Vectorized<T>>>
1054
+ interleave2(const Vectorized<T>& a, const Vectorized<T>& b) {
1055
+ static constexpr int size = Vectorized<T>::size();
1056
+ static constexpr int half_size = size / 2;
1057
+ T a_arr[size];
1058
+ T b_arr[size];
1059
+ T buffer1[size];
1060
+ T buffer2[size];
1061
+ a.store(static_cast<void*>(a_arr));
1062
+ b.store(static_cast<void*>(b_arr));
1063
+ for (const auto i : c10::irange(half_size)) {
1064
+ buffer1[i * 2] = a_arr[i];
1065
+ buffer1[i * 2 + 1] = b_arr[i];
1066
+ buffer2[i * 2] = a_arr[half_size + i];
1067
+ buffer2[i * 2 + 1] = b_arr[half_size + i];
1068
+ }
1069
+ return std::make_pair(Vectorized<T>::loadu(static_cast<void*>(buffer1)),
1070
+ Vectorized<T>::loadu(static_cast<void*>(buffer2)));
1071
+ }
1072
+
1073
+ template <typename src_T, typename dst_T>
1074
+ inline void convert(const src_T *src, dst_T *dst, int64_t n) {
1075
+ #ifndef _MSC_VER
1076
+ # pragma unroll
1077
+ #endif
1078
+ for (C10_UNUSED const auto i : c10::irange(n)) {
1079
+ *dst = c10::convert<dst_T>(c10::load(src));
1080
+ src++;
1081
+ dst++;
1082
+ }
1083
+ }
1084
+
1085
+ template <typename T>
1086
+ inline Vectorized<T> flip(const Vectorized<T> & data) {
1087
+ static constexpr int size = Vectorized<T>::size();
1088
+ T output[size];
1089
+ T buffer[size];
1090
+ data.store(static_cast<void*>(buffer));
1091
+ for (const auto i : c10::irange(size)) {
1092
+ output[i] = buffer[size - i - 1];
1093
+ }
1094
+ return Vectorized<T>::loadu(static_cast<void*>(output));
1095
+ }
1096
+
1097
+ // Transpose the `src` buffer of type `T` and size (M,N) into the `dst` buffer. `ld_src` is the leading
1098
+ // dimension of `src` and `ld_dst` is the leading dimension of `dst`.
1099
+ template <typename T, int M, int N>
1100
+ inline void transpose_mxn(const T* src, int64_t ld_src, T* dst, int64_t ld_dst) {
1101
+ for (int i = 0; i < M; i++) {
1102
+ for (int j = 0; j < N; j++) {
1103
+ dst[j*ld_dst + i] = src[i*ld_src + j];
1104
+ }
1105
+ }
1106
+ }
1107
+
1108
+ }} // namespace at::vec::CPU_CAPABILITY
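Every member of the generic Vectorized<T> above is a plain scalar loop, so code written against this API compiles unchanged whether or not an architecture-specific specialization is selected. A minimal sketch of the intended usage pattern, assuming only the vec_base.h surface shown above (the input values are illustrative):

#include <array>
#include <ATen/cpu/vec/vec_base.h>

void fmadd_clamp_demo() {
  using Vec = at::vec::Vectorized<float>;
  std::array<float, Vec::size()> a, b, c, out;
  a.fill(1.5f); b.fill(2.0f); c.fill(0.25f);     // illustrative inputs
  Vec va = Vec::loadu(a.data());
  Vec vb = Vec::loadu(b.data());
  Vec vc = Vec::loadu(c.data());
  Vec r = at::vec::fmadd(va, vb, vc);            // a * b + c, element-wise
  r = at::vec::clamp(r, Vec(0.0f), Vec(3.0f));   // clamp into [0, 3]
  r.store(out.data());                           // write the full vector back
}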
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_half.h ADDED
@@ -0,0 +1,50 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cpu/vec/intrinsics.h>
4
+
5
+ namespace at::vec {
6
+ // See Note [CPU_CAPABILITY namespace]
7
+ inline namespace CPU_CAPABILITY {
8
+
9
+ #if (defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_AVX512)) && \
10
+ !defined(__APPLE__)
11
+ static inline uint16_t float2half_scalar(float val) {
12
+ #if defined(CPU_CAPABILITY_AVX2)
13
+ #if defined(_MSC_VER)
14
+ __m256 v = _mm256_set1_ps(val);
15
+ __m128i o =
16
+ _mm256_cvtps_ph(v, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
17
+ return static_cast<std::uint16_t>(_mm_cvtsi128_si32(o));
18
+ #else
19
+ return _cvtss_sh(val, _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
20
+ #endif
21
+ #elif defined(CPU_CAPABILITY_AVX512)
22
+ __m512 v = _mm512_set1_ps(val);
23
+ __m256i o =
24
+ _mm512_cvtps_ph(v, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
25
+ return static_cast<std::uint16_t>(
26
+ _mm_cvtsi128_si32(_mm256_castsi256_si128(o)));
27
+ #endif
28
+ }
29
+
30
+ static inline float half2float_scalar(uint16_t val) {
31
+ #if defined(CPU_CAPABILITY_AVX2)
32
+ #if defined(_MSC_VER)
33
+ __m128i v = _mm_cvtsi32_si128(val);
34
+ __m256 o = _mm256_cvtph_ps(v);
35
+ return _mm256_cvtss_f32(o);
36
+ #else
37
+ return _cvtsh_ss(val);
38
+ #endif
39
+ #elif defined(CPU_CAPABILITY_AVX512)
40
+ __m256i v =
41
+ _mm256_setr_epi16(val, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
42
+ __m512 o = _mm512_cvtph_ps(v);
43
+ return _mm512_cvtss_f32(o);
44
+ #endif
45
+ }
46
+
47
+ #endif
48
+
49
+ } // namespace CPU_CAPABILITY
50
+ } // namespace at::vec
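float2half_scalar and half2float_scalar above wrap the hardware float<->half conversion instructions for a single value. A minimal round-trip sketch, assuming a translation unit where CPU_CAPABILITY_AVX2 or CPU_CAPABILITY_AVX512 is defined and the target is not Apple (the guard mirrors the one in the header); the fallback branch is an assumption added for builds where the helpers do not exist:

#include <cstdint>
#include <ATen/cpu/vec/vec_half.h>

// Round-trip a float through the half-precision scalar helpers shown above.
float half_round_trip(float x) {
#if (defined(CPU_CAPABILITY_AVX2) || defined(CPU_CAPABILITY_AVX512)) && !defined(__APPLE__)
  uint16_t h = at::vec::float2half_scalar(x);    // float -> IEEE binary16 bits
  return at::vec::half2float_scalar(h);          // binary16 bits -> float
#else
  return x;                                      // no-op on builds without the helpers
#endif
}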
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec_n.h ADDED
@@ -0,0 +1,344 @@
1
+ #include <ATen/cpu/vec/vec_base.h>
2
+ #include <array>
3
+
4
+ namespace at::vec {
5
+ inline namespace CPU_CAPABILITY {
6
+
7
+ /**
8
+ * @brief A class template representing a vectorized type with
9
+ * `N * Vectorized<T>::size()` elements, aiming to support vectors of
10
+ * arbitrary size. A specific use case of it is to represent vectors
11
+ * converted from data types with different sizes but with the same
12
+ * number of vector elements, e.g., `VectorizedN<float, 2>` can be
13
+ * a vector converted from two `Vectorized<bfloat16>`, `VectorizedN<int64_t, 2>`
14
+ * can be a vector converted from two `Vectorized<int32_t>` etc.
15
+ *
16
+ * It supports most of the operations of `Vectorized<T>`
17
+ * and the implementation delegates to `Vectorized<T>` with loops over `N`.
18
+ *
19
+ * @tparam T The underlying type of the vectorized elements.
20
+ * @tparam N The number of underlying `Vectorized<T>`.
21
+ */
22
+ template <typename T, int N>
23
+ class VectorizedN {
24
+ public:
25
+ using value_type = T;
26
+ using size_type = int;
27
+
28
+ static constexpr size_type size_T = sizeof(T);
29
+ static constexpr size_type size() {
30
+ return Vectorized<T>::size() * N;
31
+ }
32
+
33
+ private:
34
+ std::array<Vectorized<T>, N> values;
35
+
36
+ public:
37
+ // methods not implemented yet:
38
+ // variadic constructor, operator T*, as_bytes, zero_mask
39
+
40
+ #define VECTORIZEDN_DEFINE_UNARY_OP(op) \
41
+ VectorizedN<T, N> op() const { \
42
+ return unary_op([](const Vectorized<T>& a) { return a.op(); }); \
43
+ }
44
+
45
+ #define VECTORIZEDN_DEFINE_BINARY_OP(op) \
46
+ VectorizedN<T, N> op(const VectorizedN<T, N>& other) const { \
47
+ return binary_op( \
48
+ other, [](const Vectorized<T>& a, const Vectorized<T>& b) { \
+ return a.op(b); \
+ }); \
+ }
+
+ template <typename Op>
+ inline VectorizedN<T, N> unary_op(Op op) const {
+ VectorizedN<T, N> result;
+ #ifndef _MSC_VER
+ #pragma unroll
+ #endif
+ for (int i = 0; i < N; ++i) {
+ result.values[i] = op(values[i]);
+ }
+ return result;
+ }
+
+ template <typename Op>
+ inline VectorizedN<T, N> binary_op(const VectorizedN<T, N>& other, Op op)
+ const {
+ VectorizedN<T, N> result;
+ #ifndef _MSC_VER
+ #pragma unroll
+ #endif
+ for (int i = 0; i < N; ++i) {
+ result.values[i] = op(values[i], other.values[i]);
+ }
+ return result;
+ }
+
+ VectorizedN() = default;
+
+ explicit VectorizedN(T val) {
+ for (int i = 0; i < N; ++i) {
+ values[i] = Vectorized<T>(val);
+ }
+ }
+
+ const Vectorized<T>& operator[](int i) const {
+ return values[i];
+ }
+
+ Vectorized<T>& operator[](int i) {
+ return values[i];
+ }
+
+ template <int64_t mask>
+ static VectorizedN<T, N> blend(
+ const VectorizedN<T, N>& a,
+ const VectorizedN<T, N>& b) {
+ VectorizedN<T, N> result;
+ for (int i = 0; i < N; ++i) {
+ result.values[i] = Vectorized<T>::blend<mask>(a.values[i], b.values[i]);
+ }
+ return result;
+ }
+
+ static VectorizedN<T, N> blendv(
+ const VectorizedN<T, N>& a,
+ const VectorizedN<T, N>& b,
+ const VectorizedN<T, N>& mask) {
+ VectorizedN<T, N> result;
+ for (int i = 0; i < N; ++i) {
+ result.values[i] =
+ Vectorized<T>::blendv(a.values[i], b.values[i], mask.values[i]);
+ }
+ return result;
+ }
+
+ template <typename step_t>
+ static VectorizedN<T, N> arange(
+ T base = static_cast<T>(0),
+ step_t step = static_cast<step_t>(1)) {
+ VectorizedN<T, N> result;
+ for (int i = 0; i < N; ++i) {
+ result.values[i] = Vectorized<T>::arange(base, step);
+ base += step * Vectorized<T>::size();
+ }
+ return result;
+ }
+
+ static VectorizedN<T, N> set(
+ const VectorizedN<T, N>& a,
+ const VectorizedN<T, N>& b,
+ int64_t count = size()) {
+ VectorizedN<T, N> result;
+ for (int i = 0; i < N; ++i) {
+ result.values[i] =
+ Vectorized<T>::set(a.values[i], b.values[i], std::min(count, Vectorized<T>::size()));
+ count -= Vectorized<T>::size();
+ if (count <= 0) {
+ break;
+ }
+ }
+ return result;
+ }
+
+ static VectorizedN<T, N> loadu(const void* ptr) {
+ VectorizedN<T, N> result;
+ for (int i = 0; i < N; ++i) {
+ result.values[i] = Vectorized<T>::loadu(ptr);
+ ptr = static_cast<const T*>(ptr) + Vectorized<T>::size();
+ }
+ return result;
+ }
+
+ static VectorizedN<T, N> loadu(const void* ptr, int64_t count) {
+ VectorizedN<T, N> result;
+ for (int i = 0; i < N; ++i) {
+ result.values[i] =
+ Vectorized<T>::loadu(ptr, std::min(count, Vectorized<T>::size()));
+ ptr = static_cast<const T*>(ptr) + Vectorized<T>::size();
+ count -= Vectorized<T>::size();
+ if (count <= 0) {
+ break;
+ }
+ }
+ return result;
+ }
+
+ void store(void* ptr) const {
+ for (int i = 0; i < N; ++i) {
+ values[i].store(ptr);
+ ptr = static_cast<T*>(ptr) + Vectorized<T>::size();
+ }
+ }
+
+ void store(void* ptr, int count) const {
+ for (int i = 0; i < N; ++i) {
+ values[i].store(ptr, std::min(count, Vectorized<T>::size()));
+ ptr = static_cast<T*>(ptr) + Vectorized<T>::size();
+ count -= Vectorized<T>::size();
+ if (count <= 0) {
+ break;
+ }
+ }
+ }
+
+ bool has_inf_nan() const {
+ for (int i = 0; i < N; ++i) {
+ if (values[i].has_inf_nan()) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ VectorizedN<T, N> map(T (*const f)(T)) const {
+ VectorizedN<T, N> result;
+ for (int i = 0; i < N; ++i) {
+ result.values[i] = values[i].map(f);
+ }
+ return result;
+ }
+
+ VectorizedN<T, N> map(T (*const f)(const T&)) const {
+ VectorizedN<T, N> result;
+ for (int i = 0; i < N; ++i) {
+ result.values[i] = values[i].map(f);
+ }
+ return result;
+ }
+
+ VECTORIZEDN_DEFINE_UNARY_OP(abs)
+ VECTORIZEDN_DEFINE_UNARY_OP(sgn)
+ VECTORIZEDN_DEFINE_UNARY_OP(angle)
+ VECTORIZEDN_DEFINE_UNARY_OP(real)
+ VECTORIZEDN_DEFINE_UNARY_OP(imag)
+ VECTORIZEDN_DEFINE_UNARY_OP(conj)
+ VECTORIZEDN_DEFINE_UNARY_OP(acos)
+ VECTORIZEDN_DEFINE_UNARY_OP(acosh)
+ VECTORIZEDN_DEFINE_UNARY_OP(asin)
+ VECTORIZEDN_DEFINE_UNARY_OP(atan)
+ VECTORIZEDN_DEFINE_UNARY_OP(atanh)
+ VECTORIZEDN_DEFINE_BINARY_OP(atan2)
+ VECTORIZEDN_DEFINE_BINARY_OP(copysign)
+ VECTORIZEDN_DEFINE_UNARY_OP(erf)
+ VECTORIZEDN_DEFINE_UNARY_OP(erfc)
+ VECTORIZEDN_DEFINE_UNARY_OP(erfinv)
+ VECTORIZEDN_DEFINE_UNARY_OP(exp)
+ VECTORIZEDN_DEFINE_UNARY_OP(exp2)
+ VECTORIZEDN_DEFINE_UNARY_OP(expm1)
+ VECTORIZEDN_DEFINE_UNARY_OP(exp_u20)
+ VECTORIZEDN_DEFINE_UNARY_OP(frac)
+ VECTORIZEDN_DEFINE_BINARY_OP(fmod)
+ VECTORIZEDN_DEFINE_UNARY_OP(log)
+ VECTORIZEDN_DEFINE_UNARY_OP(log10)
+ VECTORIZEDN_DEFINE_UNARY_OP(log1p)
+ VECTORIZEDN_DEFINE_UNARY_OP(log2)
+ VECTORIZEDN_DEFINE_UNARY_OP(ceil)
+ VECTORIZEDN_DEFINE_UNARY_OP(cos)
+ VECTORIZEDN_DEFINE_UNARY_OP(cosh)
+ VECTORIZEDN_DEFINE_UNARY_OP(floor)
+ VECTORIZEDN_DEFINE_BINARY_OP(hypot)
+ VECTORIZEDN_DEFINE_UNARY_OP(i0)
+ VECTORIZEDN_DEFINE_UNARY_OP(i0e)
+ VECTORIZEDN_DEFINE_UNARY_OP(digamma)
+ VECTORIZEDN_DEFINE_BINARY_OP(igamma)
+ VECTORIZEDN_DEFINE_BINARY_OP(igammac)
+ VECTORIZEDN_DEFINE_UNARY_OP(neg)
+ VECTORIZEDN_DEFINE_BINARY_OP(nextafter)
+ VECTORIZEDN_DEFINE_UNARY_OP(round)
+ VECTORIZEDN_DEFINE_UNARY_OP(sin)
+ VECTORIZEDN_DEFINE_UNARY_OP(sinh)
+ VECTORIZEDN_DEFINE_UNARY_OP(tan)
+ VECTORIZEDN_DEFINE_UNARY_OP(tanh)
+ VECTORIZEDN_DEFINE_UNARY_OP(trunc)
+ VECTORIZEDN_DEFINE_UNARY_OP(lgamma)
+ VECTORIZEDN_DEFINE_UNARY_OP(sqrt)
+ VECTORIZEDN_DEFINE_UNARY_OP(reciprocal)
+ VECTORIZEDN_DEFINE_UNARY_OP(rsqrt)
+ VECTORIZEDN_DEFINE_BINARY_OP(pow)
+ VECTORIZEDN_DEFINE_BINARY_OP(operator==)
+ VECTORIZEDN_DEFINE_BINARY_OP(operator!=)
+ VECTORIZEDN_DEFINE_BINARY_OP(operator>=)
+ VECTORIZEDN_DEFINE_BINARY_OP(operator<=)
+ VECTORIZEDN_DEFINE_BINARY_OP(operator>)
+ VECTORIZEDN_DEFINE_BINARY_OP(operator<)
+ VECTORIZEDN_DEFINE_BINARY_OP(eq)
+ VECTORIZEDN_DEFINE_BINARY_OP(ne)
+ VECTORIZEDN_DEFINE_BINARY_OP(gt)
+ VECTORIZEDN_DEFINE_BINARY_OP(ge)
+ VECTORIZEDN_DEFINE_BINARY_OP(lt)
+ VECTORIZEDN_DEFINE_BINARY_OP(le)
+
+ #undef VECTORIZEDN_DEFINE_UNARY_OP
+ #undef VECTORIZEDN_DEFINE_BINARY_OP
+ };
+
+ #define VECTORIZEDN_DEFINE_UNARY_OP_GLOBAL(op) \
+ template <typename T, int N> \
+ inline VectorizedN<T, N> op(const VectorizedN<T, N>& a) { \
+ return a.unary_op([](const Vectorized<T>& a) { return op(a); }); \
+ }
+
+ #define VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(op) \
+ template <typename T, int N> \
+ inline VectorizedN<T, N> op( \
+ const VectorizedN<T, N>& a, const VectorizedN<T, N>& b) { \
+ return a.binary_op(b, [](const Vectorized<T>& a, const Vectorized<T>& b) { \
+ return op(a, b); \
+ }); \
+ }
+
+ #define VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(op) \
+ template <typename T, int N> \
+ inline VectorizedN<T, N>& op( \
+ VectorizedN<T, N>& a, const VectorizedN<T, N>& b) { \
+ a = a.binary_op(b, [](const Vectorized<T>& a, const Vectorized<T>& b) { \
+ return op(a, b); \
+ }); \
+ return a; \
+ }
+
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator+)
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator-)
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator*)
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator/)
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator%)
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator||)
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator<<)
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator>>)
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(maximum)
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(minimum)
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(fmadd)
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(fmsub)
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(clamp)
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(clamp_max)
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(clamp_min)
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator&)
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator|)
+ VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL(operator^)
+ VECTORIZEDN_DEFINE_UNARY_OP_GLOBAL(operator~)
+
+ VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator+=)
+ VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator-=)
+ VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator*=)
+ VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator/=)
+ VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator%=)
+ VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator<<=)
+ VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL(operator>>=)
+
+ #undef VECTORIZEDN_DEFINE_UNARY_OP_GLOBAL
+ #undef VECTORIZEDN_DEFINE_BINARY_OP_GLOBAL
+ #undef VECTORIZEDN_DEFINE_BINARY_OP_INPLACE_GLOBAL
+
+ template <typename T, int N, typename OpVec>
+ inline T vec_reduce_all(const OpVec& vec_fun, VectorizedN<T, N> acc_vec) {
+ Vectorized<T> vec_result = acc_vec[0];
+ for (int i = 1; i < N; i++) {
+ vec_result = vec_fun(vec_result, acc_vec[i]);
+ }
+ return vec_reduce_all(vec_fun, vec_result);
+ }
+
+ } // namespace CPU_CAPABILITY
+ } // namespace at::vec
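For orientation, a minimal usage sketch of the VectorizedN<T, N> API added above (not part of the upstream diff; the vec_n.h include path and the static size() helper returning N * Vectorized<T>::size() are assumptions taken from the surrounding header):

// Minimal sketch: elementwise add over float buffers using VectorizedN<float, 2>.
// Assumes this is compiled in a CPU_CAPABILITY translation unit.
#include <ATen/cpu/vec/vec_n.h>  // assumed include path for this header
#include <cstdint>

void add_buffers(const float* a, const float* b, float* out, int64_t n) {
  using VecN = at::vec::VectorizedN<float, 2>;
  int64_t i = 0;
  for (; i + VecN::size() <= n; i += VecN::size()) {
    auto va = VecN::loadu(a + i);   // full-width unaligned load
    auto vb = VecN::loadu(b + i);
    (va + vb).store(out + i);       // operator+ comes from the global op macros
  }
  if (i < n) {                      // tail via the count-aware overloads
    auto va = VecN::loadu(a + i, n - i);
    auto vb = VecN::loadu(b + i, n - i);
    (va + vb).store(out + i, static_cast<int>(n - i));
  }
}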
venv/lib/python3.10/site-packages/torch/include/ATen/cpu/vml.h ADDED
@@ -0,0 +1,171 @@
+ #pragma once
+
+ #include <ATen/Config.h>
+ #include <ATen/Parallel.h>
+ #include <ATen/OpMathType.h>
+ #include <ATen/cpu/vec/functional.h>
+ #include <ATen/cpu/vec/vec.h>
+ #include <c10/util/complex.h>
+
+ // This header implements various unary operations using an MKL VML style
+ // interface.
+
+ // It implements various functions with a simple interface. For example, it
+ // enables the user to call vsin(float* out, const float* in, size). This
+ // function takes a pointer to a contiguous output array of floats and a
+ // constant input array. It will then apply sin to each value in the input
+ // array and write the result into the output array. out and in may point to
+ // the same memory, i.e. this fully supports in-place operations. These
+ // functions also implement their own parallelization, so take precautions
+ // when calling these from threaded functions.
+
+ // When MKL is available it will call into MKL's VML library, similar to NumPy.
+ // If MKL is not available it will use SLEEF.
+
+ // This file might be compiled under AVX or AVX2 when called from e.g.
+ // UnaryOpsKernel.cpp
+
+ #include <algorithm>
+ #include <cstddef>
+ #include <cstdint>
+ #include <cstring>
+ #include <type_traits>
+
+ #if AT_MKL_ENABLED() && !defined(__APPLE__)
+ #include <mkl.h>
+ #endif
+
+ namespace at {
+ namespace vml {
+ inline namespace CPU_CAPABILITY {
+
+ using namespace vec;
+
+ template <typename scalar_t>
+ inline void vrsqrt(scalar_t* out, scalar_t* in, int64_t size) {
+ parallel_for(0, size, 2048, [out, in](int64_t begin, int64_t end) {
+ map(
+ [](const Vectorized<scalar_t>& x) {
+ return Vectorized<scalar_t>((scalar_t)(1)) / x.sqrt();
+ },
+ out + begin,
+ in + begin,
+ end - begin);
+ });
+ }
+
+ // NB: We ignore numerical errors by convention and leave them to the user
+
+ #define IMPLEMENT_VML(op) \
+ template <typename scalar_t> \
+ inline void v##op(scalar_t* out, const scalar_t* in, int64_t size) { \
+ using vec_t = Vectorized<vec_scalar_t<scalar_t>>; \
+ vec::map([](vec_t x) { return x.op(); }, out, in, size); \
+ } \
+
+ IMPLEMENT_VML(abs)
+ IMPLEMENT_VML(acos)
+ IMPLEMENT_VML(asin)
+ IMPLEMENT_VML(atan)
+ IMPLEMENT_VML(atanh)
+ IMPLEMENT_VML(ceil)
+ IMPLEMENT_VML(cos)
+ // IMPLEMENT_VML(cosh)
+ IMPLEMENT_VML(erf)
+ IMPLEMENT_VML(erfc)
+ IMPLEMENT_VML(erfinv)
+ IMPLEMENT_VML(exp)
+ IMPLEMENT_VML(expm1)
+ IMPLEMENT_VML(floor)
+ IMPLEMENT_VML(i0)
+ IMPLEMENT_VML(i0e)
+ IMPLEMENT_VML(digamma)
+ IMPLEMENT_VML(reciprocal)
+ IMPLEMENT_VML(log)
+ IMPLEMENT_VML(log10)
+ IMPLEMENT_VML(log1p)
+ IMPLEMENT_VML(log2)
+ IMPLEMENT_VML(neg)
+ IMPLEMENT_VML(sin)
+ // IMPLEMENT_VML(sinh)
+ IMPLEMENT_VML(sqrt)
+ IMPLEMENT_VML(round)
+ IMPLEMENT_VML(rsqrt)
+ IMPLEMENT_VML(tan)
+ IMPLEMENT_VML(tanh)
+ IMPLEMENT_VML(trunc)
+ IMPLEMENT_VML(lgamma)
+
+
+ #if AT_MKL_ENABLED() && !defined(__APPLE__)
+
+ // NB: LP64 MKL is the most commonly used and thus we assume it here. That
+ // means we need to expect MKL_INT to be of type int, which implies int32_t or
+ // int64_t in most cases.
+ static_assert(
+ std::is_same_v<MKL_INT, int32_t> || std::is_same_v<MKL_INT, int64_t>,
+ "MKL_INT is assumed to be int32_t or int64_t");
+ #define IMPLEMENT_VML_MKL_STUB(op, mklop, type, mkltype) \
+ template <> \
+ inline void v##op(type * out, const type * in, int64_t size) { \
+ int64_t max_mkl_ind = std::numeric_limits<MKL_INT>::max(); \
+ if (size <= static_cast<int64_t>(max_mkl_ind)) { \
+ vm##mkltype##mklop( \
+ size, in, out, VML_HA | VML_FTZDAZ_OFF | VML_ERRMODE_IGNORE); \
+ } else { \
+ MKL_INT ind = 0; \
+ int64_t chunks = size / max_mkl_ind; \
+ int64_t rest = size % max_mkl_ind; \
+ for (; ind < chunks; ind++) { \
+ vm##mkltype##mklop( \
+ max_mkl_ind, \
+ in + ind * max_mkl_ind, \
+ out + ind * max_mkl_ind, \
+ VML_HA | VML_FTZDAZ_OFF | VML_ERRMODE_IGNORE); \
+ } \
+ vm##mkltype##mklop( \
+ rest, \
+ in + ind * max_mkl_ind, \
+ out + ind * max_mkl_ind, \
+ VML_HA | VML_FTZDAZ_OFF | VML_ERRMODE_IGNORE); \
+ } \
+ }
+
+ #define IMPLEMENT_VML_MKL(op, mklop) \
+ IMPLEMENT_VML_MKL_STUB(op, mklop, float, s) \
+ IMPLEMENT_VML_MKL_STUB(op, mklop, double, d)
+
+ // NB: abs, cosh and sinh were temporarily disabled due to issues with Apple
+ // NB: expm1 is disabled because on some configs it produces expm1(nan)=-1
+ IMPLEMENT_VML_MKL(acos, Acos)
+ IMPLEMENT_VML_MKL(asin, Asin)
+ IMPLEMENT_VML_MKL(atan, Atan)
+ IMPLEMENT_VML_MKL(cos, Cos)
+ // IMPLEMENT_VML_MKL(cosh, Cosh)
+ IMPLEMENT_VML_MKL(erf, Erf)
+ IMPLEMENT_VML_MKL(erfc, Erfc)
+ IMPLEMENT_VML_MKL(erfinv, ErfInv)
+ IMPLEMENT_VML_MKL(exp, Exp)
+ // IMPLEMENT_VML_MKL(expm1, Expm1)
+ IMPLEMENT_VML_MKL(log, Ln)
+ IMPLEMENT_VML_MKL(log10, Log10)
+ IMPLEMENT_VML_MKL(sin, Sin)
+ // IMPLEMENT_VML_MKL(sinh, Sinh)
+ IMPLEMENT_VML_MKL(sqrt, Sqrt)
+ IMPLEMENT_VML_MKL(tan, Tan)
+ IMPLEMENT_VML_MKL(tanh, Tanh)
+ IMPLEMENT_VML_MKL(trunc, Trunc)
+
+ // Not vectorized in MKL version tested
+ // IMPLEMENT_VML_MKL(abs, Abs)
+ // IMPLEMENT_VML_MKL(log1p, Log1p)
+
+ #if INTEL_MKL_VERSION >= 20180406
+ IMPLEMENT_VML_MKL(log2, Log2)
+ #endif
+
+ #endif
+
+ } // namespace
+ } // namespace vml
+ } // namespace at
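As a usage illustration (not part of the upstream diff), the generated helpers can be called as sketched below; vlog stands in for any of the IMPLEMENT_VML entries above, and the aliasing of out and in relies on the in-place guarantee stated in the header comments:

// Minimal sketch: apply log in place over a float buffer via the VML-style API.
#include <ATen/cpu/vml.h>
#include <cstdint>
#include <vector>

void log_inplace(std::vector<float>& buf) {
  // out and in may point to the same memory; parallelization is internal.
  at::vml::vlog(buf.data(), buf.data(), static_cast<int64_t>(buf.size()));
}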
venv/lib/python3.10/site-packages/torch/include/ATen/detail/IPUHooksInterface.h ADDED
@@ -0,0 +1,35 @@
+ #pragma once
+
+ #include <ATen/core/Generator.h>
+ #include <c10/core/Allocator.h>
+ #include <c10/util/Exception.h>
+ #include <c10/util/Registry.h>
+
+ namespace at {
+
+ struct TORCH_API IPUHooksInterface {
+ virtual ~IPUHooksInterface() = default;
+
+ virtual const Generator& getDefaultIPUGenerator(
+ DeviceIndex device_index = -1) const {
+ AT_ERROR(
+ "Cannot get the default IPU generator: the IPU backend is not "
+ "available.");
+ }
+
+ virtual Generator newIPUGenerator(DeviceIndex device_index = -1) const {
+ AT_ERROR(
+ "Cannot create a new IPU generator: the IPU backend is not available.");
+ }
+ };
+
+ struct TORCH_API IPUHooksArgs {};
+
+ TORCH_DECLARE_REGISTRY(IPUHooksRegistry, IPUHooksInterface, IPUHooksArgs);
+ #define REGISTER_IPU_HOOKS(clsname) \
+ C10_REGISTER_CLASS(IPUHooksRegistry, clsname, clsname)
+
+ namespace detail {
+ TORCH_API const IPUHooksInterface& getIPUHooks();
+ } // namespace detail
+ } // namespace at
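For orientation (not part of the upstream diff), a backend would typically subclass and register this interface roughly as follows; the class name MyIPUHooks and the IPUHooksArgs constructor are illustrative assumptions modeled on how other hooks registries in ATen are commonly used:

// Hypothetical sketch of an IPU backend registering its hooks.
#include <ATen/detail/IPUHooksInterface.h>

struct MyIPUHooks : at::IPUHooksInterface {
  // Assumption: the registry constructs hooks from IPUHooksArgs.
  explicit MyIPUHooks(at::IPUHooksArgs) {}
  // Override getDefaultIPUGenerator / newIPUGenerator here with real
  // implementations instead of the error-raising defaults above.
};

// Registers the class so the dispatcher side (at::detail::getIPUHooks())
// can resolve a concrete implementation at runtime.
REGISTER_IPU_HOOKS(MyIPUHooks);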
venv/lib/python3.10/site-packages/torch/include/ATen/miopen/Descriptors.h ADDED
@@ -0,0 +1,146 @@
+ #pragma once
+
+ #include <ATen/miopen/Exceptions.h>
+
+ #include <ATen/miopen/miopen-wrapper.h>
+ #include <ATen/core/Tensor.h>
+ #include <ATen/TensorUtils.h>
+
+ namespace at { namespace native {
+
+ inline int dataSize(miopenDataType_t dataType)
+ {
+ switch (dataType) {
+ case miopenHalf: return 2;
+ case miopenFloat: return 4;
+ case miopenBFloat16: return 2;
+ default: return 8;
+ }
+ }
+
+ template <typename T, miopenStatus_t (*dtor)(T*)>
+ struct DescriptorDeleter {
+ void operator()(T* x) {
+ if (x != nullptr) {
+ MIOPEN_CHECK(dtor(x));
+ }
+ }
+ };
+
+ // A generic class for wrapping MIOpen descriptor types. All you need
+ // is to give the underlying type the Descriptor_t points to (usually,
+ // if it's miopenTensorDescriptor_t it points to miopenTensorStruct),
+ // the constructor and the destructor. Subclasses are responsible
+ // for defining a set() function to actually set the descriptor.
+ //
+ // Descriptors default construct to a nullptr, and have a descriptor
+ // initialized the first time you call set() or any other initializing
+ // function.
+ template <typename T, miopenStatus_t (*ctor)(T**), miopenStatus_t (*dtor)(T*)>
+ class Descriptor
+ {
+ public:
+ // Use desc() to access the underlying descriptor pointer in
+ // a read-only fashion. Most client code should use this.
+ // If the descriptor was never initialized, this will return
+ // nullptr.
+ T* desc() const { return desc_.get(); }
+ T* desc() { return desc_.get(); }
+
+ // Use mut_desc() to access the underlying descriptor pointer
+ // if you intend to modify what it points to (e.g., using
+ // miopenSetFooDescriptor). This will ensure that the descriptor
+ // is initialized. Code in this file will use this function.
+ T* mut_desc() { init(); return desc_.get(); }
+ protected:
+ void init() {
+ if (desc_ == nullptr) {
+ T* raw_desc;
+ MIOPEN_CHECK(ctor(&raw_desc));
+ desc_.reset(raw_desc);
+ }
+ }
+ private:
+ std::unique_ptr<T, DescriptorDeleter<T, dtor>> desc_;
+ };
+
+ class TensorDescriptor
+ : public Descriptor<miopenTensorDescriptor,
+ &miopenCreateTensorDescriptor,
+ &miopenDestroyTensorDescriptor>
+ {
+ public:
+ TensorDescriptor() {}
+ explicit TensorDescriptor(const at::Tensor &t, size_t pad = 0) {
+ set(t, pad);
+ }
+
+ void set(const at::Tensor &t, size_t pad = 0);
+ void set(miopenDataType_t dataType, IntArrayRef sizes, IntArrayRef strides, size_t pad = 0);
+
+ void print();
+
+ private:
+ void set(miopenDataType_t dataType, int dim, int* size, int* stride) {
+ MIOPEN_CHECK(miopenSetTensorDescriptor(mut_desc(), dataType, dim, size, stride));
+ }
+ };
+
+ std::ostream& operator<<(std::ostream & out, const TensorDescriptor& d);
+
+ class FilterDescriptor
+ : public Descriptor<miopenTensorDescriptor,
+ &miopenCreateTensorDescriptor,
+ &miopenDestroyTensorDescriptor>
+ {
+ public:
+ void set(const at::Tensor &t, int64_t pad = 0) {
+ set(t, at::MemoryFormat::Contiguous, pad);
+ }
+
+ void set(const at::Tensor &t, const at::MemoryFormat memory_format, int64_t pad = 0);
+
+ private:
+ void set(miopenDataType_t dataType, int dim, int* size, int* stride) {
+ MIOPEN_CHECK(miopenSetTensorDescriptor(mut_desc(), dataType, dim, size, stride));
+ }
+ };
+
+ struct ConvolutionDescriptor
+ : public Descriptor<miopenConvolutionDescriptor,
+ &miopenCreateConvolutionDescriptor,
+ &miopenDestroyConvolutionDescriptor>
+ {
+ void set(miopenDataType_t dataType, miopenConvolutionMode_t c_mode, int dim, int* pad, int* stride, int * upscale /* aka dilation */, int groups, bool deterministic) {
+ MIOPEN_CHECK(miopenInitConvolutionNdDescriptor(mut_desc(), dim, pad, stride, upscale, c_mode));
+ MIOPEN_CHECK(miopenSetConvolutionGroupCount(mut_desc(), groups));
+ MIOPEN_CHECK(miopenSetConvolutionAttribute(mut_desc(), MIOPEN_CONVOLUTION_ATTRIB_DETERMINISTIC, deterministic ? 1 : 0));
+ }
+ };
+
+
+ struct RNNDescriptor
+ : public Descriptor<miopenRNNDescriptor,
+ &miopenCreateRNNDescriptor,
+ &miopenDestroyRNNDescriptor>
+ {
+ void set(int64_t hidden_size, int64_t num_layers, miopenRNNInputMode_t input_mode, miopenRNNDirectionMode_t direction, miopenRNNMode_t rnn_mode,
+ miopenRNNBiasMode_t bias_mode, miopenRNNAlgo_t algorithm, miopenDataType_t datatype) {
+ MIOPEN_CHECK(miopenSetRNNDescriptor(mut_desc(), hidden_size, num_layers, input_mode, direction, rnn_mode, bias_mode, algorithm, datatype));
+ }
+ };
+
+ union Constant
+ {
+ float f;
+ double d;
+ Constant(miopenDataType_t dataType, double value) {
+ if (dataType == miopenHalf || dataType == miopenFloat || dataType == miopenBFloat16) {
+ f = static_cast<float>(value);
+ } else {
+ d = value;
+ }
+ }
+ };
+
+ }} // namespace
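As a brief illustration (not part of the upstream diff) of how these wrappers are consumed; the input tensor and the pad value of 4 are placeholders, and the "pad to at least N dimensions" reading of pad is an assumption:

// Hypothetical sketch: describe an input tensor before handing it to MIOpen.
#include <ATen/miopen/Descriptors.h>

void describe(const at::Tensor& input) {
  at::native::TensorDescriptor idesc;
  idesc.set(input, 4);  // pad presumably extends the description to >= 4 dims
  // idesc.desc() now yields the underlying MIOpen tensor descriptor,
  // created lazily by mut_desc() the first time set() touched it.
}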
venv/lib/python3.10/site-packages/torch/include/ATen/miopen/Exceptions.h ADDED
@@ -0,0 +1,41 @@
+ #pragma once
+
+ #include <ATen/miopen/miopen-wrapper.h>
+ #include <string>
+ #include <stdexcept>
+ #include <sstream>
+
+ namespace at { namespace native {
+
+ class miopen_exception : public std::runtime_error {
+ public:
+ miopenStatus_t status;
+ miopen_exception(miopenStatus_t status, const char* msg)
+ : std::runtime_error(msg)
+ , status(status) {}
+ miopen_exception(miopenStatus_t status, const std::string& msg)
+ : std::runtime_error(msg)
+ , status(status) {}
+ };
+
+ inline void MIOPEN_CHECK(miopenStatus_t status)
+ {
+ if (status != miopenStatusSuccess) {
+ if (status == miopenStatusNotImplemented) {
+ throw miopen_exception(status, std::string(miopenGetErrorString(status)) +
+ ". This error may appear if you passed in a non-contiguous input.");
+ }
+ throw miopen_exception(status, miopenGetErrorString(status));
+ }
+ }
+
+ inline void HIP_CHECK(hipError_t error)
+ {
+ if (error != hipSuccess) {
+ std::string msg("HIP error: ");
+ msg += hipGetErrorString(error);
+ throw std::runtime_error(msg);
+ }
+ }
+
+ }} // namespace at::native
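Finally, a small sketch (not part of the upstream diff) of how the checking helpers above are used; miopenCreateTensorDescriptor is just a representative MIOpen call:

// Hypothetical sketch: MIOPEN_CHECK turns a failing status into a
// miopen_exception carrying both the readable message and the raw status.
#include <ATen/miopen/Exceptions.h>

void create_descriptor() {
  miopenTensorDescriptor_t desc = nullptr;
  try {
    at::native::MIOPEN_CHECK(miopenCreateTensorDescriptor(&desc));
  } catch (const at::native::miopen_exception& e) {
    // e.status holds the miopenStatus_t; e.what() holds the message.
    throw;
  }
}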