applied-ai-018 committed on
Commit f20b778 · verified · 1 parent: 083197a

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the complete change set.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_common_vsx.h +246 -0
  2. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_complex_double_vsx.h +558 -0
  3. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_complex_float_vsx.h +628 -0
  4. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_double_vsx.h +422 -0
  5. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_int64_vsx.h +251 -0
  6. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/zarch/vec256_zarch.h +2797 -0
  7. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512.h +263 -0
  8. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_bfloat16.h +1232 -0
  9. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_complex_double.h +512 -0
  10. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_complex_float.h +1018 -0
  11. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_double.h +469 -0
  12. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_float.h +730 -0
  13. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_int.h +1448 -0
  14. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_qint.h +1338 -0
  15. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vml.h +171 -0
  16. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/hip/impl/HIPAllocatorMasqueradingAsCUDA.h +28 -0
  17. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h +18 -0
  18. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h +353 -0
  19. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h +130 -0
  20. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/miopen/Descriptors.h +146 -0
  21. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/miopen/Exceptions.h +41 -0
  22. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/miopen/Handle.h +9 -0
  23. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/miopen/Types.h +12 -0
  24. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/miopen/Utils.h +18 -0
  25. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/miopen/miopen-wrapper.h +3 -0
  26. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/mps/EmptyTensor.h +29 -0
  27. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/mps/IndexKernels.h +573 -0
  28. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/mps/MPSAllocator.h +401 -0
  29. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/mps/MPSAllocatorInterface.h +61 -0
  30. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/mps/MPSDevice.h +84 -0
  31. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/mps/MPSEvent.h +100 -0
  32. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/mps/MPSGeneratorImpl.h +52 -0
  33. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/mps/MPSGuardImpl.h +174 -0
  34. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/mps/MPSHooks.h +51 -0
  35. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/mps/MPSProfiler.h +393 -0
  36. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/mps/MPSStream.h +133 -0
  37. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/CuFFTUtils.h +73 -0
  38. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/ForeachMinMaxFunctors.cuh +22 -0
  39. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/MultiTensorApply.cuh +379 -0
  40. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/ROCmLoops.cuh +364 -0
  41. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/ReduceOps.h +20 -0
  42. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/TensorModeKernel.h +19 -0
  43. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/TensorTopK.h +14 -0
  44. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/UniqueCub.cuh +16 -0
  45. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/block_reduce.cuh +105 -0
  46. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/jit_utils.h +215 -0
  47. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/BinaryOps.h +8 -0
  48. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/QuantizedOps.h +258 -0
  49. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/RuyUtils.h +21 -0
  50. env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_coalesced_ops.h +50 -0
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_common_vsx.h ADDED
@@ -0,0 +1,246 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cpu/vec/intrinsics.h>
4
+ #include <ATen/cpu/vec/vec_base.h>
5
+ #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
6
+
7
+ // Note: header order is important here
8
+ #include <ATen/cpu/vec/vec256/vsx/vec256_double_vsx.h>
9
+ #include <ATen/cpu/vec/vec256/vsx/vec256_float_vsx.h>
10
+ #include <ATen/cpu/vec/vec256/vsx/vec256_int16_vsx.h>
11
+ #include <ATen/cpu/vec/vec256/vsx/vec256_int32_vsx.h>
12
+ #include <ATen/cpu/vec/vec256/vsx/vec256_int64_vsx.h>
13
+ #include <ATen/cpu/vec/vec256/vsx/vec256_qint32_vsx.h>
14
+ #include <ATen/cpu/vec/vec256/vsx/vec256_qint8_vsx.h>
15
+ #include <ATen/cpu/vec/vec256/vsx/vec256_quint8_vsx.h>
16
+
17
+ #include <ATen/cpu/vec/vec256/vsx/vec256_complex_float_vsx.h>
18
+ #include <ATen/cpu/vec/vec256/vsx/vec256_complex_double_vsx.h>
19
+
20
+ #include <ATen/cpu/vec/vec256/vsx/vec256_bfloat16_vsx.h>
21
+
22
+ namespace at {
23
+ namespace vec {
24
+
25
+ inline namespace CPU_CAPABILITY {
26
+
27
+ DEFINE_CLAMP_FUNCS(c10::quint8)
28
+ DEFINE_CLAMP_FUNCS(c10::qint8)
29
+ DEFINE_CLAMP_FUNCS(c10::qint32)
30
+ DEFINE_CLAMP_FUNCS(int16_t)
31
+ DEFINE_CLAMP_FUNCS(int32_t)
32
+ DEFINE_CLAMP_FUNCS(int64_t)
33
+ DEFINE_CLAMP_FUNCS(float)
34
+ DEFINE_CLAMP_FUNCS(double)
35
+
36
+ template <>
37
+ Vectorized<double> C10_ALWAYS_INLINE fmadd(
38
+ const Vectorized<double>& a,
39
+ const Vectorized<double>& b,
40
+ const Vectorized<double>& c) {
41
+ return Vectorized<double>{
42
+ vec_madd(a.vec0(), b.vec0(), c.vec0()),
43
+ vec_madd(a.vec1(), b.vec1(), c.vec1())};
44
+ }
45
+
46
+ template <>
47
+ Vectorized<int64_t> C10_ALWAYS_INLINE fmadd(
48
+ const Vectorized<int64_t>& a,
49
+ const Vectorized<int64_t>& b,
50
+ const Vectorized<int64_t>& c) {
51
+ return Vectorized<int64_t>{
52
+ a.vec0() * b.vec0() + c.vec0(), a.vec1() * b.vec1() + c.vec1()};
53
+ }
54
+ template <>
55
+ Vectorized<int32_t> C10_ALWAYS_INLINE fmadd(
56
+ const Vectorized<int32_t>& a,
57
+ const Vectorized<int32_t>& b,
58
+ const Vectorized<int32_t>& c) {
59
+ return Vectorized<int32_t>{
60
+ a.vec0() * b.vec0() + c.vec0(), a.vec1() * b.vec1() + c.vec1()};
61
+ }
62
+ template <>
63
+ Vectorized<int16_t> C10_ALWAYS_INLINE fmadd(
64
+ const Vectorized<int16_t>& a,
65
+ const Vectorized<int16_t>& b,
66
+ const Vectorized<int16_t>& c) {
67
+ return Vectorized<int16_t>{
68
+ a.vec0() * b.vec0() + c.vec0(), a.vec1() * b.vec1() + c.vec1()};
69
+ }
70
+
71
+ DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(float)
72
+ DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(double)
73
+ DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(int64_t)
74
+ DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(int32_t)
75
+ DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(int16_t)
76
+
77
+ template <>
78
+ Vectorized<int64_t> C10_ALWAYS_INLINE
79
+ convert_to_int_of_same_size<double>(const Vectorized<double>& src) {
80
+ return Vectorized<int64_t>{vec_signed(src.vec0()), vec_signed(src.vec1())};
81
+ }
82
+
83
+ template <>
84
+ Vectorized<int32_t> C10_ALWAYS_INLINE
85
+ convert_to_int_of_same_size<float>(
86
+ const Vectorized<float>& src) {
87
+ return Vectorized<int32_t>{vec_signed(src.vec0()), vec_signed(src.vec1())};
88
+ }
89
+
90
+ template <>
91
+ inline void convert(const int32_t* src, float* dst, int64_t n) {
92
+ // int32_t and float have same size
93
+ int64_t i;
94
+ for (i = 0; i <= (n - Vectorized<float>::size()); i += Vectorized<float>::size()) {
95
+ const int32_t* src_a = src + i;
96
+ float* dst_a = dst + i;
97
+ vint32 input_vec0 = vec_vsx_ld(offset0, reinterpret_cast<const vint32*>(src_a));
98
+ vint32 input_vec1 =
99
+ vec_vsx_ld(offset16, reinterpret_cast<const vint32*>(src_a));
100
+ vfloat32 c0 = vec_float(input_vec0);
101
+ vfloat32 c1 = vec_float(input_vec1);
102
+ vec_vsx_st(c0, offset0, dst_a);
103
+ vec_vsx_st(c1, offset16, dst_a);
104
+ }
105
+
106
+ for (; i < n; i++) {
107
+ dst[i] = static_cast<float>(src[i]);
108
+ }
109
+ }
110
+
111
+ template <>
112
+ inline void convert(const int64_t* src, double* dst, int64_t n) {
113
+ int64_t i;
114
+ for (i = 0; i <= (n - Vectorized<double>::size()); i += Vectorized<double>::size()) {
115
+ const int64_t* src_a = src + i;
116
+ double* dst_a = dst + i;
117
+ vint64 input_vec0 =
118
+ vec_vsx_ld(offset0, reinterpret_cast<const vint64*>(src_a));
119
+ vint64 input_vec1 =
120
+ vec_vsx_ld(offset16, reinterpret_cast<const vint64*>(src_a));
121
+ vfloat64 c0 = vec_double(input_vec0);
122
+ vfloat64 c1 = vec_double(input_vec1);
123
+ vec_vsx_st(c0, offset0, reinterpret_cast<double*>(dst_a));
124
+ vec_vsx_st(c1, offset16, reinterpret_cast<double*>(dst_a));
125
+ }
126
+ for (; i < n; i++) {
127
+ dst[i] = static_cast<double>(src[i]);
128
+ }
129
+ }
130
+ // Generic implementation to fix compiler error
131
+ // TODO: Add optimized version for ppc64
132
+ inline std::tuple<Vectorized<float>, Vectorized<float>> convert_half_float(
133
+ const Vectorized<Half>& a) {
134
+ constexpr int64_t K = Vectorized<Half>::size();
135
+ __at_align__ float arr[K];
136
+ __at_align__ Half arr2[K];
137
+ a.store(arr2);
138
+ convert(arr2, arr, K);
139
+ return std::make_tuple(
140
+ Vectorized<float>::loadu(arr),
141
+ Vectorized<float>::loadu(arr + Vectorized<float>::size()));
142
+ }
143
+
144
+ inline Vectorized<Half> convert_float_half(
145
+ const Vectorized<float>& a, const Vectorized<float>& b) {
146
+ constexpr int64_t K = Vectorized<Half>::size();
147
+ __at_align__ float arr[K];
148
+ __at_align__ Half arr2[K];
149
+ a.store(arr);
150
+ b.store(arr + Vectorized<float>::size());
151
+ convert(arr, arr2, K);
152
+ return Vectorized<Half>::loadu(arr2);
153
+ };
154
+
155
+ template <>
156
+ std::pair<Vectorized<double>, Vectorized<double>> inline interleave2<double>(
157
+ const Vectorized<double>& a,
158
+ const Vectorized<double>& b) {
159
+ // inputs:
160
+ // a = {a0, a1, a2, a3}
161
+ // b = {b0, b1, b2, b3}
162
+
163
+ vfloat64 ab00 = vec_xxpermdi(a.vec0(), b.vec0(), 0);
164
+ vfloat64 ab11 = vec_xxpermdi(a.vec0(), b.vec0(), 3);
165
+ vfloat64 ab2_00 = vec_xxpermdi(a.vec1(), b.vec1(), 0);
166
+ vfloat64 ab2_11 = vec_xxpermdi(a.vec1(), b.vec1(), 3);
167
+ // return {a0, b0, a1, b1}
168
+ // {a2, b2, a3, b3}
169
+ return std::make_pair(
170
+ Vectorized<double>{ab00, ab11}, Vectorized<double>{ab2_00, ab2_11});
171
+ }
172
+
173
+ template <>
174
+ std::pair<Vectorized<double>, Vectorized<double>> inline deinterleave2<double>(
175
+ const Vectorized<double>& a,
176
+ const Vectorized<double>& b) {
177
+ // inputs:
178
+ // a = {a0, b0, a1, b1}
179
+ // b = {a2, b2, a3, b3}
180
+ vfloat64 aa01 = vec_xxpermdi(a.vec0(), a.vec1(), 0);
181
+ vfloat64 aa23 = vec_xxpermdi(b.vec0(), b.vec1(), 0);
182
+
183
+ vfloat64 bb_01 = vec_xxpermdi(a.vec0(), a.vec1(), 3);
184
+ vfloat64 bb_23 = vec_xxpermdi(b.vec0(), b.vec1(), 3);
185
+
186
+ // swap lanes:
187
+ // return {a0, a1, a2, a3}
188
+ // {b0, b1, b2, b3}
189
+ return std::make_pair(
190
+ Vectorized<double>{aa01, aa23}, Vectorized<double>{bb_01, bb_23});
191
+ }
192
+
193
+ template <>
194
+ std::pair<Vectorized<float>, Vectorized<float>> inline interleave2<float>(
195
+ const Vectorized<float>& a,
196
+ const Vectorized<float>& b) {
197
+ // inputs:
198
+ // a = {a0, a1, a2, a3,, a4, a5, a6, a7}
199
+ // b = {b0, b1, b2, b3,, b4, b5, b6, b7}
200
+
201
+ vfloat32 ab0011 = vec_mergeh(a.vec0(), b.vec0());
202
+ vfloat32 ab2233 = vec_mergel(a.vec0(), b.vec0());
203
+
204
+ vfloat32 ab2_0011 = vec_mergeh(a.vec1(), b.vec1());
205
+ vfloat32 ab2_2233 = vec_mergel(a.vec1(), b.vec1());
206
+ // group cols crossing lanes:
207
+ // return {a0, b0, a1, b1,, a2, b2, a3, b3}
208
+ // {a4, b4, a5, b5,, a6, b6, a7, b7}
209
+
210
+ return std::make_pair(
211
+ Vectorized<float>{ab0011, ab2233}, Vectorized<float>{ab2_0011, ab2_2233});
212
+ }
213
+
214
+ template <>
215
+ std::pair<Vectorized<float>, Vectorized<float>> inline deinterleave2<float>(
216
+ const Vectorized<float>& a,
217
+ const Vectorized<float>& b) {
218
+ // inputs:
219
+ // a = {a0, b0, a1, b1,, a2, b2, a3, b3}
220
+ // b = {a4, b4, a5, b5,, a6, b6, a7, b7}
221
+
222
+ // {a0,a2,b0,b2} {a1,a3,b1,b3}
223
+ vfloat32 a0a2b0b2 = vec_mergeh(a.vec0(), a.vec1());
224
+ vfloat32 a1a3b1b3 = vec_mergel(a.vec0(), a.vec1());
225
+
226
+ vfloat32 aa0123 = vec_mergeh(a0a2b0b2, a1a3b1b3);
227
+ vfloat32 bb0123 = vec_mergel(a0a2b0b2, a1a3b1b3);
228
+
229
+ vfloat32 a0a2b0b2_2 = vec_mergeh(b.vec0(), b.vec1());
230
+ vfloat32 a1a3b1b3_2 = vec_mergel(b.vec0(), b.vec1());
231
+
232
+ vfloat32 aa0123_2 = vec_mergeh(a0a2b0b2_2, a1a3b1b3_2);
233
+ vfloat32 bb0123_2 = vec_mergel(a0a2b0b2_2, a1a3b1b3_2);
234
+
235
+ // it could be done with vec_perm, too
236
+ // swap lanes:
237
+ // return {a0, a1, a2, a3,, a4, a5, a6, a7}
238
+ // {b0, b1, b2, b3,, b4, b5, b6, b7}
239
+
240
+ return std::make_pair(
241
+ Vectorized<float>{aa0123, aa0123_2}, Vectorized<float>{bb0123, bb0123_2});
242
+ }
243
+
244
+ } // namespace
245
+ } // namespace vec
246
+ } // namespace at
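
For orientation (not part of the diff): vec256_common_vsx.h above pulls in the per-type VSX headers and defines the clamp, fmadd, convert and (de)interleave helpers. Below is a minimal usage sketch of the fmadd specialization, assuming a ppc64le/VSX build of PyTorch in which <ATen/cpu/vec/vec.h> dispatches to these headers; the function name fmadd_example and the buffer values are illustrative only.

#include <ATen/cpu/vec/vec.h>  // expected to select the VSX specializations above on ppc64le builds
#include <array>

void fmadd_example() {
  using Vec = at::vec::Vectorized<double>;  // 4 doubles, held as two 128-bit VSX halves
  std::array<double, Vec::size()> a{1.0, 2.0, 3.0, 4.0};
  std::array<double, Vec::size()> b{0.5, 0.5, 0.5, 0.5};
  std::array<double, Vec::size()> c{10.0, 10.0, 10.0, 10.0};
  std::array<double, Vec::size()> out{};

  Vec va = Vec::loadu(a.data());
  Vec vb = Vec::loadu(b.data());
  Vec vc = Vec::loadu(c.data());

  // The specialization above lowers this to vec_madd on each 128-bit half:
  // out[i] = a[i] * b[i] + c[i]  ->  {10.5, 11.0, 11.5, 12.0}
  at::vec::fmadd(va, vb, vc).store(out.data());
}
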
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_complex_double_vsx.h ADDED
@@ -0,0 +1,558 @@
1
+ #pragma once
2
+ #include <ATen/cpu/vec/intrinsics.h>
3
+ #include <ATen/cpu/vec/vec_base.h>
4
+ #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
5
+ #include <c10/util/complex.h>
6
+ #include <c10/util/irange.h>
7
+
8
+ namespace at {
9
+ namespace vec {
10
+ // See Note [CPU_CAPABILITY namespace]
11
+ inline namespace CPU_CAPABILITY {
12
+ using ComplexDbl = c10::complex<double>;
13
+
14
+ template <>
15
+ class Vectorized<ComplexDbl> {
16
+ union {
17
+ struct {
18
+ vfloat64 _vec0;
19
+ vfloat64 _vec1;
20
+ };
21
+ struct {
22
+ vbool64 _vecb0;
23
+ vbool64 _vecb1;
24
+ };
25
+
26
+ } __attribute__((__may_alias__));
27
+
28
+ public:
29
+ using value_type = ComplexDbl;
30
+ using vec_internal_type = vfloat64;
31
+ using vec_internal_mask_type = vbool64;
32
+ using size_type = int;
33
+ static constexpr size_type size() {
34
+ return 2;
35
+ }
36
+ Vectorized() {}
37
+ C10_ALWAYS_INLINE Vectorized(vfloat64 v) : _vec0{v}, _vec1{v} {}
38
+ C10_ALWAYS_INLINE Vectorized(vbool64 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
39
+ C10_ALWAYS_INLINE Vectorized(vfloat64 v1, vfloat64 v2) : _vec0{v1}, _vec1{v2} {}
40
+ C10_ALWAYS_INLINE Vectorized(vbool64 v1, vbool64 v2) : _vecb0{v1}, _vecb1{v2} {}
41
+
42
+ Vectorized(ComplexDbl val) {
43
+ double real_value = val.real();
44
+ double imag_value = val.imag();
45
+ _vec0 = vfloat64{real_value, imag_value};
46
+ _vec1 = vfloat64{real_value, imag_value};
47
+ }
48
+ Vectorized(ComplexDbl val1, ComplexDbl val2) {
49
+ _vec0 = vfloat64{val1.real(), val1.imag()};
50
+ _vec1 = vfloat64{val2.real(), val2.imag()};
51
+ }
52
+
53
+ C10_ALWAYS_INLINE const vec_internal_type& vec0() const {
54
+ return _vec0;
55
+ }
56
+ C10_ALWAYS_INLINE const vec_internal_type& vec1() const {
57
+ return _vec1;
58
+ }
59
+
60
+ template <int64_t mask>
61
+ static std::enable_if_t<blendChoiceComplexDbl(mask) == 0, Vectorized<ComplexDbl>>
62
+ C10_ALWAYS_INLINE
63
+ blend(const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b) {
64
+ return a;
65
+ }
66
+
67
+ template <int64_t mask>
68
+ static std::enable_if_t<blendChoiceComplexDbl(mask) == 1, Vectorized<ComplexDbl>>
69
+ C10_ALWAYS_INLINE
70
+ blend(const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b) {
71
+ return b;
72
+ }
73
+
74
+ template <int64_t mask>
75
+ static std::enable_if_t<blendChoiceComplexDbl(mask) == 2, Vectorized<ComplexDbl>>
76
+ C10_ALWAYS_INLINE
77
+ blend(const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b) {
78
+ return {b._vec0, a._vec1};
79
+ }
80
+
81
+ template <int64_t mask>
82
+ static std::enable_if_t<blendChoiceComplexDbl(mask) == 3, Vectorized<ComplexDbl>>
83
+ C10_ALWAYS_INLINE
84
+ blend(const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b) {
85
+ return {a._vec0, b._vec1};
86
+ }
87
+
88
+ template <int64_t mask>
89
+ static Vectorized<ComplexDbl> C10_ALWAYS_INLINE
90
+ el_blend(const Vectorized<ComplexDbl>& a, const Vectorized<ComplexDbl>& b) {
91
+ const vbool64 mask_1st = VsxDblMask1(mask);
92
+ const vbool64 mask_2nd = VsxDblMask2(mask);
93
+ return {
94
+ (vfloat64)vec_sel(a._vec0, b._vec0, mask_1st),
95
+ (vfloat64)vec_sel(a._vec1, b._vec1, mask_2nd)};
96
+ }
97
+
98
+ static Vectorized<ComplexDbl> blendv(
99
+ const Vectorized<ComplexDbl>& a,
100
+ const Vectorized<ComplexDbl>& b,
101
+ const Vectorized<ComplexDbl>& mask) {
102
+ // convert std::complex<V> index mask to V index mask: xy -> xxyy
103
+ auto mask_complex =
104
+ Vectorized<ComplexDbl>(vec_splat(mask._vec0, 0), vec_splat(mask._vec1, 0));
105
+ return {
106
+ vec_sel(a._vec0, b._vec0, mask_complex._vecb0),
107
+ vec_sel(a._vec1, b._vec1, mask_complex._vecb1)};
108
+ }
109
+
110
+ static Vectorized<ComplexDbl> C10_ALWAYS_INLINE elwise_blendv(
111
+ const Vectorized<ComplexDbl>& a,
112
+ const Vectorized<ComplexDbl>& b,
113
+ const Vectorized<ComplexDbl>& mask) {
114
+ return {
115
+ vec_sel(a._vec0, b._vec0, mask._vecb0),
116
+ vec_sel(a._vec1, b._vec1, mask._vecb1)};
117
+ }
118
+ template <typename step_t>
119
+ static Vectorized<ComplexDbl> arange(
120
+ ComplexDbl base = 0.,
121
+ step_t step = static_cast<step_t>(1)) {
122
+ return Vectorized<ComplexDbl>(base, base + step);
123
+ }
124
+ static Vectorized<ComplexDbl> set(
125
+ const Vectorized<ComplexDbl>& a,
126
+ const Vectorized<ComplexDbl>& b,
127
+ int64_t count = size()) {
128
+ switch (count) {
129
+ case 0:
130
+ return a;
131
+ case 1:
132
+ return blend<1>(a, b);
133
+ }
134
+ return b;
135
+ }
136
+
137
+ static Vectorized<value_type> C10_ALWAYS_INLINE
138
+ loadu(const void* ptr, int count = size()) {
139
+ if (count == size()) {
140
+ return {
141
+ vec_vsx_ld(offset0, reinterpret_cast<const double*>(ptr)),
142
+ vec_vsx_ld(offset16, reinterpret_cast<const double*>(ptr))};
143
+ }
144
+
145
+ __at_align__ value_type tmp_values[size()] = {};
146
+ std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));
147
+
148
+ return {
149
+ vec_vsx_ld(offset0, reinterpret_cast<const double*>(tmp_values)),
150
+ vec_vsx_ld(offset16, reinterpret_cast<const double*>(tmp_values))};
151
+ }
152
+ void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
153
+ if (count == size()) {
154
+ vec_vsx_st(_vec0, offset0, reinterpret_cast<double*>(ptr));
155
+ vec_vsx_st(_vec1, offset16, reinterpret_cast<double*>(ptr));
156
+ } else if (count > 0) {
157
+ __at_align__ value_type tmp_values[size()];
158
+ vec_vsx_st(_vec0, offset0, reinterpret_cast<double*>(tmp_values));
159
+ vec_vsx_st(_vec1, offset16, reinterpret_cast<double*>(tmp_values));
160
+ std::memcpy(
161
+ ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
162
+ }
163
+ }
164
+
165
+ const ComplexDbl& operator[](int idx) const = delete;
166
+ ComplexDbl& operator[](int idx) = delete;
167
+
168
+ Vectorized<ComplexDbl> map(ComplexDbl (*const f)(ComplexDbl)) const {
169
+ __at_align__ ComplexDbl tmp[size()];
170
+ store(tmp);
171
+ for (const auto i : c10::irange(size())) {
172
+ tmp[i] = f(tmp[i]);
173
+ }
174
+ return loadu(tmp);
175
+ }
176
+
177
+ Vectorized<ComplexDbl> map(ComplexDbl (*const f)(const ComplexDbl&)) const {
178
+ __at_align__ ComplexDbl tmp[size()];
179
+ store(tmp);
180
+ for (const auto i : c10::irange(size())) {
181
+ tmp[i] = f(tmp[i]);
182
+ }
183
+ return loadu(tmp);
184
+ }
185
+
186
+ Vectorized<ComplexDbl> el_swapped() const {
187
+ vfloat64 v0 = vec_xxpermdi(_vec0, _vec0, 2);
188
+ vfloat64 v1 = vec_xxpermdi(_vec1, _vec1, 2);
189
+ return {v0, v1};
190
+ }
191
+
192
+ Vectorized<ComplexDbl> el_madd(
193
+ const Vectorized<ComplexDbl>& multiplier,
194
+ const Vectorized<ComplexDbl>& val) const {
195
+ return {
196
+ vec_madd(_vec0, multiplier._vec0, val._vec0),
197
+ vec_madd(_vec1, multiplier._vec1, val._vec1)};
198
+ }
199
+
200
+ Vectorized<ComplexDbl> el_mergeo() const {
201
+ vfloat64 v0 = vec_splat(_vec0, 1);
202
+ vfloat64 v1 = vec_splat(_vec1, 1);
203
+ return {v0, v1};
204
+ }
205
+
206
+ Vectorized<ComplexDbl> el_mergee() const {
207
+ vfloat64 v0 = vec_splat(_vec0, 0);
208
+ vfloat64 v1 = vec_splat(_vec1, 0);
209
+ return {v0, v1};
210
+ }
211
+
212
+ static Vectorized<ComplexDbl> el_mergee(
213
+ Vectorized<ComplexDbl>& first,
214
+ Vectorized<ComplexDbl>& second) {
215
+ // as mergee phased in , we can use vec_perm with mask
216
+ return {
217
+ vec_mergeh(first._vec0, second._vec0),
218
+ vec_mergeh(first._vec1, second._vec1)};
219
+ }
220
+
221
+ Vectorized<ComplexDbl> abs_2_() const {
222
+ auto a = (*this).elwise_mult(*this);
223
+ auto permuted = a.el_swapped();
224
+ a = a + permuted;
225
+ return a;
226
+ }
227
+
228
+ Vectorized<ComplexDbl> abs_() const {
229
+ auto ret = abs_2_();
230
+ return ret.elwise_sqrt();
231
+ }
232
+
233
+ Vectorized<ComplexDbl> abs() const {
234
+ return abs_() & vd_real_mask;
235
+ }
236
+
237
+ Vectorized<ComplexDbl> angle_() const {
238
+ // angle = atan2(b/a)
239
+ // auto b_a = _mm256_permute_pd(values, 0x05); // b a
240
+ // return Sleef_atan2d4_u10(values, b_a); // 90-angle angle
241
+ Vectorized<ComplexDbl> ret;
242
+ ret._vec0[0] = std::atan2(_vec0[1], _vec0[0]);
243
+ ret._vec1[0] = std::atan2(_vec1[1], _vec1[0]);
244
+ return ret;
245
+ }
246
+
247
+ Vectorized<ComplexDbl> angle() const {
248
+ return angle_() & vd_real_mask;
249
+ }
250
+
251
+ Vectorized<ComplexDbl> real_() const {
252
+ return *this & vd_real_mask;
253
+ }
254
+ Vectorized<ComplexDbl> real() const {
255
+ return *this & vd_real_mask;
256
+ }
257
+ Vectorized<ComplexDbl> imag_() const {
258
+ return *this & vd_imag_mask;
259
+ }
260
+ Vectorized<ComplexDbl> imag() const {
261
+ return imag_().el_swapped();
262
+ }
263
+
264
+ Vectorized<ComplexDbl> conj_() const {
265
+ return *this ^ vd_isign_mask;
266
+ }
267
+ Vectorized<ComplexDbl> conj() const {
268
+ return *this ^ vd_isign_mask;
269
+ }
270
+
271
+ Vectorized<ComplexDbl> log() const {
272
+ // Most trigonomic ops use the log() op to improve complex number
273
+ // performance.
274
+ return map(std::log);
275
+ }
276
+
277
+ Vectorized<ComplexDbl> log2() const {
278
+ // log2eB_inv
279
+ auto ret = log();
280
+ return ret.elwise_mult(vd_log2e_inv);
281
+ }
282
+ Vectorized<ComplexDbl> log10() const {
283
+ auto ret = log();
284
+ return ret.elwise_mult(vd_log10e_inv);
285
+ }
286
+
287
+ Vectorized<ComplexDbl> log1p() const {
288
+ return map(std::log1p);
289
+ }
290
+
291
+ Vectorized<ComplexDbl> asin() const {
292
+ // asin(x)
293
+ // = -i*ln(iz + sqrt(1 -z^2))
294
+ // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi)))
295
+ // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi))
296
+ auto conj = conj_();
297
+ auto b_a = conj.el_swapped();
298
+ auto ab = conj.elwise_mult(b_a);
299
+ auto im = ab + ab;
300
+ auto val_2 = (*this).elwise_mult(*this);
301
+ auto val_2_swapped = val_2.el_swapped();
302
+ auto re = horizontal_sub(val_2, val_2_swapped);
303
+ re = Vectorized<ComplexDbl>(vd_one) - re;
304
+ auto root = el_blend<0x0A>(re, im).sqrt();
305
+ auto ln = (b_a + root).log();
306
+ return ln.el_swapped().conj();
307
+ }
308
+
309
+ Vectorized<ComplexDbl> acos() const {
310
+ // acos(x) = pi/2 - asin(x)
311
+ return Vectorized(vd_pi_2) - asin();
312
+ }
313
+
314
+ Vectorized<ComplexDbl> atan() const {
315
+ // atan(x) = i/2 * ln((i + z)/(i - z))
316
+ auto ione = Vectorized(vd_imag_one);
317
+ auto sum = ione + *this;
318
+ auto sub = ione - *this;
319
+ auto ln = (sum / sub).log(); // ln((i + z)/(i - z))
320
+ return ln * vd_imag_half; // i/2*ln()
321
+ }
322
+ Vectorized<ComplexDbl> atanh() const {
323
+ return map(std::atanh);
324
+ }
325
+
326
+ Vectorized<ComplexDbl> sin() const {
327
+ return map(std::sin);
328
+ }
329
+ Vectorized<ComplexDbl> sinh() const {
330
+ return map(std::sinh);
331
+ }
332
+ Vectorized<ComplexDbl> cos() const {
333
+ return map(std::cos);
334
+ }
335
+ Vectorized<ComplexDbl> cosh() const {
336
+ return map(std::cosh);
337
+ }
338
+
339
+ Vectorized<ComplexDbl> tan() const {
340
+ return map(std::tan);
341
+ }
342
+ Vectorized<ComplexDbl> tanh() const {
343
+ return map(std::tanh);
344
+ }
345
+ Vectorized<ComplexDbl> ceil() const {
346
+ return {vec_ceil(_vec0), vec_ceil(_vec1)};
347
+ }
348
+ Vectorized<ComplexDbl> floor() const {
349
+ return {vec_floor(_vec0), vec_floor(_vec1)};
350
+ }
351
+ Vectorized<ComplexDbl> neg() const {
352
+ auto z = Vectorized<ComplexDbl>(vd_zero);
353
+ return z - *this;
354
+ }
355
+ Vectorized<ComplexDbl> round() const {
356
+ return {vec_rint(_vec0), vec_rint(_vec1)};
357
+ }
358
+
359
+ Vectorized<ComplexDbl> trunc() const {
360
+ return {vec_trunc(_vec0), vec_trunc(_vec1)};
361
+ }
362
+
363
+ Vectorized<ComplexDbl> elwise_sqrt() const {
364
+ return {vec_sqrt(_vec0), vec_sqrt(_vec1)};
365
+ }
366
+
367
+ Vectorized<ComplexDbl> sqrt() const {
368
+ return map(std::sqrt);
369
+ }
370
+
371
+ Vectorized<ComplexDbl> reciprocal() const {
372
+ // re + im*i = (a + bi) / (c + di)
373
+ // re = (ac + bd)/abs_2() = c/abs_2()
374
+ // im = (bc - ad)/abs_2() = -d/abs_2()
375
+ auto c_d = *this ^ vd_isign_mask; // c -d
376
+ auto abs = abs_2_();
377
+ return c_d.elwise_div(abs);
378
+ }
379
+
380
+ Vectorized<ComplexDbl> rsqrt() const {
381
+ return sqrt().reciprocal();
382
+ }
383
+
384
+ static Vectorized<ComplexDbl> horizontal_add(
385
+ Vectorized<ComplexDbl>& first,
386
+ Vectorized<ComplexDbl>& second) {
387
+ auto first_perm = first.el_swapped(); // 2perm
388
+ auto second_perm = second.el_swapped(); // 2perm
389
+ // summ
390
+ auto first_ret = first + first_perm; // 2add
391
+ auto second_ret = second + second_perm; // 2 add
392
+ // now lets choose evens
393
+ return el_mergee(first_ret, second_ret); // 2 mergee's
394
+ }
395
+
396
+ static Vectorized<ComplexDbl> horizontal_sub(
397
+ Vectorized<ComplexDbl>& first,
398
+ Vectorized<ComplexDbl>& second) {
399
+ // we will simulate it differently with 6 instructions total
400
+ // lets permute second so that we can add it getting horizontal sums
401
+ auto first_perm = first.el_swapped(); // 2perm
402
+ auto second_perm = second.el_swapped(); // 2perm
403
+ // summ
404
+ auto first_ret = first - first_perm; // 2sub
405
+ auto second_ret = second - second_perm; // 2 sub
406
+ // now lets choose evens
407
+ return el_mergee(first_ret, second_ret); // 2 mergee's
408
+ }
409
+
410
+ Vectorized<ComplexDbl> inline operator*(const Vectorized<ComplexDbl>& b) const {
411
+ //(a + bi) * (c + di) = (ac - bd) + (ad + bc)i
412
+ #if 1
413
+ // this is more vsx friendly than simulating horizontal from x86
414
+ auto vi = b.el_mergeo();
415
+ auto vr = b.el_mergee();
416
+ vi = vi ^ vd_rsign_mask;
417
+ auto ret = elwise_mult(vr);
418
+ auto vx_swapped = el_swapped();
419
+ ret = vx_swapped.el_madd(vi, ret);
420
+ #else
421
+ auto ac_bd = elwise_mult(b);
422
+ auto d_c = b.el_swapped();
423
+ d_c = d_c ^ vd_isign_mask;
424
+ auto ad_bc = elwise_mult(d_c);
425
+ auto ret = horizontal_sub(ac_bd, ad_bc);
426
+ #endif
427
+ return ret;
428
+ }
429
+
430
+ Vectorized<ComplexDbl> inline operator/(const Vectorized<ComplexDbl>& b) const {
431
+ // re + im*i = (a + bi) / (c + di)
432
+ // re = (ac + bd)/abs_2()
433
+ // im = (bc - ad)/abs_2()
434
+ #if 1
435
+ auto vi = b.el_mergeo();
436
+ auto vr = b.el_mergee();
437
+ auto abs_b = b.abs_2_();
438
+ vi = vi ^ vd_isign_mask;
439
+ auto ret = elwise_mult(vr);
440
+ auto vx_swapped = el_swapped();
441
+ ret = vx_swapped.el_madd(vi, ret);
442
+ ret = ret.elwise_div(abs_b);
443
+ #else
444
+ // Vectorized x86 simulation
445
+ auto ac_bd = elwise_mult(b);
446
+ auto d_c = b.el_swapped();
447
+ d_c = d_c ^ vd_rsign_mask;
448
+ auto ad_bc = elwise_mult(d_c);
449
+ auto abs_b = b.abs_2_();
450
+ auto re_im = horizontal_add(ac_bd, ad_bc);
451
+ auto ret = re_im.elwise_div(abs_b);
452
+ #endif
453
+ return ret;
454
+ }
455
+
456
+ Vectorized<ComplexDbl> exp() const {
457
+ return map(std::exp);
458
+ }
459
+ Vectorized<ComplexDbl> exp2() const {
460
+ return map(exp2_impl);
461
+ }
462
+ Vectorized<ComplexDbl> expm1() const {
463
+ return map(std::expm1);
464
+ }
465
+
466
+ Vectorized<ComplexDbl> pow(const Vectorized<ComplexDbl>& exp) const {
467
+ __at_align__ ComplexDbl x_tmp[size()];
468
+ __at_align__ ComplexDbl y_tmp[size()];
469
+ store(x_tmp);
470
+ exp.store(y_tmp);
471
+ for (const auto i : c10::irange(size())) {
472
+ x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]);
473
+ }
474
+ return loadu(x_tmp);
475
+ }
476
+
477
+ Vectorized<ComplexDbl> sgn() const {
478
+ return map(at::native::sgn_impl);
479
+ }
480
+
481
+ Vectorized<ComplexDbl> operator<(const Vectorized<ComplexDbl>& other) const {
482
+ TORCH_CHECK(false, "not supported for complex numbers");
483
+ }
484
+ Vectorized<ComplexDbl> operator<=(const Vectorized<ComplexDbl>& other) const {
485
+ TORCH_CHECK(false, "not supported for complex numbers");
486
+ }
487
+ Vectorized<ComplexDbl> operator>(const Vectorized<ComplexDbl>& other) const {
488
+ TORCH_CHECK(false, "not supported for complex numbers");
489
+ }
490
+ Vectorized<ComplexDbl> operator>=(const Vectorized<ComplexDbl>& other) const {
491
+ TORCH_CHECK(false, "not supported for complex numbers");
492
+ }
493
+
494
+ Vectorized<ComplexDbl> eq(const Vectorized<ComplexDbl>& other) const {
495
+ auto eq = (*this == other); // compares real and imag individually
496
+ // If both real numbers and imag numbers are equal, then the complex numbers are equal
497
+ return (eq.real() & eq.imag()) & vd_one;
498
+ }
499
+ Vectorized<ComplexDbl> ne(const Vectorized<ComplexDbl>& other) const {
500
+ auto ne = (*this != other); // compares real and imag individually
501
+ // If either real numbers or imag numbers are not equal, then the complex numbers are not equal
502
+ return (ne.real() | ne.imag()) & vd_one;
503
+ }
504
+
505
+ DEFINE_MEMBER_OP(operator==, ComplexDbl, vec_cmpeq)
506
+ DEFINE_MEMBER_OP(operator!=, ComplexDbl, vec_cmpne)
507
+
508
+ DEFINE_MEMBER_OP(operator+, ComplexDbl, vec_add)
509
+ DEFINE_MEMBER_OP(operator-, ComplexDbl, vec_sub)
510
+ DEFINE_MEMBER_OP(operator&, ComplexDbl, vec_and)
511
+ DEFINE_MEMBER_OP(operator|, ComplexDbl, vec_or)
512
+ DEFINE_MEMBER_OP(operator^, ComplexDbl, vec_xor)
513
+ // elelemtwise helpers
514
+ DEFINE_MEMBER_OP(elwise_mult, ComplexDbl, vec_mul)
515
+ DEFINE_MEMBER_OP(elwise_div, ComplexDbl, vec_div)
516
+ DEFINE_MEMBER_OP(elwise_gt, ComplexDbl, vec_cmpgt)
517
+ DEFINE_MEMBER_OP(elwise_ge, ComplexDbl, vec_cmpge)
518
+ DEFINE_MEMBER_OP(elwise_lt, ComplexDbl, vec_cmplt)
519
+ DEFINE_MEMBER_OP(elwise_le, ComplexDbl, vec_cmple)
520
+ };
521
+
522
+ template <>
523
+ Vectorized<ComplexDbl> inline maximum(
524
+ const Vectorized<ComplexDbl>& a,
525
+ const Vectorized<ComplexDbl>& b) {
526
+ auto abs_a = a.abs_2_();
527
+ auto abs_b = b.abs_2_();
528
+ // auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_LT_OQ);
529
+ // auto max = _mm256_blendv_ps(a, b, mask);
530
+ auto mask = abs_a.elwise_lt(abs_b);
531
+ auto max = Vectorized<ComplexDbl>::elwise_blendv(a, b, mask);
532
+
533
+ return max;
534
+ // Exploit the fact that all-ones is a NaN.
535
+ // auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q);
536
+ // return _mm256_or_ps(max, isnan);
537
+ }
538
+
539
+ template <>
540
+ Vectorized<ComplexDbl> inline minimum(
541
+ const Vectorized<ComplexDbl>& a,
542
+ const Vectorized<ComplexDbl>& b) {
543
+ auto abs_a = a.abs_2_();
544
+ auto abs_b = b.abs_2_();
545
+ // auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_GT_OQ);
546
+ // auto min = _mm256_blendv_ps(a, b, mask);
547
+ auto mask = abs_a.elwise_gt(abs_b);
548
+ auto min = Vectorized<ComplexDbl>::elwise_blendv(a, b, mask);
549
+ return min;
550
+ // Exploit the fact that all-ones is a NaN.
551
+ // auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q);
552
+ // return _mm256_or_ps(min, isnan);
553
+ }
554
+
555
+
556
+ } // namespace
557
+ } // namespace vec
558
+ } // namespace at
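
For reference (not part of the diff), the identity behind the operator/ and reciprocal() comments in the header above, writing the operands as a+bi and c+di and noting that abs_2_() of c+di is c^2+d^2:

$$\frac{a+bi}{c+di} \;=\; \frac{(a+bi)(c-di)}{(c+di)(c-di)} \;=\; \frac{(ac+bd) + (bc-ad)\,i}{c^{2}+d^{2}}.$$

Setting a = 1, b = 0 gives the reciprocal the code computes, (c - di) / (c^2 + d^2), which is why reciprocal() only flips the sign of the imaginary lane (the xor with vd_isign_mask) and divides by abs_2_().
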
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_complex_float_vsx.h ADDED
@@ -0,0 +1,628 @@
1
+
2
+ #pragma once
3
+ #include <ATen/cpu/vec/intrinsics.h>
4
+ #include <ATen/cpu/vec/vec_base.h>
5
+ #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
6
+ #include <c10/util/complex.h>
7
+ #include <c10/util/irange.h>
8
+
9
+ namespace at {
10
+ namespace vec {
11
+ // See Note [CPU_CAPABILITY namespace]
12
+ inline namespace CPU_CAPABILITY {
13
+ using ComplexFlt = c10::complex<float>;
14
+
15
+ template <>
16
+ class Vectorized<ComplexFlt> {
17
+ private:
18
+ union {
19
+ struct {
20
+ vfloat32 _vec0;
21
+ vfloat32 _vec1;
22
+ };
23
+ struct {
24
+ vbool32 _vecb0;
25
+ vbool32 _vecb1;
26
+ };
27
+
28
+ } __attribute__((__may_alias__));
29
+
30
+ public:
31
+ using value_type = ComplexFlt;
32
+ using vec_internal_type = vfloat32;
33
+ using vec_internal_mask_type = vbool32;
34
+ using size_type = int;
35
+
36
+ static constexpr size_type size() {
37
+ return 4;
38
+ }
39
+ Vectorized() {}
40
+
41
+ C10_ALWAYS_INLINE Vectorized(vfloat32 v) : _vec0{v}, _vec1{v} {}
42
+ C10_ALWAYS_INLINE Vectorized(vbool32 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
43
+ C10_ALWAYS_INLINE Vectorized(vfloat32 v1, vfloat32 v2) : _vec0{v1}, _vec1{v2} {}
44
+ C10_ALWAYS_INLINE Vectorized(vbool32 v1, vbool32 v2) : _vecb0{v1}, _vecb1{v2} {}
45
+
46
+ Vectorized(ComplexFlt val) {
47
+ float real_value = val.real();
48
+ float imag_value = val.imag();
49
+ _vec0 = vfloat32{real_value, imag_value, real_value, imag_value};
50
+ _vec1 = vfloat32{real_value, imag_value, real_value, imag_value};
51
+ }
52
+
53
+ Vectorized(ComplexFlt val1, ComplexFlt val2, ComplexFlt val3, ComplexFlt val4) {
54
+ _vec0 = vfloat32{val1.real(), val1.imag(), val2.real(), val2.imag()};
55
+ _vec1 = vfloat32{val3.real(), val3.imag(), val4.real(), val4.imag()};
56
+ }
57
+
58
+ template <uint64_t mask>
59
+ static std::enable_if_t<blendChoiceComplex(mask) == 0, Vectorized<ComplexFlt>>
60
+ C10_ALWAYS_INLINE
61
+ blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) {
62
+ return a;
63
+ }
64
+
65
+ template <uint64_t mask>
66
+ static std::enable_if_t<blendChoiceComplex(mask) == 1, Vectorized<ComplexFlt>>
67
+ C10_ALWAYS_INLINE
68
+ blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) {
69
+ return b;
70
+ }
71
+
72
+ template <uint64_t mask>
73
+ static std::enable_if_t<blendChoiceComplex(mask) == 2, Vectorized<ComplexFlt>>
74
+ C10_ALWAYS_INLINE
75
+ blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) {
76
+ return {b._vec0, a._vec1};
77
+ }
78
+
79
+ template <uint64_t mask>
80
+ static std::enable_if_t<blendChoiceComplex(mask) == 3, Vectorized<ComplexFlt>>
81
+ C10_ALWAYS_INLINE
82
+ blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) {
83
+ return {a._vec0, b._vec1};
84
+ }
85
+
86
+ template <uint64_t mask>
87
+ static std::enable_if_t<blendChoiceComplex(mask) == 4, Vectorized<ComplexFlt>>
88
+ C10_ALWAYS_INLINE
89
+ blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) {
90
+ const vbool32 mask_1st = VsxComplexMask1(mask);
91
+ return {(vfloat32)vec_sel(a._vec0, b._vec0, mask_1st), a._vec1};
92
+ }
93
+
94
+ template <uint64_t mask>
95
+ static std::enable_if_t<blendChoiceComplex(mask) == 5, Vectorized<ComplexFlt>>
96
+ C10_ALWAYS_INLINE
97
+ blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) {
98
+ const vbool32 mask_1st = VsxComplexMask1(mask);
99
+ return {(vfloat32)vec_sel(a._vec0, b._vec0, mask_1st), b._vec1};
100
+ }
101
+
102
+ template <uint64_t mask>
103
+ static std::enable_if_t<blendChoiceComplex(mask) == 6, Vectorized<ComplexFlt>>
104
+ C10_ALWAYS_INLINE
105
+ blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) {
106
+ const vbool32 mask_2nd = VsxComplexMask2(mask);
107
+ // generated masks
108
+ return {a._vec0, (vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)};
109
+ }
110
+
111
+ template <uint64_t mask>
112
+ static std::enable_if_t<blendChoiceComplex(mask) == 7, Vectorized<ComplexFlt>>
113
+ C10_ALWAYS_INLINE
114
+ blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) {
115
+ const vbool32 mask_2nd = VsxComplexMask2(mask);
116
+ // generated masks
117
+ return {b._vec0, (vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)};
118
+ }
119
+
120
+ template <uint64_t mask>
121
+ static std::enable_if_t<blendChoiceComplex(mask) == 8, Vectorized<ComplexFlt>>
122
+ C10_ALWAYS_INLINE
123
+ blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) {
124
+ const vbool32 mask_1st = VsxComplexMask1(mask);
125
+ const vbool32 mask_2nd = VsxComplexMask2(mask);
126
+ return {
127
+ (vfloat32)vec_sel(a._vec0, b._vec0, mask_1st),
128
+ (vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)};
129
+ }
130
+
131
+ template <int64_t mask>
132
+ static Vectorized<ComplexFlt> C10_ALWAYS_INLINE
133
+ el_blend(const Vectorized<ComplexFlt>& a, const Vectorized<ComplexFlt>& b) {
134
+ const vbool32 mask_1st = VsxMask1(mask);
135
+ const vbool32 mask_2nd = VsxMask2(mask);
136
+ return {
137
+ (vfloat32)vec_sel(a._vec0, b._vec0, mask_1st),
138
+ (vfloat32)vec_sel(a._vec1, b._vec1, mask_2nd)};
139
+ }
140
+
141
+ static Vectorized<ComplexFlt> blendv(
142
+ const Vectorized<ComplexFlt>& a,
143
+ const Vectorized<ComplexFlt>& b,
144
+ const Vectorized<ComplexFlt>& mask) {
145
+ // convert std::complex<V> index mask to V index mask: xy -> xxyy
146
+ auto mask_complex = Vectorized<ComplexFlt>(
147
+ vec_mergeh(mask._vec0, mask._vec0), vec_mergeh(mask._vec1, mask._vec1));
148
+ return {
149
+ vec_sel(a._vec0, b._vec0, reinterpret_cast<vbool32>(mask_complex._vec0)),
150
+ vec_sel(a._vec1, b._vec1, reinterpret_cast<vbool32>(mask_complex._vec1)),
151
+ };
152
+ }
153
+
154
+ static Vectorized<ComplexFlt> elwise_blendv(
155
+ const Vectorized<ComplexFlt>& a,
156
+ const Vectorized<ComplexFlt>& b,
157
+ const Vectorized<ComplexFlt>& mask) {
158
+ return {
159
+ vec_sel(a._vec0, b._vec0, reinterpret_cast<vbool32>(mask._vec0)),
160
+ vec_sel(a._vec1, b._vec1, reinterpret_cast<vbool32>(mask._vec1)),
161
+ };
162
+ }
163
+
164
+ template <typename step_t>
165
+ static Vectorized<ComplexFlt> arange(
166
+ ComplexFlt base = 0.,
167
+ step_t step = static_cast<step_t>(1)) {
168
+ return Vectorized<ComplexFlt>(
169
+ base,
170
+ base + step,
171
+ base + ComplexFlt(2) * step,
172
+ base + ComplexFlt(3) * step);
173
+ }
174
+ static Vectorized<ComplexFlt> set(
175
+ const Vectorized<ComplexFlt>& a,
176
+ const Vectorized<ComplexFlt>& b,
177
+ int64_t count = size()) {
178
+ switch (count) {
179
+ case 0:
180
+ return a;
181
+ case 1:
182
+ return blend<1>(a, b);
183
+ case 2:
184
+ return blend<3>(a, b);
185
+ case 3:
186
+ return blend<7>(a, b);
187
+ }
188
+ return b;
189
+ }
190
+
191
+ static Vectorized<value_type> C10_ALWAYS_INLINE
192
+ loadu(const void* ptr, int count = size()) {
193
+ if (count == size()) {
194
+ return {
195
+ vec_vsx_ld(offset0, reinterpret_cast<const float*>(ptr)),
196
+ vec_vsx_ld(offset16, reinterpret_cast<const float*>(ptr))};
197
+ }
198
+
199
+ __at_align__ value_type tmp_values[size()] = {};
200
+ std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));
201
+
202
+ return {
203
+ vec_vsx_ld(offset0, reinterpret_cast<const float*>(tmp_values)),
204
+ vec_vsx_ld(offset16, reinterpret_cast<const float*>(tmp_values))};
205
+ }
206
+
207
+ void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
208
+ if (count == size()) {
209
+ vec_vsx_st(_vec0, offset0, reinterpret_cast<float*>(ptr));
210
+ vec_vsx_st(_vec1, offset16, reinterpret_cast<float*>(ptr));
211
+ } else if (count > 0) {
212
+ __at_align__ value_type tmp_values[size()];
213
+ vec_vsx_st(_vec0, offset0, reinterpret_cast<float*>(tmp_values));
214
+ vec_vsx_st(_vec1, offset16, reinterpret_cast<float*>(tmp_values));
215
+ std::memcpy(
216
+ ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
217
+ }
218
+ }
219
+
220
+ const ComplexFlt& operator[](int idx) const = delete;
221
+ ComplexFlt& operator[](int idx) = delete;
222
+
223
+ Vectorized<ComplexFlt> map(ComplexFlt (*const f)(ComplexFlt)) const {
224
+ __at_align__ ComplexFlt tmp[size()];
225
+ store(tmp);
226
+ for (const auto i : c10::irange(size())) {
227
+ tmp[i] = f(tmp[i]);
228
+ }
229
+ return loadu(tmp);
230
+ }
231
+
232
+ Vectorized<ComplexFlt> map(ComplexFlt (*const f)(const ComplexFlt&)) const {
233
+ __at_align__ ComplexFlt tmp[size()];
234
+ store(tmp);
235
+ for (const auto i : c10::irange(size())) {
236
+ tmp[i] = f(tmp[i]);
237
+ }
238
+ return loadu(tmp);
239
+ }
240
+
241
+ static Vectorized<ComplexFlt> horizontal_add_permD8(
242
+ Vectorized<ComplexFlt>& first,
243
+ Vectorized<ComplexFlt>& second) {
244
+ // we will simulate it differently with 6 instructions total
245
+ // lets permute second so that we can add it getting horizontal sums
246
+ auto first_perm = first.el_swapped(); // 2perm
247
+ auto second_perm = second.el_swapped(); // 2perm
248
+ // sum
249
+ auto first_ret = first + first_perm; // 2add
250
+ auto second_ret = second + second_perm; // 2 add
251
+ // now lets choose evens
252
+ return el_mergee(first_ret, second_ret); // 2 mergee's
253
+ }
254
+
255
+ static Vectorized<ComplexFlt> horizontal_sub_permD8(
256
+ Vectorized<ComplexFlt>& first,
257
+ Vectorized<ComplexFlt>& second) {
258
+ // we will simulate it differently with 6 instructions total
259
+ // lets permute second so that we can add it getting horizontal sums
260
+ auto first_perm = first.el_swapped(); // 2perm
261
+ auto second_perm = second.el_swapped(); // 2perm
262
+ // sum
263
+ auto first_ret = first - first_perm; // 2sub
264
+ auto second_ret = second - second_perm; // 2 sub
265
+ // now lets choose evens
266
+ return el_mergee(first_ret, second_ret); // 2 mergee's
267
+ }
268
+
269
+ Vectorized<ComplexFlt> abs_2_() const {
270
+ auto a = (*this).elwise_mult(*this);
271
+ auto permuted = a.el_swapped();
272
+ a = a + permuted;
273
+ return a.el_mergee();
274
+ }
275
+
276
+ Vectorized<ComplexFlt> abs_() const {
277
+ auto ret = abs_2_();
278
+ return ret.elwise_sqrt();
279
+ }
280
+
281
+ Vectorized<ComplexFlt> abs() const {
282
+ return abs_() & real_mask;
283
+ }
284
+
285
+ Vectorized<ComplexFlt> real_() const {
286
+ return *this & real_mask;
287
+ }
288
+ Vectorized<ComplexFlt> real() const {
289
+ return *this & real_mask;
290
+ }
291
+ Vectorized<ComplexFlt> imag_() const {
292
+ return *this & imag_mask;
293
+ }
294
+ Vectorized<ComplexFlt> imag() const {
295
+ // we can use swap_mask or sldwi
296
+ auto ret = imag_();
297
+ return {
298
+ vec_sldw(ret._vec0, ret._vec0, 3), vec_sldw(ret._vec1, ret._vec1, 3)};
299
+ }
300
+
301
+ Vectorized<ComplexFlt> conj_() const {
302
+ return *this ^ isign_mask;
303
+ }
304
+ Vectorized<ComplexFlt> conj() const {
305
+ return *this ^ isign_mask;
306
+ }
307
+
308
+ Vectorized<ComplexFlt> log() const {
309
+ // Most trigonometric ops use the log() op to improve complex number
310
+ // performance.
311
+ return map(std::log);
312
+ }
313
+
314
+ Vectorized<ComplexFlt> log2() const {
315
+ // log2eB_inv
316
+ auto ret = log();
317
+ return ret.elwise_mult(log2e_inv);
318
+ }
319
+ Vectorized<ComplexFlt> log10() const {
320
+ auto ret = log();
321
+ return ret.elwise_mult(log10e_inv);
322
+ }
323
+
324
+ Vectorized<ComplexFlt> log1p() const {
325
+ return map(std::log1p);
326
+ }
327
+
328
+ Vectorized<ComplexFlt> el_swapped() const {
329
+ vfloat32 v0 = vec_perm(_vec0, _vec0, swap_mask);
330
+ vfloat32 v1 = vec_perm(_vec1, _vec1, swap_mask);
331
+ return {v0, v1};
332
+ }
333
+
334
+ Vectorized<ComplexFlt> el_mergee() const {
335
+ // as mergee phased in , we can use vec_perm with mask
336
+ return {vec_mergee(_vecb0, _vecb0), vec_mergee(_vecb1, _vecb1)};
337
+ }
338
+
339
+ Vectorized<ComplexFlt> el_mergeo() const {
340
+ // as mergeo phased in , we can use vec_perm with mask
341
+ return {vec_mergeo(_vecb0, _vecb0), vec_mergeo(_vecb1, _vecb1)};
342
+ }
343
+
344
+ Vectorized<ComplexFlt> el_madd(
345
+ const Vectorized<ComplexFlt>& multiplier,
346
+ const Vectorized<ComplexFlt>& val) const {
347
+ return {
348
+ vec_madd(_vec0, multiplier._vec0, val._vec0),
349
+ vec_madd(_vec1, multiplier._vec1, val._vec1)};
350
+ }
351
+
352
+ static Vectorized<ComplexFlt> el_mergee(
353
+ Vectorized<ComplexFlt>& first,
354
+ Vectorized<ComplexFlt>& second) {
355
+ // as mergee phased in , we can use vec_perm with mask
356
+ return {
357
+ vec_mergee(first._vecb0, second._vecb0),
358
+ vec_mergee(first._vecb1, second._vecb1)};
359
+ }
360
+
361
+ Vectorized<ComplexFlt> angle_() const {
362
+ // angle = atan2(b, a)
363
+ // auto b_a = _mm256_permute_ps(values, 0xB1); // b a
364
+ // return Sleef_atan2f8_u10(values, b_a); // 90-angle angle
365
+ Vectorized<ComplexFlt> ret;
366
+ for (int i = 0; i < 4; i += 2) {
367
+ ret._vec0[i] = std::atan2(_vec0[i + 1], _vec0[i]);
368
+ ret._vec1[i] = std::atan2(_vec1[i + 1], _vec1[i]);
369
+ }
370
+ return ret;
371
+ }
372
+
373
+ Vectorized<ComplexFlt> angle() const {
374
+ return angle_() & real_mask;
375
+ }
376
+
377
+ Vectorized<ComplexFlt> sin() const {
378
+ return map(std::sin);
379
+ }
380
+ Vectorized<ComplexFlt> sinh() const {
381
+ return map(std::sinh);
382
+ }
383
+ Vectorized<ComplexFlt> cos() const {
384
+ return map(std::cos);
385
+ }
386
+ Vectorized<ComplexFlt> cosh() const {
387
+ return map(std::cosh);
388
+ }
389
+ Vectorized<ComplexFlt> ceil() const {
390
+ return {vec_ceil(_vec0), vec_ceil(_vec1)};
391
+ }
392
+ Vectorized<ComplexFlt> floor() const {
393
+ return {vec_floor(_vec0), vec_floor(_vec1)};
394
+ }
395
+ Vectorized<ComplexFlt> neg() const {
396
+ auto z = Vectorized<ComplexFlt>(zero);
397
+ return z - *this;
398
+ }
399
+ Vectorized<ComplexFlt> round() const {
400
+ return {vec_round(_vec0), vec_round(_vec1)};
401
+ }
402
+ Vectorized<ComplexFlt> tan() const {
403
+ return map(std::tan);
404
+ }
405
+ Vectorized<ComplexFlt> tanh() const {
406
+ return map(std::tanh);
407
+ }
408
+ Vectorized<ComplexFlt> trunc() const {
409
+ return {vec_trunc(_vec0), vec_trunc(_vec1)};
410
+ }
411
+
412
+ Vectorized<ComplexFlt> elwise_sqrt() const {
413
+ return {vec_sqrt(_vec0), vec_sqrt(_vec1)};
414
+ }
415
+
416
+ Vectorized<ComplexFlt> sqrt() const {
417
+ return map(std::sqrt);
418
+ }
419
+
420
+ Vectorized<ComplexFlt> reciprocal() const {
421
+ // re + im*i = (a + bi) / (c + di)
422
+ // re = (ac + bd)/abs_2() = c/abs_2()
423
+ // im = (bc - ad)/abs_2() = -d/abs_2()
424
+ auto c_d = *this ^ isign_mask; // c -d
425
+ auto abs = abs_2_();
426
+ return c_d.elwise_div(abs);
427
+ }
428
+
429
+ Vectorized<ComplexFlt> rsqrt() const {
430
+ return sqrt().reciprocal();
431
+ }
432
+
433
+ Vectorized<ComplexFlt> pow(const Vectorized<ComplexFlt>& exp) const {
434
+ __at_align__ ComplexFlt x_tmp[size()];
435
+ __at_align__ ComplexFlt y_tmp[size()];
436
+ store(x_tmp);
437
+ exp.store(y_tmp);
438
+ for (const auto i : c10::irange(size())) {
439
+ x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]);
440
+ }
441
+ return loadu(x_tmp);
442
+ }
443
+
444
+ Vectorized<ComplexFlt> atan() const {
445
+ // atan(x) = i/2 * ln((i + z)/(i - z))
446
+ auto ione = Vectorized(imag_one);
447
+ auto sum = ione + *this;
448
+ auto sub = ione - *this;
449
+ auto ln = (sum / sub).log(); // ln((i + z)/(i - z))
450
+ return ln * imag_half; // i/2*ln()
451
+ }
452
+ Vectorized<ComplexFlt> atanh() const {
453
+ return map(std::atanh);
454
+ }
455
+
456
+ Vectorized<ComplexFlt> acos() const {
457
+ // acos(x) = pi/2 - asin(x)
458
+ return Vectorized(pi_2) - asin();
459
+ }
460
+
461
+ Vectorized<ComplexFlt> inline operator*(const Vectorized<ComplexFlt>& b) const {
462
+ //(a + bi) * (c + di) = (ac - bd) + (ad + bc)i
463
+
464
+ #if 1
465
+ // this is more vsx friendly than simulating horizontal from x86
466
+
467
+ auto vi = b.el_mergeo();
468
+ auto vr = b.el_mergee();
469
+ vi = vi ^ rsign_mask;
470
+ auto ret = elwise_mult(vr);
471
+ auto vx_swapped = el_swapped();
472
+ ret = vx_swapped.el_madd(vi, ret);
473
+ return ret;
474
+
475
+ #else
476
+
477
+ auto ac_bd = elwise_mult(b);
478
+ auto d_c = b.el_swapped();
479
+ d_c = d_c ^ isign_mask;
480
+ auto ad_bc = elwise_mult(d_c);
481
+ auto ret = horizontal_sub_permD8(ac_bd, ad_bc);
482
+ return ret;
483
+ #endif
484
+ }
485
+
486
+ Vectorized<ComplexFlt> inline operator/(const Vectorized<ComplexFlt>& b) const {
487
+ // re + im*i = (a + bi) / (c + di)
488
+ // re = (ac + bd)/abs_2()
489
+ // im = (bc - ad)/abs_2()
490
+ #if 1
491
+ auto vi = b.el_mergeo();
492
+ auto vr = b.el_mergee();
493
+ auto abs_b = b.abs_2_();
494
+ vi = vi ^ isign_mask;
495
+ auto ret = elwise_mult(vr);
496
+ auto vx_swapped = el_swapped();
497
+ ret = vx_swapped.el_madd(vi, ret);
498
+ ret = ret.elwise_div(abs_b);
499
+ #else
500
+ // Vectorized x86 simulation
501
+ auto ac_bd = elwise_mult(b);
502
+ auto d_c = b.el_swapped();
503
+ d_c = d_c ^ rsign_mask;
504
+ auto ad_bc = elwise_mult(d_c);
505
+ auto abs_b = b.abs_2_();
506
+ auto re_im = horizontal_add_permD8(ac_bd, ad_bc);
507
+ auto ret = re_im.elwise_div(abs_b);
508
+ #endif
509
+ return ret;
510
+ }
511
+
512
+ Vectorized<ComplexFlt> asin() const {
513
+ // asin(x)
514
+ // = -i*ln(iz + sqrt(1 -z^2))
515
+ // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi)))
516
+ // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi))
517
+
518
+ #if 1
519
+ auto conj = conj_();
520
+ auto b_a = conj.el_swapped();
521
+ auto ab = conj.elwise_mult(b_a);
522
+ auto im = ab + ab;
523
+ auto val_2 = (*this).elwise_mult(*this);
524
+ auto val_2_swapped = val_2.el_swapped();
525
+ auto re = horizontal_sub_permD8(val_2, val_2_swapped);
526
+ re = Vectorized<ComplexFlt>(one) - re;
527
+ auto root = el_blend<0xAA>(re, im).sqrt();
528
+ auto ln = (b_a + root).log();
529
+ return ln.el_swapped().conj();
530
+ #else
531
+ return map(std::asin);
532
+ #endif
533
+ }
534
+
535
+ Vectorized<ComplexFlt> exp() const {
536
+ return map(std::exp);
537
+ }
538
+ Vectorized<ComplexFlt> exp2() const {
539
+ return map(exp2_impl);
540
+ }
541
+ Vectorized<ComplexFlt> expm1() const {
542
+ return map(std::expm1);
543
+ }
544
+
545
+ Vectorized<ComplexFlt> eq(const Vectorized<ComplexFlt>& other) const {
546
+ auto eq = (*this == other); // compares real and imag individually
547
+ // If both real numbers and imag numbers are equal, then the complex numbers are equal
548
+ return (eq.real() & eq.imag()) & one;
549
+ }
550
+ Vectorized<ComplexFlt> ne(const Vectorized<ComplexFlt>& other) const {
551
+ auto ne = (*this != other); // compares real and imag individually
552
+ // If either real numbers or imag numbers are not equal, then the complex numbers are not equal
553
+ return (ne.real() | ne.imag()) & one;
554
+ }
555
+
556
+ Vectorized<ComplexFlt> sgn() const {
557
+ return map(at::native::sgn_impl);
558
+ }
559
+
560
+ Vectorized<ComplexFlt> operator<(const Vectorized<ComplexFlt>& other) const {
561
+ TORCH_CHECK(false, "not supported for complex numbers");
562
+ }
563
+
564
+ Vectorized<ComplexFlt> operator<=(const Vectorized<ComplexFlt>& other) const {
565
+ TORCH_CHECK(false, "not supported for complex numbers");
566
+ }
567
+
568
+ Vectorized<ComplexFlt> operator>(const Vectorized<ComplexFlt>& other) const {
569
+ TORCH_CHECK(false, "not supported for complex numbers");
570
+ }
571
+
572
+ Vectorized<ComplexFlt> operator>=(const Vectorized<ComplexFlt>& other) const {
573
+ TORCH_CHECK(false, "not supported for complex numbers");
574
+ }
575
+
576
+ DEFINE_MEMBER_OP(operator==, ComplexFlt, vec_cmpeq)
577
+ DEFINE_MEMBER_OP(operator!=, ComplexFlt, vec_cmpne)
578
+
579
+ DEFINE_MEMBER_OP(operator+, ComplexFlt, vec_add)
580
+ DEFINE_MEMBER_OP(operator-, ComplexFlt, vec_sub)
581
+ DEFINE_MEMBER_OP(operator&, ComplexFlt, vec_and)
582
+ DEFINE_MEMBER_OP(operator|, ComplexFlt, vec_or)
583
+ DEFINE_MEMBER_OP(operator^, ComplexFlt, vec_xor)
584
+ // elementwise helpers
585
+ DEFINE_MEMBER_OP(elwise_mult, ComplexFlt, vec_mul)
586
+ DEFINE_MEMBER_OP(elwise_div, ComplexFlt, vec_div)
587
+ DEFINE_MEMBER_OP(elwise_gt, ComplexFlt, vec_cmpgt)
588
+ DEFINE_MEMBER_OP(elwise_ge, ComplexFlt, vec_cmpge)
589
+ DEFINE_MEMBER_OP(elwise_lt, ComplexFlt, vec_cmplt)
590
+ DEFINE_MEMBER_OP(elwise_le, ComplexFlt, vec_cmple)
591
+ };
592
+
593
+ template <>
594
+ Vectorized<ComplexFlt> inline maximum(
595
+ const Vectorized<ComplexFlt>& a,
596
+ const Vectorized<ComplexFlt>& b) {
597
+ auto abs_a = a.abs_2_();
598
+ auto abs_b = b.abs_2_();
599
+ // auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_LT_OQ);
600
+ // auto max = _mm256_blendv_ps(a, b, mask);
601
+ auto mask = abs_a.elwise_lt(abs_b);
602
+ auto max = Vectorized<ComplexFlt>::elwise_blendv(a, b, mask);
603
+
604
+ return max;
605
+ // Exploit the fact that all-ones is a NaN.
606
+ // auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q);
607
+ // return _mm256_or_ps(max, isnan);
608
+ }
609
+
610
+ template <>
611
+ Vectorized<ComplexFlt> inline minimum(
612
+ const Vectorized<ComplexFlt>& a,
613
+ const Vectorized<ComplexFlt>& b) {
614
+ auto abs_a = a.abs_2_();
615
+ auto abs_b = b.abs_2_();
616
+ // auto mask = _mm256_cmp_ps(abs_a, abs_b, _CMP_GT_OQ);
617
+ // auto min = _mm256_blendv_ps(a, b, mask);
618
+ auto mask = abs_a.elwise_gt(abs_b);
619
+ auto min = Vectorized<ComplexFlt>::elwise_blendv(a, b, mask);
620
+ return min;
621
+ // Exploit the fact that all-ones is a NaN.
622
+ // auto isnan = _mm256_cmp_ps(abs_a, abs_b, _CMP_UNORD_Q);
623
+ // return _mm256_or_ps(min, isnan);
624
+ }
625
+
626
+ } // namespace
627
+ } // namespace vec
628
+ } // namespace at
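As a reading aid for the complex maximum()/minimum() specializations above: they order elements by squared modulus (abs_2_), so no square root is taken, and on ties the blend keeps the first operand. Below is a minimal scalar sketch of that semantic using std::complex<float> instead of the vector types; complex_maximum is an illustrative name, not part of the header.

#include <complex>

// Scalar model of the vectorized maximum() above: compare squared moduli and
// pick the operand with the larger one; on ties the first operand wins,
// mirroring abs_a.elwise_lt(abs_b) followed by elwise_blendv(a, b, mask).
static std::complex<float> complex_maximum(std::complex<float> a,
                                           std::complex<float> b) {
  float abs2_a = a.real() * a.real() + a.imag() * a.imag();
  float abs2_b = b.real() * b.real() + b.imag() * b.imag();
  return (abs2_a < abs2_b) ? b : a;
}

The commented-out _mm256_* lines preserved above are the AVX intrinsic sequence, which additionally ORs a NaN mask into the result; the VSX code here returns the blended value directly.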
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_double_vsx.h ADDED
@@ -0,0 +1,422 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cpu/vec/intrinsics.h>
4
+ #include <ATen/cpu/vec/vec_base.h>
5
+ #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
6
+ #include <c10/util/irange.h>
7
+
8
+ #include <sleef.h>
9
+
10
+ namespace at {
11
+ namespace vec {
12
+
13
+ inline namespace CPU_CAPABILITY {
14
+
15
+
16
+ template <>
17
+ class Vectorized<double> {
18
+ private:
19
+ union {
20
+ struct {
21
+ vfloat64 _vec0;
22
+ vfloat64 _vec1;
23
+ };
24
+ struct {
25
+ vbool64 _vecb0;
26
+ vbool64 _vecb1;
27
+ };
28
+
29
+ } __attribute__((__may_alias__));
30
+
31
+ public:
32
+ using value_type = double;
33
+ using vec_internal_type = vfloat64;
34
+ using vec_internal_mask_type = vbool64;
35
+ using size_type = int;
36
+ static constexpr size_type size() {
37
+ return 4;
38
+ }
39
+ Vectorized() {}
40
+ C10_ALWAYS_INLINE Vectorized(vfloat64 v) : _vec0{v}, _vec1{v} {}
41
+ C10_ALWAYS_INLINE Vectorized(vbool64 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
42
+ C10_ALWAYS_INLINE Vectorized(vfloat64 v1, vfloat64 v2) : _vec0{v1}, _vec1{v2} {}
43
+ C10_ALWAYS_INLINE Vectorized(vbool64 v1, vbool64 v2) : _vecb0{v1}, _vecb1{v2} {}
44
+ C10_ALWAYS_INLINE Vectorized(double scalar)
45
+ : _vec0{vec_splats(scalar)}, _vec1{vec_splats(scalar)} {}
46
+ C10_ALWAYS_INLINE Vectorized(
47
+ double scalar1,
48
+ double scalar2,
49
+ double scalar3,
50
+ double scalar4)
51
+ : _vec0{vfloat64{scalar1, scalar2}}, _vec1{vfloat64{scalar3, scalar4}} {}
52
+ C10_ALWAYS_INLINE const vec_internal_type& vec0() const {
53
+ return _vec0;
54
+ }
55
+ C10_ALWAYS_INLINE const vec_internal_type& vec1() const {
56
+ return _vec1;
57
+ }
58
+
59
+ int zero_mask() const {
60
+ auto cmp = (*this == vd_zero);
61
+ return (cmp._vecb0[0] & 1) | (cmp._vecb0[1] & 2) | (cmp._vecb1[0] & 4) |
62
+ (cmp._vecb1[1] & 8);
63
+ }
64
+
65
+ template <int64_t mask>
66
+ static std::enable_if_t<blendChoiceDbl(mask) == 0, Vectorized<double>> C10_ALWAYS_INLINE
67
+ blend(const Vectorized<double>& a, const Vectorized<double>& b) {
68
+ return a;
69
+ }
70
+
71
+ template <int64_t mask>
72
+ static std::enable_if_t<blendChoiceDbl(mask) == 1, Vectorized<double>> C10_ALWAYS_INLINE
73
+ blend(const Vectorized<double>& a, const Vectorized<double>& b) {
74
+ return b;
75
+ }
76
+
77
+ template <int64_t mask>
78
+ static std::enable_if_t<blendChoiceDbl(mask) == 2, Vectorized<double>> C10_ALWAYS_INLINE
79
+ blend(const Vectorized<double>& a, const Vectorized<double>& b) {
80
+ return { b._vec0, a._vec1 };
81
+ }
82
+
83
+ template <int64_t mask>
84
+ static std::enable_if_t<blendChoiceDbl(mask) == 3, Vectorized<double>> C10_ALWAYS_INLINE
85
+ blend(const Vectorized<double>& a, const Vectorized<double>& b) {
86
+ return { a._vec0, b._vec1 };
87
+ }
88
+
89
+
90
+ template <int64_t mask>
91
+ static std::enable_if_t<blendChoiceDbl(mask) == 4, Vectorized<double>> C10_ALWAYS_INLINE
92
+ blend(const Vectorized<double>& a, const Vectorized<double>& b) {
93
+ const vbool64 mask_1st = VsxDblMask1(mask);
94
+ return { (vfloat64)vec_sel(a._vec0, b._vec0, mask_1st), a._vec1 };
95
+ }
96
+
97
+ template <int64_t mask>
98
+ static std::enable_if_t<blendChoiceDbl(mask) == 5, Vectorized<double>> C10_ALWAYS_INLINE
99
+ blend(const Vectorized<double>& a, const Vectorized<double>& b) {
100
+ const vbool64 mask_1st = VsxDblMask1(mask);
101
+ return { (vfloat64)vec_sel(a._vec0, b._vec0, mask_1st), b._vec1 };
102
+ }
103
+
104
+
105
+ template <int64_t mask>
106
+ static std::enable_if_t<blendChoiceDbl(mask) == 6,
107
+ Vectorized<double>>
108
+ C10_ALWAYS_INLINE blend(const Vectorized<double>& a, const Vectorized<double>& b) {
109
+ const vbool64 mask_2nd = VsxDblMask2(mask);
110
+ // generated masks
111
+ return { a._vec0,
112
+ (vfloat64)vec_sel(a._vec1, b._vec1, mask_2nd) };
113
+ }
114
+
115
+ template <int64_t mask>
116
+ static std::enable_if_t<blendChoiceDbl(mask) == 7,
117
+ Vectorized<double>>
118
+ C10_ALWAYS_INLINE blend(const Vectorized<double>& a, const Vectorized<double>& b) {
119
+ const vbool64 mask_2nd = VsxDblMask2(mask);
120
+ // generated masks
121
+ return { b._vec0,
122
+ (vfloat64)vec_sel(a._vec1, b._vec1, mask_2nd) };
123
+ }
124
+
125
+ template <int64_t mask>
126
+ static std::enable_if_t<blendChoiceDbl(mask) == 8, Vectorized<double>>
127
+ C10_ALWAYS_INLINE blend(const Vectorized<double>& a, const Vectorized<double>& b) {
128
+ const vbool64 mask_1st = VsxDblMask1(mask);
129
+ const vbool64 mask_2nd = VsxDblMask2(mask);
130
+ return {
131
+ (vfloat64)vec_sel(a._vec0, b._vec0, mask_1st),
132
+ (vfloat64)vec_sel(a._vec1, b._vec1, mask_2nd) };
133
+ }
134
+
135
+
136
+ static Vectorized<double> C10_ALWAYS_INLINE blendv(
137
+ const Vectorized<double>& a,
138
+ const Vectorized<double>& b,
139
+ const Vectorized<double>& mask) {
140
+ // the mask used here is returned by a comparison of vec256
141
+
142
+ return {
143
+ vec_sel(a._vec0, b._vec0, mask._vecb0),
144
+ vec_sel(a._vec1, b._vec1, mask._vecb1)};
145
+ }
146
+ template <typename step_t>
147
+ static Vectorized<double> arange(double base = 0., step_t step = static_cast<step_t>(1)) {
148
+ return Vectorized<double>(base, base + step, base + 2 * step, base + 3 * step);
149
+ }
150
+
151
+ static Vectorized<double> C10_ALWAYS_INLINE
152
+ set(const Vectorized<double>& a, const Vectorized<double>& b, size_t count = size()) {
153
+ switch (count) {
154
+ case 0:
155
+ return a;
156
+ case 1:
157
+ return blend<1>(a, b);
158
+ case 2:
159
+ return blend<3>(a, b);
160
+ case 3:
161
+ return blend<7>(a, b);
162
+ }
163
+
164
+ return b;
165
+ }
166
+ static Vectorized<value_type> C10_ALWAYS_INLINE
167
+ loadu(const void* ptr, int count = size()) {
168
+ if (count == size()) {
169
+ return {
170
+ vec_vsx_ld(offset0, reinterpret_cast<const value_type*>(ptr)),
171
+ vec_vsx_ld(offset16, reinterpret_cast<const value_type*>(ptr))};
172
+ }
173
+
174
+ __at_align__ value_type tmp_values[size()] = {};
175
+ std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));
176
+
177
+ return {vec_vsx_ld(offset0, tmp_values), vec_vsx_ld(offset16, tmp_values)};
178
+ }
179
+ void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
180
+ if (count == size()) {
181
+ vec_vsx_st(_vec0, offset0, reinterpret_cast<value_type*>(ptr));
182
+ vec_vsx_st(_vec1, offset16, reinterpret_cast<value_type*>(ptr));
183
+ } else if (count > 0) {
184
+ __at_align__ value_type tmp_values[size()];
185
+ vec_vsx_st(_vec0, offset0, tmp_values);
186
+ vec_vsx_st(_vec1, offset16, tmp_values);
187
+ std::memcpy(
188
+ ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
189
+ }
190
+ }
191
+ const double& operator[](int idx) const = delete;
192
+ double& operator[](int idx) = delete;
193
+ Vectorized<double> map(double (*const f)(double)) const {
194
+ Vectorized<double> ret;
195
+ for (const auto i : c10::irange(size()/2)) {
196
+ ret._vec0[i] = f(_vec0[i]);
197
+ }
198
+ for (const auto i : c10::irange(size()/2)) {
199
+ ret._vec1[i] = f(_vec1[i]);
200
+ }
201
+ return ret;
202
+ }
203
+
204
+ Vectorized<double> mapbi(double (*const f)(double, double), const Vectorized<double>& other)
205
+ const {
206
+ Vectorized<double> ret;
207
+ for (const auto i : c10::irange(size()/2)) {
208
+ ret._vec0[i] = f(_vec0[i], other._vec0[i]);
209
+ }
210
+ for (const auto i : c10::irange(size()/2)) {
211
+ ret._vec1[i] = f(_vec1[i], other._vec1[i]);
212
+ }
213
+ return ret;
214
+ }
215
+ Vectorized<double> C10_ALWAYS_INLINE abs() const {
216
+ return {vec_abs(_vec0), vec_abs(_vec1)};
217
+ }
218
+
219
+ Vectorized<double> C10_ALWAYS_INLINE acos() const {
220
+ return {Sleef_acosd2_u10(_vec0), Sleef_acosd2_u10(_vec1)};
221
+ }
222
+ Vectorized<double> C10_ALWAYS_INLINE asin() const {
223
+ return {Sleef_asind2_u10(_vec0), Sleef_asind2_u10(_vec1)};
224
+ }
225
+ Vectorized<double> atan() const {
226
+ return {Sleef_atand2_u10(_vec0), Sleef_atand2_u10(_vec1)};
227
+ }
228
+ Vectorized<double> atanh() const {
229
+ return {Sleef_atanhd2_u10(_vec0), Sleef_atanhd2_u10(_vec1)};
230
+ }
231
+ Vectorized<double> atan2(const Vectorized<double>& b) const {
232
+ return {Sleef_atan2d2_u10(_vec0, b._vec0), Sleef_atan2d2_u10(_vec1, b._vec1)};
233
+ }
234
+ Vectorized<double> copysign(const Vectorized<double> &sign) const {
235
+ return {Sleef_copysignd2(_vec0, sign._vec0), Sleef_copysignd2(_vec1, sign._vec1)};
236
+ }
237
+ Vectorized<double> erf() const {
238
+ return {Sleef_erfd2_u10(_vec0), Sleef_erfd2_u10(_vec1)};
239
+ }
240
+ Vectorized<double> erfc() const {
241
+ return {Sleef_erfcd2_u15(_vec0), Sleef_erfcd2_u15(_vec1)};
242
+ }
243
+ Vectorized<double> C10_ALWAYS_INLINE exp() const {
244
+ return {Sleef_expd2_u10(_vec0), Sleef_expd2_u10(_vec1)};
245
+ }
246
+ Vectorized<double> C10_ALWAYS_INLINE exp2() const {
247
+ return {Sleef_exp2d2_u10(_vec0), Sleef_exp2d2_u10(_vec1)};
248
+ }
249
+ Vectorized<double> expm1() const {
250
+ return {Sleef_expm1d2_u10(_vec0), Sleef_expm1d2_u10(_vec1)};
251
+ }
252
+
253
+ Vectorized<double> lgamma() const __ubsan_ignore_undefined__ {
254
+ return {Sleef_lgammad2_u10(_vec0), Sleef_lgammad2_u10(_vec1)};
255
+ }
256
+
257
+ Vectorized<double> erfinv() const {
258
+ return map(calc_erfinv);
259
+ }
260
+
261
+ Vectorized<double> angle() const {
262
+ auto tmp = blendv(
263
+ Vectorized<double>(0), Vectorized<double>(c10::pi<double>), *this < Vectorized<double>(0));
264
+ return blendv(tmp, *this, isnan());
265
+ }
266
+ Vectorized<double> real() const {
267
+ return *this;
268
+ }
269
+ Vectorized<double> imag() const {
270
+ return Vectorized<double>{0};
271
+ }
272
+ Vectorized<double> conj() const {
273
+ return *this;
274
+ }
275
+
276
+ Vectorized<double> C10_ALWAYS_INLINE log() const {
277
+ return {Sleef_logd2_u10(_vec0), Sleef_logd2_u10(_vec1)};
278
+ }
279
+ Vectorized<double> C10_ALWAYS_INLINE log10() const {
280
+ return {Sleef_log10d2_u10(_vec0), Sleef_log10d2_u10(_vec1)};
281
+ }
282
+ Vectorized<double> C10_ALWAYS_INLINE log1p() const {
283
+ return {Sleef_log1pd2_u10(_vec0), Sleef_log1pd2_u10(_vec1)};
284
+ }
285
+ Vectorized<double> C10_ALWAYS_INLINE log2() const {
286
+ return {Sleef_log2d2_u10(_vec0), Sleef_log2d2_u10(_vec1)};
287
+ }
288
+ Vectorized<double> C10_ALWAYS_INLINE ceil() const {
289
+ return {vec_ceil(_vec0), vec_ceil(_vec1)};
290
+ }
291
+ Vectorized<double> C10_ALWAYS_INLINE cos() const {
292
+ return {Sleef_cosd2_u10(_vec0), Sleef_cosd2_u10(_vec1)};
293
+ }
294
+ Vectorized<double> C10_ALWAYS_INLINE cosh() const {
295
+ return {Sleef_coshd2_u10(_vec0), Sleef_coshd2_u10(_vec1)};
296
+ }
297
+ Vectorized<double> C10_ALWAYS_INLINE floor() const {
298
+ return {vec_floor(_vec0), vec_floor(_vec1)};
299
+ }
300
+ Vectorized<double> C10_ALWAYS_INLINE neg() const {
301
+ return {vec_neg(_vec0), vec_neg(_vec1)};
302
+ }
303
+ Vectorized<double> C10_ALWAYS_INLINE round() const {
304
+ return {vec_rint(_vec0), vec_rint(_vec1)};
305
+ }
306
+ Vectorized<double> C10_ALWAYS_INLINE sin() const {
307
+ return {Sleef_sind2_u10(_vec0), Sleef_sind2_u10(_vec1)};
308
+ }
309
+ Vectorized<double> C10_ALWAYS_INLINE sinh() const {
310
+ return {Sleef_sinhd2_u10(_vec0), Sleef_sinhd2_u10(_vec1)};
311
+ }
312
+ Vectorized<double> C10_ALWAYS_INLINE tan() const {
313
+ return {Sleef_tand2_u10(_vec0), Sleef_tand2_u10(_vec1)};
314
+ }
315
+ Vectorized<double> C10_ALWAYS_INLINE tanh() const {
316
+ return {Sleef_tanhd2_u10(_vec0), Sleef_tanhd2_u10(_vec1)};
317
+ }
318
+ Vectorized<double> C10_ALWAYS_INLINE trunc() const {
319
+ return {vec_trunc(_vec0), vec_trunc(_vec1)};
320
+ }
321
+
322
+ Vectorized<double> C10_ALWAYS_INLINE frac() const {
323
+ return *this - trunc();
324
+ }
325
+
326
+ Vectorized<double> C10_ALWAYS_INLINE sqrt() const {
327
+ return {vec_sqrt(_vec0), vec_sqrt(_vec1)};
328
+ }
329
+ Vectorized<double> C10_ALWAYS_INLINE reciprocal() const {
330
+ return {
331
+ vec_div(vd_one, _vec0), // vec_re(_vec0) is only an estimate, so divide exactly.
332
+ vec_div(vd_one, _vec1)};
333
+ }
334
+ Vectorized<double> C10_ALWAYS_INLINE rsqrt() const {
335
+ return sqrt().reciprocal();
336
+ }
337
+
338
+ Vectorized<double> C10_ALWAYS_INLINE pow(const Vectorized<double>& b) const {
339
+ return {Sleef_powd2_u10(_vec0, b._vec0), Sleef_powd2_u10(_vec1, b._vec1)};
340
+ }
341
+ Vectorized<double> C10_ALWAYS_INLINE fmod(const Vectorized<double>& b) const {
342
+ return {Sleef_fmodd2(_vec0, b._vec0),Sleef_fmodd2(_vec1, b._vec1)};
343
+ }
344
+
345
+ Vectorized<double> hypot(const Vectorized<double>& b) const {
346
+ return {Sleef_hypotd2_u05(_vec0, b._vec0), Sleef_hypotd2_u05(_vec1, b._vec1)};
347
+ }
348
+
349
+ Vectorized<double> nextafter(const Vectorized<double>& b) const {
350
+ return {Sleef_nextafterd2(_vec0, b._vec0), Sleef_nextafterd2(_vec1, b._vec1)};
351
+ }
352
+
353
+ Vectorized<double> igamma(const Vectorized<double>& x) const {
354
+ return mapbi(calc_igamma, x);
355
+ }
356
+
357
+ Vectorized<double> igammac(const Vectorized<double>& x) const {
358
+ return mapbi(calc_igammac, x);
359
+ }
360
+
361
+
362
+ Vectorized<double> i0() const {
363
+ return map(calc_i0);
364
+ }
365
+
366
+ Vectorized<double> i0e() const {
367
+ return map(calc_i0e);
368
+ }
369
+
370
+ Vectorized<double> digamma() const {
371
+ return map(calc_digamma);
372
+ }
373
+
374
+ Vectorized<double> _nor() const {
375
+ return {vec_nor(_vec0, _vec0), vec_nor(_vec1, _vec1)};
376
+ }
377
+
378
+ Vectorized<double> isnan() const {
379
+ auto x = *this;
380
+ auto ret = (x == x);
381
+ return ret._nor();
382
+ }
383
+
384
+ DEFINE_MEMBER_OP(operator==, double, vec_cmpeq)
385
+ DEFINE_MEMBER_OP(operator!=, double, vec_cmpne)
386
+ DEFINE_MEMBER_OP(operator<, double, vec_cmplt)
387
+ DEFINE_MEMBER_OP(operator<=, double, vec_cmple)
388
+ DEFINE_MEMBER_OP(operator>, double, vec_cmpgt)
389
+ DEFINE_MEMBER_OP(operator>=, double, vec_cmpge)
390
+ DEFINE_MEMBER_OP_AND_ONE(eq, double, vec_cmpeq)
391
+ DEFINE_MEMBER_OP_AND_ONE(ne, double, vec_cmpne)
392
+ DEFINE_MEMBER_OP_AND_ONE(lt, double, vec_cmplt)
393
+ DEFINE_MEMBER_OP_AND_ONE(le, double, vec_cmple)
394
+ DEFINE_MEMBER_OP_AND_ONE(gt, double, vec_cmpgt)
395
+ DEFINE_MEMBER_OP_AND_ONE(ge, double, vec_cmpge)
396
+ DEFINE_MEMBER_OP(operator+, double, vec_add)
397
+ DEFINE_MEMBER_OP(operator-, double, vec_sub)
398
+ DEFINE_MEMBER_OP(operator*, double, vec_mul)
399
+ DEFINE_MEMBER_OP(operator/, double, vec_div)
400
+ DEFINE_MEMBER_OP(maximum, double, vec_max_nan2)
401
+ DEFINE_MEMBER_OP(minimum, double, vec_min_nan2)
402
+ DEFINE_MEMBER_OP(operator&, double, vec_and)
403
+ DEFINE_MEMBER_OP(operator|, double, vec_or)
404
+ DEFINE_MEMBER_OP(operator^, double, vec_xor)
405
+ DEFINE_MEMBER_TERNARY_OP(madd, double, vec_madd)
406
+ };
407
+ template <>
408
+ Vectorized<double> inline maximum(
409
+ const Vectorized<double>& a,
410
+ const Vectorized<double>& b) {
411
+ return a.maximum(b);
412
+ }
413
+
414
+ template <>
415
+ Vectorized<double> inline minimum(
416
+ const Vectorized<double>& a,
417
+ const Vectorized<double>& b) {
418
+ return a.minimum(b);
419
+ }
420
+ } // namespace
421
+ } // namespace vec
422
+ } // namespace at
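Before the next file, a note on the loadu()/store() tail handling that this double header and the int64 header below share: when count is smaller than size(), the elements are staged through a zero-initialized, suitably aligned scratch buffer so the full-width vector load or store never touches memory past count elements. The following is a standalone sketch of that pattern under simplified assumptions; Vec4d and loadu_partial are illustrative names, not part of the header, and plain memcpy stands in for vec_vsx_ld.

#include <algorithm>
#include <cstring>

// Stage a short input through a zeroed, aligned buffer, then do a full-width
// load from the buffer. Tail lanes come out as well-defined zeros and no
// out-of-bounds read happens on the source.
struct Vec4d {
  alignas(32) double lanes[4];

  static Vec4d loadu_partial(const double* ptr, int count) {
    alignas(32) double tmp[4] = {};
    std::memcpy(tmp, ptr, std::min(count, 4) * sizeof(double));
    Vec4d v;
    std::memcpy(v.lanes, tmp, sizeof(tmp)); // stands in for the vector load
    return v;
  }
};

The symmetric store path writes the whole vector into the scratch buffer first and then copies only count elements back out.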
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/vsx/vec256_int64_vsx.h ADDED
@@ -0,0 +1,251 @@
1
+ #pragma once
2
+
3
+ #include <ATen/cpu/vec/intrinsics.h>
4
+ #include <ATen/cpu/vec/vec_base.h>
5
+ #include <ATen/cpu/vec/vec256/vsx/vsx_helpers.h>
6
+ namespace at {
7
+ namespace vec {
8
+ // See Note [CPU_CAPABILITY namespace]
9
+ inline namespace CPU_CAPABILITY {
10
+
11
+ template <>
12
+ class Vectorized<int64_t> {
13
+ private:
14
+ union {
15
+ struct {
16
+ vint64 _vec0;
17
+ vint64 _vec1;
18
+ };
19
+ struct {
20
+ vbool64 _vecb0;
21
+ vbool64 _vecb1;
22
+ };
23
+
24
+ } __attribute__((__may_alias__));
25
+
26
+ public:
27
+ using value_type = int64_t;
28
+ using vec_internal_type = vint64;
29
+ using vec_internal_mask_type = vbool64;
30
+ using size_type = int;
31
+ using ElementType = signed long long;
32
+ static constexpr size_type size() {
33
+ return 4;
34
+ }
35
+ Vectorized() {}
36
+ C10_ALWAYS_INLINE Vectorized(vint64 v) : _vec0{v}, _vec1{v} {}
37
+ C10_ALWAYS_INLINE Vectorized(vbool64 vmask) : _vecb0{vmask}, _vecb1{vmask} {}
38
+ C10_ALWAYS_INLINE Vectorized(vint64 v1, vint64 v2) : _vec0{v1}, _vec1{v2} {}
39
+ C10_ALWAYS_INLINE Vectorized(vbool64 v1, vbool64 v2) : _vecb0{v1}, _vecb1{v2} {}
40
+ C10_ALWAYS_INLINE Vectorized(int64_t scalar)
41
+ : _vec0{vec_splats(scalar)}, _vec1{vec_splats(scalar)} {}
42
+ C10_ALWAYS_INLINE Vectorized(
43
+ int64_t scalar1,
44
+ int64_t scalar2,
45
+ int64_t scalar3,
46
+ int64_t scalar4)
47
+ : _vec0{vint64{scalar1, scalar2}}, _vec1{vint64{scalar3, scalar4}} {}
48
+
49
+ C10_ALWAYS_INLINE const vec_internal_type& vec0() const {
50
+ return _vec0;
51
+ }
52
+ C10_ALWAYS_INLINE const vec_internal_type& vec1() const {
53
+ return _vec1;
54
+ }
55
+
56
+ template <uint64_t mask>
57
+ static std::enable_if_t<mask == 0, Vectorized<int64_t>> C10_ALWAYS_INLINE
58
+ blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
59
+ return a;
60
+ }
61
+
62
+ template <uint64_t mask>
63
+ static std::enable_if_t<mask == 3, Vectorized<int64_t>> C10_ALWAYS_INLINE
64
+ blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
65
+ return {b._vec0, a._vec1};
66
+ }
67
+
68
+ template <uint64_t mask>
69
+ static std::enable_if_t<(mask & 15) == 15, Vectorized<int64_t>> C10_ALWAYS_INLINE
70
+ blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
71
+ return b;
72
+ }
73
+
74
+ template <uint64_t mask>
75
+ static std::enable_if_t<(mask > 0 && mask < 3), Vectorized<int64_t>> C10_ALWAYS_INLINE
76
+ blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
77
+ constexpr uint64_t g0 = (mask & 1) * 0xffffffffffffffff;
78
+ constexpr uint64_t g1 = ((mask & 2) >> 1) * 0xffffffffffffffff;
79
+ const vbool64 mask_1st = (vbool64){g0, g1};
80
+ return {(vint64)vec_sel(a._vec0, b._vec0, (vbool64)mask_1st), a._vec1};
81
+ }
82
+
83
+ template <uint64_t mask>
84
+ static std::enable_if_t<(mask > 3) && (mask & 3) == 0, Vectorized<int64_t>>
85
+ C10_ALWAYS_INLINE blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
86
+ constexpr uint64_t g0_2 = ((mask & 4) >> 2) * 0xffffffffffffffff;
87
+ constexpr uint64_t g1_2 = ((mask & 8) >> 3) * 0xffffffffffffffff;
88
+
89
+ const vbool64 mask_2nd = (vbool64){g0_2, g1_2};
90
+ return {a._vec0, (vint64)vec_sel(a._vec1, b._vec1, (vbool64)mask_2nd)};
91
+ }
92
+
93
+ template <uint64_t mask>
94
+ static std::enable_if_t<
95
+ (mask > 3) && (mask & 3) != 0 && (mask & 15) != 15,
96
+ Vectorized<int64_t>>
97
+ C10_ALWAYS_INLINE blend(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
98
+ constexpr uint64_t g0 = (mask & 1) * 0xffffffffffffffff;
99
+ constexpr uint64_t g1 = ((mask & 2) >> 1) * 0xffffffffffffffff;
100
+ constexpr uint64_t g0_2 = ((mask & 4) >> 2) * 0xffffffffffffffff;
101
+ constexpr uint64_t g1_2 = ((mask & 8) >> 3) * 0xffffffffffffffff;
102
+
103
+ const vbool64 mask_1st = (vbool64){g0, g1};
104
+ const vbool64 mask_2nd = (vbool64){g0_2, g1_2};
105
+ return {
106
+ (vint64)vec_sel(a._vec0, b._vec0, (vbool64)mask_1st),
107
+ (vint64)vec_sel(a._vec1, b._vec1, (vbool64)mask_2nd)};
108
+ }
109
+
110
+ static Vectorized<int64_t> C10_ALWAYS_INLINE blendv(
111
+ const Vectorized<int64_t>& a,
112
+ const Vectorized<int64_t>& b,
113
+ const Vectorized<int64_t>& mask) {
114
+ // the mask used here is returned by a comparison of vec256
115
+
116
+ return {
117
+ vec_sel(a._vec0, b._vec0, mask._vecb0),
118
+ vec_sel(a._vec1, b._vec1, mask._vecb1)};
119
+ }
120
+ template <typename step_t>
121
+ static Vectorized<int64_t> arange(int64_t base = 0., step_t step = static_cast<step_t>(1)) {
122
+ return Vectorized<int64_t>(base, base + step, base + 2 * step, base + 3 * step);
123
+ }
124
+
125
+ static Vectorized<int64_t> C10_ALWAYS_INLINE
126
+ set(const Vectorized<int64_t>& a,
127
+ const Vectorized<int64_t>& b,
128
+ size_t count = size()) {
129
+ switch (count) {
130
+ case 0:
131
+ return a;
132
+ case 1:
133
+ return blend<1>(a, b);
134
+ case 2:
135
+ return blend<3>(a, b);
136
+ case 3:
137
+ return blend<7>(a, b);
138
+ }
139
+
140
+ return b;
141
+ }
142
+ static Vectorized<value_type> C10_ALWAYS_INLINE
143
+ loadu(const void* ptr, int count = size()) {
144
+ if (count == size()) {
145
+ static_assert(sizeof(double) == sizeof(value_type));
146
+ const double* dptr = reinterpret_cast<const double*>(ptr);
147
+ return {// treat it as double load
148
+ (vint64)vec_vsx_ld(offset0, dptr),
149
+ (vint64)vec_vsx_ld(offset16, dptr)};
150
+ }
151
+
152
+ __at_align__ double tmp_values[size()] = {};
153
+ std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(value_type));
154
+
155
+ return {
156
+ (vint64)vec_vsx_ld(offset0, tmp_values),
157
+ (vint64)vec_vsx_ld(offset16, tmp_values)};
158
+ }
159
+ void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
160
+ if (count == size()) {
161
+ double* dptr = reinterpret_cast<double*>(ptr);
162
+ vec_vsx_st((vfloat64)_vec0, offset0, dptr);
163
+ vec_vsx_st((vfloat64)_vec1, offset16, dptr);
164
+ } else if (count > 0) {
165
+ __at_align__ double tmp_values[size()];
166
+ vec_vsx_st((vfloat64)_vec0, offset0, tmp_values);
167
+ vec_vsx_st((vfloat64)_vec1, offset16, tmp_values);
168
+ std::memcpy(
169
+ ptr, tmp_values, std::min(count, size()) * sizeof(value_type));
170
+ }
171
+ }
172
+ const int64_t& operator[](int idx) const = delete;
173
+ int64_t& operator[](int idx) = delete;
174
+
175
+ Vectorized<int64_t> angle() const {
176
+ return blendv(
177
+ Vectorized<int64_t>(0), Vectorized<int64_t>(c10::pi<int64_t>), *this < Vectorized<int64_t>(0));
178
+ }
179
+ Vectorized<int64_t> real() const {
180
+ return *this;
181
+ }
182
+ Vectorized<int64_t> imag() const {
183
+ return Vectorized<int64_t>{0};
184
+ }
185
+ Vectorized<int64_t> conj() const {
186
+ return *this;
187
+ }
188
+
189
+ Vectorized<int64_t> C10_ALWAYS_INLINE abs() const {
190
+ return {vec_abs(_vec0), vec_abs(_vec1)};
191
+ }
192
+
193
+ Vectorized<int64_t> C10_ALWAYS_INLINE neg() const {
194
+ return {vec_neg(_vec0), vec_neg(_vec1)};
195
+ }
196
+
197
+ DEFINE_MEMBER_UNARY_OP(operator~, int64_t, vec_not)
198
+ DEFINE_MEMBER_OP(operator==, int64_t, vec_cmpeq)
199
+ DEFINE_MEMBER_OP(operator!=, int64_t, vec_cmpne)
200
+ DEFINE_MEMBER_OP(operator<, int64_t, vec_cmplt)
201
+ DEFINE_MEMBER_OP(operator<=, int64_t, vec_cmple)
202
+ DEFINE_MEMBER_OP(operator>, int64_t, vec_cmpgt)
203
+ DEFINE_MEMBER_OP(operator>=, int64_t, vec_cmpge)
204
+ DEFINE_MEMBER_OP_AND_ONE(eq, int64_t, vec_cmpeq)
205
+ DEFINE_MEMBER_OP_AND_ONE(ne, int64_t, vec_cmpne)
206
+ DEFINE_MEMBER_OP_AND_ONE(lt, int64_t, vec_cmplt)
207
+ DEFINE_MEMBER_OP_AND_ONE(le, int64_t, vec_cmple)
208
+ DEFINE_MEMBER_OP_AND_ONE(gt, int64_t, vec_cmpgt)
209
+ DEFINE_MEMBER_OP_AND_ONE(ge, int64_t, vec_cmpge)
210
+ DEFINE_MEMBER_OP(operator+, int64_t, vec_add)
211
+ DEFINE_MEMBER_OP(operator-, int64_t, vec_sub)
212
+ DEFINE_MEMBER_OP(operator*, int64_t, vec_mul)
213
+ DEFINE_MEMBER_OP(operator/, int64_t, vec_div)
214
+ DEFINE_MEMBER_OP(maximum, int64_t, vec_max)
215
+ DEFINE_MEMBER_OP(minimum, int64_t, vec_min)
216
+ DEFINE_MEMBER_OP(operator&, int64_t, vec_and)
217
+ DEFINE_MEMBER_OP(operator|, int64_t, vec_or)
218
+ DEFINE_MEMBER_OP(operator^, int64_t, vec_xor)
219
+ };
220
+
221
+ template <>
222
+ Vectorized<int64_t> inline operator<<(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
223
+ vuint64 shift_vec0 = reinterpret_cast<vuint64>(b.vec0());
224
+ vuint64 shift_vec1 = reinterpret_cast<vuint64>(b.vec1()) ;
225
+ return Vectorized<int64_t>{vec_sl(a.vec0(), shift_vec0), vec_sl(a.vec1(), shift_vec1)};
226
+ }
227
+
228
+ template <>
229
+ Vectorized<int64_t> inline operator>>(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
230
+ vuint64 shift_vec0 = reinterpret_cast<vuint64>(b.vec0());
231
+ vuint64 shift_vec1 = reinterpret_cast<vuint64>(b.vec1()) ;
232
+ return Vectorized<int64_t>{vec_sr(a.vec0(), shift_vec0), vec_sr(a.vec1(), shift_vec1)};
233
+ }
234
+
235
+ template <>
236
+ Vectorized<int64_t> inline maximum(
237
+ const Vectorized<int64_t>& a,
238
+ const Vectorized<int64_t>& b) {
239
+ return a.maximum(b);
240
+ }
241
+
242
+ template <>
243
+ Vectorized<int64_t> inline minimum(
244
+ const Vectorized<int64_t>& a,
245
+ const Vectorized<int64_t>& b) {
246
+ return a.minimum(b);
247
+ }
248
+
249
+ } // namespace
250
+ } // namespace vec
251
+ } // namespace at
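One more note before the zarch file: the compile-time blend<mask>() used throughout these headers treats bit i of mask as "take lane i from b", with a cleared bit keeping lane i of a. The VSX code expands each bit into an all-ones 64-bit lane and hands that to vec_sel, and set(a, b, count) simply calls blend<1>, blend<3>, blend<7> for counts 1 to 3. Below is a scalar sketch of the same selection rule; blend4 is an illustrative name, not part of the header.

#include <array>
#include <cstdint>

// Lane-wise selection driven by a compile-time bit mask: bit i set -> b[i],
// bit i clear -> a[i]. This is the behaviour the vec_sel-based blend<mask>()
// specializations above implement for four 64-bit lanes.
template <uint64_t mask>
std::array<int64_t, 4> blend4(const std::array<int64_t, 4>& a,
                              const std::array<int64_t, 4>& b) {
  std::array<int64_t, 4> out{};
  for (int i = 0; i < 4; ++i) {
    out[i] = ((mask >> i) & 1) ? b[i] : a[i];
  }
  return out;
}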
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec256/zarch/vec256_zarch.h ADDED
@@ -0,0 +1,2797 @@
1
+ #include <cmath>
2
+ #include <cstring>
3
+ #include <limits>
4
+ #include <type_traits>
5
+ #include <utility>
6
+ #if defined(__clang__)
7
+ #include <sleef.h>
8
+ #elif defined(__GNUC__) || defined(__GNUG__)
9
+ #include <sleef.h>
10
+ #include <vecintrin.h>
11
+ #endif
12
+ #include <ATen/cpu/vec/intrinsics.h>
13
+ #include <ATen/cpu/vec/vec_base.h>
14
+ #include <c10/util/complex.h>
15
+
16
+ #define SLEEF_MEMORY_WORKAROUND
17
+
18
+ namespace at {
19
+ namespace vec {
20
+
21
+ // See Note [CPU_CAPABILITY namespace]
22
+ inline namespace CPU_CAPABILITY {
23
+
24
+ template <typename T>
25
+ constexpr bool is_zarch_implemented() {
26
+ return (
27
+ std::is_same<T, float>::value || std::is_same<T, double>::value ||
28
+ std::is_same<T, int8_t>::value || std::is_same<T, uint8_t>::value ||
29
+ std::is_same<T, uint16_t>::value || std::is_same<T, int16_t>::value ||
30
+ std::is_same<T, int32_t>::value || std::is_same<T, int64_t>::value);
31
+ }
32
+
33
+ template <typename T>
34
+ constexpr bool is_zarch_implemented_quant() {
35
+ return (
36
+ std::is_same<T, c10::qint32>::value ||
37
+ std::is_same<T, c10::qint8>::value ||
38
+ std::is_same<T, c10::quint8>::value);
39
+ }
40
+
41
+ template <typename T>
42
+ constexpr bool is_zarch_implemented_complex() {
43
+ return std::is_same<T, c10::complex<float>>::value ||
44
+ std::is_same<T, c10::complex<double>>::value;
45
+ }
46
+
47
+ constexpr int offset0 = 0;
48
+ constexpr int offset16 = 16;
49
+
50
+ template <int N>
51
+ struct VecBinaryType {
52
+ using type __attribute__((vector_size(16))) = uintmax_t;
53
+ };
54
+
55
+ template <>
56
+ struct VecBinaryType<8> {
57
+ using type = __attribute__((vector_size(16))) unsigned long long;
58
+ };
59
+
60
+ template <>
61
+ struct VecBinaryType<4> {
62
+ using type = __attribute__((vector_size(16))) unsigned int;
63
+ };
64
+
65
+ template <>
66
+ struct VecBinaryType<2> {
67
+ using type = __attribute__((vector_size(16))) unsigned short;
68
+ };
69
+
70
+ template <>
71
+ struct VecBinaryType<1> {
72
+ using type = __attribute__((vector_size(16))) unsigned char;
73
+ };
74
+
75
+ template <typename T>
76
+ struct VecInnerType {
77
+ using Type __attribute__((vector_size(16))) = T;
78
+ using BinaryType = typename VecBinaryType<sizeof(T)>::type;
79
+ using ElementType = T;
80
+ static constexpr int size = 16 / sizeof(T);
81
+ };
82
+
83
+ // define for int64_t properly for load
84
+ template <>
85
+ struct VecInnerType<int64_t> {
86
+ using Type = __attribute__((vector_size(16))) signed long long;
87
+ using ElementType = signed long long;
88
+ using BinaryType = typename VecBinaryType<sizeof(signed long long)>::type;
89
+ static constexpr int size = 16 / sizeof(signed long long);
90
+ };
91
+
92
+ template <typename T>
93
+ using ZSimdVect = typename VecInnerType<T>::Type;
94
+ template <typename T>
95
+ using ZSimdVectBinary = typename VecInnerType<T>::BinaryType;
96
+ template <typename T>
97
+ using ZSimdVectElement = typename VecInnerType<T>::ElementType;
98
+
99
+ constexpr int blendChoiceInner(
100
+ const uint64_t mask,
101
+ const uint64_t half1 = 0xF,
102
+ const uint64_t half2 = 0xF0) {
103
+ uint64_t none = 0;
104
+ uint64_t both = half1 | half2;
105
+ // clamp it between 0 and both
106
+ auto res_mask = mask & both;
107
+ // return (a._vec0, a._vec1)
108
+ if (res_mask == none)
109
+ return 0;
110
+ // return (b._vec0,b._vec1)
111
+ else if (res_mask == both)
112
+ return 1;
113
+ // return (b._vec0, a._vec1)
114
+ else if (res_mask == half1)
115
+ return 2;
116
+ // return (a._vec0,b._vec1)
117
+ else if (res_mask == half2)
118
+ return 3;
119
+ // return (*_vec0,a._vec1)
120
+ else if (res_mask > 0 && res_mask < half1)
121
+ return 4;
122
+ // return (*_vec0,b._vec1)
123
+ else if ((res_mask & half2) == half2)
124
+ return 5;
125
+ // return (a._vec0,*_vec1)
126
+ else if ((res_mask & half1) == 0 && res_mask > half1)
127
+ return 6;
128
+ // return (b._vec0,*_vec1)
129
+ else if ((res_mask & half1) == half1 && res_mask > half1)
130
+ return 7;
131
+ // return (*_vec0,*_vec1)
132
+ return 8;
133
+ }
134
+
135
+ // this mapping can be used to emulate blend faster
136
+ template <int Z>
137
+ constexpr int blendChoice(const uint64_t mask) {
138
+ static_assert(Z < 1 || Z > 8, "not implemented");
139
+ return blendChoiceInner(mask);
140
+ }
141
+
142
+ template <>
143
+ constexpr int blendChoice<1>(const uint64_t mask) {
144
+ return blendChoiceInner(mask, 0x0000FFFF, 0xFFFF0000);
145
+ }
146
+
147
+ template <>
148
+ constexpr int blendChoice<2>(const uint64_t mask) {
149
+ return blendChoiceInner(mask, 0x00FF, 0xFF00);
150
+ }
151
+
152
+ template <>
153
+ constexpr int blendChoice<4>(const uint64_t mask) {
154
+ return blendChoiceInner(mask, 0xF, 0xF0);
155
+ }
156
+
157
+ template <>
158
+ constexpr int blendChoice<8>(const uint64_t mask) {
159
+ // clamp it 0 and 0xF
160
+ return blendChoiceInner(mask, 0x3, 0xC);
161
+ }
162
+
163
+ template <int N>
164
+ constexpr auto GetMask1(const uint64_t mask) {
165
+ return typename VecBinaryType<N>::type{};
166
+ }
167
+
168
+ template <int N>
169
+ constexpr auto GetMask2(const uint64_t mask) {
170
+ return typename VecBinaryType<N>::type{};
171
+ }
172
+
173
+ template <>
174
+ constexpr auto GetMask1<1>(const uint64_t mask) {
175
+ constexpr uint8_t t = (int)0xFF;
176
+ uint8_t g0 = (mask & 1) * t;
177
+ uint8_t g1 = ((mask & 2) >> 1) * t;
178
+ uint8_t g2 = ((mask & 4) >> 2) * t;
179
+ uint8_t g3 = ((mask & 8) >> 3) * t;
180
+ uint8_t g4 = ((mask & 16) >> 4) * t;
181
+ uint8_t g5 = ((mask & 32) >> 5) * t;
182
+ uint8_t g6 = ((mask & 64) >> 6) * t;
183
+ uint8_t g7 = ((mask & 128) >> 7) * t;
184
+ uint8_t g8 = ((mask & 256) >> 8) * t;
185
+ uint8_t g9 = ((mask & 512) >> 9) * t;
186
+ uint8_t g10 = ((mask & 1024) >> 10) * t;
187
+ uint8_t g11 = ((mask & 2048) >> 11) * t;
188
+ uint8_t g12 = ((mask & 4096) >> 12) * t;
189
+ uint8_t g13 = ((mask & 8192) >> 13) * t;
190
+ uint8_t g14 = ((mask & 16384) >> 14) * t;
191
+ uint8_t g15 = ((mask & 32768) >> 15) * t;
192
+ return (typename VecBinaryType<1>::type){
193
+ g0, g1, g2, g3, g4, g5, g6, g7, g8, g9, g10, g11, g12, g13, g14, g15};
194
+ }
195
+
196
+ template <>
197
+ constexpr auto GetMask2<1>(const uint64_t mask) {
198
+ uint64_t mask2 = (mask & 0xFFFFFFFF) >> 16;
199
+ return GetMask1<1>(mask2);
200
+ }
201
+
202
+ template <>
203
+ constexpr auto GetMask1<2>(const uint64_t mask) {
204
+ constexpr uint16_t t = (int)0xFFFF;
205
+ uint16_t g0 = (mask & 1) * t;
206
+ uint16_t g1 = ((mask & 2) >> 1) * t;
207
+ uint16_t g2 = ((mask & 4) >> 2) * t;
208
+ uint16_t g3 = ((mask & 8) >> 3) * t;
209
+ uint16_t g4 = ((mask & 16) >> 4) * t;
210
+ uint16_t g5 = ((mask & 32) >> 5) * t;
211
+ uint16_t g6 = ((mask & 64) >> 6) * t;
212
+ uint16_t g7 = ((mask & 128) >> 7) * t;
213
+ return (typename VecBinaryType<2>::type){g0, g1, g2, g3, g4, g5, g6, g7};
214
+ }
215
+
216
+ template <>
217
+ constexpr auto GetMask2<2>(const uint64_t mask) {
218
+ uint64_t mask2 = (mask & 0xFFFF) >> 8;
219
+ return GetMask1<2>(mask2);
220
+ }
221
+
222
+ template <>
223
+ constexpr auto GetMask1<4>(const uint64_t mask) {
224
+ uint32_t g0 = (mask & 1) * 0xffffffff;
225
+ uint32_t g1 = ((mask & 2) >> 1) * 0xffffffff;
226
+ uint32_t g2 = ((mask & 4) >> 2) * 0xffffffff;
227
+ uint32_t g3 = ((mask & 8) >> 3) * 0xffffffff;
228
+ return (typename VecBinaryType<4>::type){g0, g1, g2, g3};
229
+ }
230
+
231
+ template <>
232
+ constexpr auto GetMask2<4>(const uint64_t mask) {
233
+ uint64_t mask2 = (mask & 0xFF) >> 4;
234
+ return GetMask1<4>(mask2);
235
+ }
236
+
237
+ template <>
238
+ constexpr auto GetMask1<8>(const uint64_t mask) {
239
+ uint64_t g0 = (mask & 1) * 0xffffffffffffffff;
240
+ uint64_t g1 = ((mask & 2) >> 1) * 0xffffffffffffffff;
241
+ return (typename VecBinaryType<8>::type){g0, g1};
242
+ }
243
+
244
+ template <>
245
+ constexpr auto GetMask2<8>(const uint64_t mask) {
246
+ uint64_t mask2 = (mask & 0xF) >> 2;
247
+ return GetMask1<8>(mask2);
248
+ }
249
+
250
+ template <int Z>
251
+ constexpr int maskForComplex(uint32_t mask) {
252
+ return 0;
253
+ }
254
+
255
+ template <>
256
+ constexpr int maskForComplex<8>(uint32_t mask) {
257
+ mask = mask & 0xF;
258
+ int complex_mask = 0;
259
+ if (mask & 1)
260
+ complex_mask |= 3;
261
+ if (mask & 2)
262
+ complex_mask |= (3 << 2);
263
+ if (mask & 4)
264
+ complex_mask |= (3 << 4);
265
+ if (mask & 8)
266
+ complex_mask |= (3 << 6);
267
+ return complex_mask;
268
+ }
269
+
270
+ template <>
271
+ constexpr int maskForComplex<16>(uint32_t mask) {
272
+ mask = mask & 0x3;
273
+ int complex_mask = 0;
274
+ if (mask & 1)
275
+ complex_mask |= 3;
276
+ if (mask & 2)
277
+ complex_mask |= (3 << 2);
278
+ return complex_mask;
279
+ }
280
+
281
+ template <typename T = c10::complex<float>>
282
+ constexpr int blend_choice() {
283
+ return 0xAA;
284
+ }
285
+
286
+ template <>
287
+ constexpr int blend_choice<c10::complex<double>>() {
288
+ return 0x0A;
289
+ }
290
+
291
+ constexpr int64_t allbitset(int16_t x) {
292
+ int64_t onex = 1;
293
+ return (onex << x) - onex;
294
+ }
295
+
296
+ namespace { /* unnamed namespace */
297
+
298
+ ZSimdVect<float> vec_mergee(ZSimdVect<float> x, ZSimdVect<float> y) {
299
+ constexpr ZSimdVectBinary<uint8_t> mergee_mask{
300
+ 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27};
301
+ return vec_perm(x, y, mergee_mask);
302
+ }
303
+
304
+ ZSimdVect<double> vec_mergee(ZSimdVect<double> x, ZSimdVect<double> y) {
305
+ return vec_mergeh(x, y);
306
+ }
307
+
308
+ ZSimdVect<float> vec_mergeo(ZSimdVect<float> x, ZSimdVect<float> y) {
309
+ constexpr ZSimdVectBinary<uint8_t> mergeo_mask{
310
+ 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31};
311
+ return vec_perm(x, y, mergeo_mask);
312
+ }
313
+
314
+ ZSimdVect<double> vec_mergeo(ZSimdVect<double> x, ZSimdVect<double> y) {
315
+ return vec_mergel(x, y);
316
+ }
317
+
318
+ } /* unnamed namespace */
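The vec_mergee/vec_mergeo helpers just defined interleave the even-indexed and odd-indexed lanes of two vectors, which the complex code later in this file uses to split and recombine real and imaginary parts. A scalar sketch of what the two permutes above produce for 4-lane float vectors (mergee/mergeo below are illustrative stand-ins, not part of the header):

#include <array>

// Scalar model of vec_mergee / vec_mergeo above for 4-lane float vectors:
// mergee interleaves the even-indexed lanes of x and y, mergeo the odd ones.
static std::array<float, 4> mergee(const std::array<float, 4>& x,
                                   const std::array<float, 4>& y) {
  return {x[0], y[0], x[2], y[2]};
}
static std::array<float, 4> mergeo(const std::array<float, 4>& x,
                                   const std::array<float, 4>& y) {
  return {x[1], y[1], x[3], y[3]};
}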
319
+
320
+ //
321
+ template <typename T>
322
+ constexpr auto GetBpermZeroMask() {
323
+ return ZSimdVectBinary<uint8_t>{
324
+ 128,
325
+ 128,
326
+ 128,
327
+ 128,
328
+ 128,
329
+ 128,
330
+ 128,
331
+ 128,
332
+ 128,
333
+ 128,
334
+ 128,
335
+ 128,
336
+ 96,
337
+ 64,
338
+ 32,
339
+ 0};
340
+ }
341
+
342
+ template <>
343
+ constexpr auto GetBpermZeroMask<double>() {
344
+ return ZSimdVectBinary<uint8_t>{
345
+ 128,
346
+ 128,
347
+ 128,
348
+ 128,
349
+ 128,
350
+ 128,
351
+ 128,
352
+ 128,
353
+ 128,
354
+ 128,
355
+ 128,
356
+ 128,
357
+ 128,
358
+ 128,
359
+ 64,
360
+ 0};
361
+ }
362
+
363
+ constexpr auto GetSwapMaskFloat() {
364
+ return ZSimdVectBinary<uint8_t>{
365
+ 4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11};
366
+ }
367
+
368
+ template <typename T>
369
+ struct Vectorized<T, std::enable_if_t<is_zarch_implemented<T>()>> {
370
+ public:
371
+ using value_type = T;
372
+ using vtype = ZSimdVect<T>;
373
+ using vmaskType = ZSimdVectBinary<T>;
374
+ using size_type = int;
375
+ // because of gcc inconsistency for int64_t we are obliged to use this, not
376
+ // value_type
377
+ using ElementType = ZSimdVectElement<T>;
378
+ using vinner_data = std::pair<vtype, vtype>;
379
+
380
+ private:
381
+ vtype _vec0;
382
+ vtype _vec1;
383
+
384
+ public:
385
+ static constexpr size_type size() {
386
+ return VECTOR_WIDTH / sizeof(ElementType);
387
+ }
388
+ Vectorized() {}
389
+
390
+ C10_ALWAYS_INLINE Vectorized(vtype v) : _vec0{v}, _vec1{v} {}
391
+ C10_ALWAYS_INLINE Vectorized(const vinner_data &v) : _vec0{v.first}, _vec1{v.second} {}
392
+ C10_ALWAYS_INLINE Vectorized(vtype v1, vtype v2) : _vec0{v1}, _vec1{v2} {}
393
+ C10_ALWAYS_INLINE Vectorized(T s)
394
+ : _vec0{vec_splats((ElementType)s)}, _vec1{vec_splats((ElementType)s)} {}
395
+
396
+ static Vectorized<value_type> C10_ALWAYS_INLINE
397
+ loadu(const void* ptr, int count = size()) {
398
+ if (count == size()) {
399
+ return {
400
+ vec_xl(offset0, reinterpret_cast<const ElementType*>(ptr)),
401
+ vec_xl(offset16, reinterpret_cast<const ElementType*>(ptr))};
402
+ }
403
+
404
+ __at_align__ ElementType tmp_values[size()] = {};
405
+ std::memcpy(tmp_values, ptr, std::min(count, size()) * sizeof(ElementType));
406
+
407
+ return {
408
+ vec_xl(offset0, reinterpret_cast<const ElementType*>(tmp_values)),
409
+ vec_xl(offset16, reinterpret_cast<const ElementType*>(tmp_values))};
410
+ }
411
+
412
+ static Vectorized<value_type> C10_ALWAYS_INLINE
413
+ loadu_one_fourth(const void* ptr) {
414
+ // load only first 8 bytes
415
+ // only intended to be used with uint8_t
416
+ return loadu(ptr, 8 / sizeof(ElementType));
417
+ }
418
+
419
+ void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
420
+ if (count == size()) {
421
+ vec_xst(_vec0, offset0, reinterpret_cast<ElementType*>(ptr));
422
+ vec_xst(_vec1, offset16, reinterpret_cast<ElementType*>(ptr));
423
+ } else if (count > 0) {
424
+ __at_align__ ElementType tmp_values[size()];
425
+ vec_xst(_vec0, offset0, reinterpret_cast<ElementType*>(tmp_values));
426
+ vec_xst(_vec1, offset16, reinterpret_cast<ElementType*>(tmp_values));
427
+ std::memcpy(
428
+ ptr, tmp_values, std::min(count, size()) * sizeof(ElementType));
429
+ }
430
+ }
431
+
432
+ C10_ALWAYS_INLINE const vtype& vec0() const {
433
+ return _vec0;
434
+ }
435
+
436
+ C10_ALWAYS_INLINE const vtype& vec1() const {
437
+ return _vec1;
438
+ }
439
+
440
+ C10_ALWAYS_INLINE vinner_data data() const {
441
+ return std::make_pair<>(_vec0, _vec1);
442
+ }
443
+
444
+ C10_ALWAYS_INLINE operator vinner_data() const {
445
+ return data();
446
+ }
447
+
448
+ C10_ALWAYS_INLINE const vmaskType vecb0() const {
449
+ return (vmaskType)_vec0;
450
+ }
451
+ C10_ALWAYS_INLINE const vmaskType vecb1() const {
452
+ return (vmaskType)_vec1;
453
+ }
454
+
455
+ static Vectorized<T> C10_ALWAYS_INLINE blendv(
456
+ const Vectorized<T>& a,
457
+ const Vectorized<T>& b,
458
+ const Vectorized<T>& mask) {
459
+ return {
460
+ vec_sel(a._vec0, b._vec0, mask.vecb0()),
461
+ vec_sel(a._vec1, b._vec1, mask.vecb1())};
462
+ }
463
+
464
+ template <typename U = T, std::enable_if_t<(sizeof(U) == 8), int> = 0>
465
+ C10_ALWAYS_INLINE Vectorized(T s1, T s2, T s3, T s4)
466
+ : _vec0{s1, s2}, _vec1{s3, s4} {}
467
+
468
+ template <typename U = T, std::enable_if_t<(sizeof(U) == 4), int> = 0>
469
+ C10_ALWAYS_INLINE Vectorized(T s1, T s2, T s3, T s4, T s5, T s6, T s7, T s8)
470
+ : _vec0{s1, s2, s3, s4}, _vec1{s5, s6, s7, s8} {}
471
+
472
+ template <typename U = T, std::enable_if_t<(sizeof(U) == 2), int> = 0>
473
+ C10_ALWAYS_INLINE Vectorized(
474
+ T s1,
475
+ T s2,
476
+ T s3,
477
+ T s4,
478
+ T s5,
479
+ T s6,
480
+ T s7,
481
+ T s8,
482
+ T s9,
483
+ T s10,
484
+ T s11,
485
+ T s12,
486
+ T s13,
487
+ T s14,
488
+ T s15,
489
+ T s16)
490
+ : _vec0{s1, s2, s3, s4, s5, s6, s7, s8},
491
+ _vec1{s9, s10, s11, s12, s13, s14, s15, s16} {}
492
+
493
+ template <typename U = T, std::enable_if_t<(sizeof(U) == 1), int> = 0>
494
+ C10_ALWAYS_INLINE Vectorized(
495
+ T s1,
496
+ T s2,
497
+ T s3,
498
+ T s4,
499
+ T s5,
500
+ T s6,
501
+ T s7,
502
+ T s8,
503
+ T s9,
504
+ T s10,
505
+ T s11,
506
+ T s12,
507
+ T s13,
508
+ T s14,
509
+ T s15,
510
+ T s16,
511
+ T s17,
512
+ T s18,
513
+ T s19,
514
+ T s20,
515
+ T s21,
516
+ T s22,
517
+ T s23,
518
+ T s24,
519
+ T s25,
520
+ T s26,
521
+ T s27,
522
+ T s28,
523
+ T s29,
524
+ T s30,
525
+ T s31,
526
+ T s32)
527
+ : _vec0{s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15, s16},
528
+ _vec1{
529
+ s17,
530
+ s18,
531
+ s19,
532
+ s20,
533
+ s21,
534
+ s22,
535
+ s23,
536
+ s24,
537
+ s25,
538
+ s26,
539
+ s27,
540
+ s28,
541
+ s29,
542
+ s30,
543
+ s31,
544
+ s32} {}
545
+
546
+ template <typename step_t, typename U = T>
547
+ static std::enable_if_t<sizeof(U) == 8, Vectorized<T>> arange(
548
+ T base = 0,
549
+ step_t step = static_cast<step_t>(1)) {
550
+ return Vectorized<T>(base, base + step, base + 2 * step, base + 3 * step);
551
+ }
552
+
553
+ template <typename step_t, typename U = T>
554
+ static std::enable_if_t<sizeof(U) == 4, Vectorized<T>> arange(
555
+ T base = 0,
556
+ step_t step = static_cast<step_t>(1)) {
557
+ return Vectorized<T>(
558
+ base,
559
+ base + step,
560
+ base + 2 * step,
561
+ base + 3 * step,
562
+ base + 4 * step,
563
+ base + 5 * step,
564
+ base + 6 * step,
565
+ base + 7 * step);
566
+ }
567
+
568
+ template <typename step_t, typename U = T>
569
+ static std::enable_if_t<sizeof(U) == 2, Vectorized<T>> arange(
570
+ T base = 0,
571
+ step_t step = static_cast<step_t>(1)) {
572
+ return Vectorized<T>(
573
+ base,
574
+ base + step,
575
+ base + 2 * step,
576
+ base + 3 * step,
577
+ base + 4 * step,
578
+ base + 5 * step,
579
+ base + 6 * step,
580
+ base + 7 * step,
581
+ base + 8 * step,
582
+ base + 9 * step,
583
+ base + 10 * step,
584
+ base + 11 * step,
585
+ base + 12 * step,
586
+ base + 13 * step,
587
+ base + 14 * step,
588
+ base + 15 * step);
589
+ }
590
+
591
+ template <typename step_t, typename U = T>
592
+ static std::enable_if_t<sizeof(U) == 1, Vectorized<T>> arange(
593
+ T base = 0,
594
+ step_t step = static_cast<step_t>(1)) {
595
+ return Vectorized<T>(
596
+ base,
597
+ base + step,
598
+ base + 2 * step,
599
+ base + 3 * step,
600
+ base + 4 * step,
601
+ base + 5 * step,
602
+ base + 6 * step,
603
+ base + 7 * step,
604
+ base + 8 * step,
605
+ base + 9 * step,
606
+ base + 10 * step,
607
+ base + 11 * step,
608
+ base + 12 * step,
609
+ base + 13 * step,
610
+ base + 14 * step,
611
+ base + 15 * step,
612
+ base + 16 * step,
613
+ base + 17 * step,
614
+ base + 18 * step,
615
+ base + 19 * step,
616
+ base + 20 * step,
617
+ base + 21 * step,
618
+ base + 22 * step,
619
+ base + 23 * step,
620
+ base + 24 * step,
621
+ base + 25 * step,
622
+ base + 26 * step,
623
+ base + 27 * step,
624
+ base + 28 * step,
625
+ base + 29 * step,
626
+ base + 30 * step,
627
+ base + 31 * step);
628
+ }
629
+
630
+ // blend section
631
+ template <int64_t mask>
632
+ static std::enable_if_t<blendChoice<sizeof(T)>(mask) == 0, Vectorized<T>>
633
+ C10_ALWAYS_INLINE blend(const Vectorized<T>& a, const Vectorized<T>& b) {
634
+ return a;
635
+ }
636
+
637
+ template <int64_t mask>
638
+ static std::enable_if_t<blendChoice<sizeof(T)>(mask) == 1, Vectorized<T>>
639
+ C10_ALWAYS_INLINE blend(const Vectorized<T>& a, const Vectorized<T>& b) {
640
+ return b;
641
+ }
642
+
643
+ template <int64_t mask>
644
+ static std::enable_if_t<blendChoice<sizeof(T)>(mask) == 2, Vectorized<T>>
645
+ C10_ALWAYS_INLINE blend(const Vectorized<T>& a, const Vectorized<T>& b) {
646
+ return {b._vec0, a._vec1};
647
+ }
648
+
649
+ template <int64_t mask>
650
+ static std::enable_if_t<blendChoice<sizeof(T)>(mask) == 3, Vectorized<T>>
651
+ C10_ALWAYS_INLINE blend(const Vectorized<T>& a, const Vectorized<T>& b) {
652
+ return {a._vec0, b._vec1};
653
+ }
654
+
655
+ template <int64_t mask>
656
+ static std::enable_if_t<blendChoice<sizeof(T)>(mask) == 4, Vectorized<T>>
657
+ C10_ALWAYS_INLINE blend(const Vectorized<T>& a, const Vectorized<T>& b) {
658
+ const vmaskType mask_1st = GetMask1<sizeof(T)>(mask);
659
+ return {(vtype)vec_sel(a._vec0, b._vec0, mask_1st), a._vec1};
660
+ }
661
+
662
+ template <int64_t mask>
663
+ static std::enable_if_t<blendChoice<sizeof(T)>(mask) == 5, Vectorized<T>>
664
+ C10_ALWAYS_INLINE blend(const Vectorized<T>& a, const Vectorized<T>& b) {
665
+ const vmaskType mask_1st = GetMask1<sizeof(T)>(mask);
666
+ return {(vtype)vec_sel(a._vec0, b._vec0, mask_1st), b._vec1};
667
+ }
668
+
669
+ template <int64_t mask>
670
+ static std::enable_if_t<blendChoice<sizeof(T)>(mask) == 6, Vectorized<T>>
671
+ C10_ALWAYS_INLINE blend(const Vectorized<T>& a, const Vectorized<T>& b) {
672
+ const vmaskType mask_2nd = GetMask2<sizeof(T)>(mask);
673
+ // generated masks
674
+ return {a._vec0, (vtype)vec_sel(a._vec1, b._vec1, mask_2nd)};
675
+ }
676
+
677
+ template <int64_t mask>
678
+ static std::enable_if_t<blendChoice<sizeof(T)>(mask) == 7, Vectorized<T>>
679
+ C10_ALWAYS_INLINE blend(const Vectorized<T>& a, const Vectorized<T>& b) {
680
+ const vmaskType mask_2nd = GetMask2<sizeof(T)>(mask);
681
+ // generated masks
682
+ return {b._vec0, (vtype)vec_sel(a._vec1, b._vec1, mask_2nd)};
683
+ }
684
+
685
+ template <int64_t mask>
686
+ static std::enable_if_t<blendChoice<sizeof(T)>(mask) == 8, Vectorized<T>>
687
+ C10_ALWAYS_INLINE blend(const Vectorized<T>& a, const Vectorized<T>& b) {
688
+ const vmaskType mask_1st = GetMask1<sizeof(T)>(mask);
689
+ const vmaskType mask_2nd = GetMask2<sizeof(T)>(mask);
690
+ return {
691
+ (vtype)vec_sel(a._vec0, b._vec0, mask_1st),
692
+ (vtype)vec_sel(a._vec1, b._vec1, mask_2nd)};
693
+ }
694
+
695
+ template <int16_t Z, int16_t C>
696
+ static inline std::enable_if_t<(Z >= C), Vectorized<T>> set_inner(
697
+ const Vectorized<T>& a,
698
+ const Vectorized<T>& b,
699
+ size_t count) {
700
+ return b;
701
+ }
702
+
703
+ template <int16_t Z, int16_t C>
704
+ static inline std::enable_if_t<(Z < C), Vectorized<T>> set_inner(
705
+ const Vectorized<T>& a,
706
+ const Vectorized<T>& b,
707
+ size_t count) {
708
+ if (count == Z)
709
+ return blend<allbitset(Z)>(a, b);
710
+ else
711
+ return set_inner<Z + 1, C>(a, b, count);
712
+ }
713
+
714
+ static Vectorized<T> set(
715
+ const Vectorized<T>& a,
716
+ const Vectorized<T>& b,
717
+ size_t count = size()) {
718
+ if (count == 0)
719
+ return a;
720
+ return set_inner<1, size()>(a, b, count);
721
+ }
722
+
723
+ const ElementType& operator[](int idx) const = delete;
724
+ ElementType& operator[](int idx) = delete;
725
+
726
+ Vectorized<T> C10_ALWAYS_INLINE operator+(const Vectorized<T>& other) const {
727
+ return Vectorized<T>{_vec0 + other._vec0, _vec1 + other._vec1};
728
+ }
729
+
730
+ Vectorized<T> C10_ALWAYS_INLINE operator-(const Vectorized<T>& other) const {
731
+ return Vectorized<T>{_vec0 - other._vec0, _vec1 - other._vec1};
732
+ }
733
+
734
+ Vectorized<T> C10_ALWAYS_INLINE operator*(const Vectorized<T>& other) const {
735
+ return Vectorized<T>{_vec0 * other._vec0, _vec1 * other._vec1};
736
+ }
737
+
738
+ Vectorized<T> C10_ALWAYS_INLINE operator/(const Vectorized<T>& other) const {
739
+ return Vectorized<T>{_vec0 / other._vec0, _vec1 / other._vec1};
740
+ }
741
+
742
+ Vectorized<T> C10_ALWAYS_INLINE operator&(const Vectorized<T>& other) const {
743
+ return Vectorized<T>{
744
+ (vtype)(vecb0() & other.vecb0()), (vtype)(vecb1() & other.vecb1())};
745
+ }
746
+
747
+ Vectorized<T> C10_ALWAYS_INLINE operator|(const Vectorized<T>& other) const {
748
+ return Vectorized<T>{
749
+ (vtype)(vecb0() | other.vecb0()), (vtype)(vecb1() | other.vecb1())};
750
+ }
751
+
752
+ Vectorized<T> C10_ALWAYS_INLINE operator^(const Vectorized<T>& other) const {
753
+ return Vectorized<T>{
754
+ (vtype)(vecb0() ^ other.vecb0()), (vtype)(vecb1() ^ other.vecb1())};
755
+ }
756
+
757
+ Vectorized<T> C10_ALWAYS_INLINE operator<<(const Vectorized<T> &other) const {
758
+ constexpr ElementType max_shift = sizeof(ElementType) * CHAR_BIT;
759
+
760
+ ElementType a_array[Vectorized<T>::size()];
761
+ ElementType b_array[Vectorized<T>::size()];
762
+ ElementType c_array[Vectorized<T>::size()];
763
+
764
+ store(a_array);
765
+ other.store(b_array);
766
+
767
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
768
+ T shift = b_array[i];
769
+ if ((static_cast<std::make_signed_t<T>>(shift) < 0) || (shift >= max_shift)) {
770
+ c_array[i] = 0;
771
+ } else {
772
+ c_array[i] = static_cast<std::make_unsigned_t<T>>(a_array[i]) << shift;
773
+ }
774
+ }
775
+
776
+ return loadu(c_array);
777
+ }
778
+
779
+ Vectorized<T> C10_ALWAYS_INLINE operator>>(const Vectorized<T> &other) const {
780
+ // maximum right shift: keeps only the sign bit for signed types and no bits for unsigned types
781
+ constexpr ElementType max_shift = sizeof(T) * CHAR_BIT - std::is_signed_v<T>;
782
+
783
+ ElementType a_array[Vectorized<T>::size()];
784
+ ElementType b_array[Vectorized<T>::size()];
785
+ ElementType c_array[Vectorized<T>::size()];
786
+
787
+ store(a_array);
788
+ other.store(b_array);
789
+
790
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
791
+ T shift = b_array[i];
792
+ if ((static_cast<std::make_signed_t<T>>(shift) < 0) || (shift >= max_shift)) {
793
+ c_array[i] = a_array[i] >> max_shift;
794
+ } else {
795
+ c_array[i] = a_array[i] >> shift;
796
+ }
797
+ }
798
+
799
+ return loadu(c_array);
800
+ }
801
+
802
+ Vectorized<T> _not() const {
803
+ return {(vtype)vec_nor(vecb0(), vecb0()), (vtype)vec_nor(vecb1(), vecb1())};
804
+ }
805
+
806
+ Vectorized<T> C10_ALWAYS_INLINE operator==(const Vectorized<T>& other) const {
807
+ return Vectorized<T>{
808
+ vec_cmpeq(_vec0, other._vec0), vec_cmpeq(_vec1, other._vec1)};
809
+ }
810
+
811
+ Vectorized<T> C10_ALWAYS_INLINE operator!=(const Vectorized<T>& other) const {
812
+ return Vectorized<T>{
813
+ vec_cmpeq(_vec0, other._vec0), vec_cmpeq(_vec1, other._vec1)}
814
+ ._not();
815
+ }
816
+ Vectorized<T> C10_ALWAYS_INLINE operator>(const Vectorized<T>& other) const {
817
+ return Vectorized<T>{
818
+ vec_cmpgt(_vec0, other._vec0), vec_cmpgt(_vec1, other._vec1)};
819
+ }
820
+ Vectorized<T> C10_ALWAYS_INLINE operator>=(const Vectorized<T>& other) const {
821
+ return Vectorized<T>{
822
+ vec_cmpge(_vec0, other._vec0), vec_cmpge(_vec1, other._vec1)};
823
+ }
824
+
825
+ Vectorized<T> C10_ALWAYS_INLINE operator<(const Vectorized<T>& other) const {
826
+ return Vectorized<T>{
827
+ vec_cmplt(_vec0, other._vec0), vec_cmplt(_vec1, other._vec1)};
828
+ }
829
+
830
+ Vectorized<T> C10_ALWAYS_INLINE operator<=(const Vectorized<T>& other) const {
831
+ return Vectorized<T>{
832
+ vec_cmple(_vec0, other._vec0), vec_cmple(_vec1, other._vec1)};
833
+ }
834
+
835
+ Vectorized<T> C10_ALWAYS_INLINE eq(const Vectorized<T>& other) const {
836
+ return (*this == other) & Vectorized<T>((T)1.0);
837
+ }
838
+ Vectorized<T> C10_ALWAYS_INLINE ne(const Vectorized<T>& other) const {
839
+ return (*this != other) & Vectorized<T>((T)1.0);
840
+ }
841
+ Vectorized<T> C10_ALWAYS_INLINE gt(const Vectorized<T>& other) const {
842
+ return (*this > other) & Vectorized<T>((T)1.0);
843
+ }
844
+ Vectorized<T> C10_ALWAYS_INLINE ge(const Vectorized<T>& other) const {
845
+ return (*this >= other) & Vectorized<T>((T)1.0);
846
+ }
847
+ Vectorized<T> C10_ALWAYS_INLINE lt(const Vectorized<T>& other) const {
848
+ return (*this < other) & Vectorized<T>((T)1.0);
849
+ }
850
+ Vectorized<T> C10_ALWAYS_INLINE le(const Vectorized<T>& other) const {
851
+ return (*this <= other) & Vectorized<T>((T)1.0);
852
+ }
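The eq/ne/gt/ge/lt/le helpers above rely on the comparison operators returning an all-ones lane mask, so bitwise-ANDing with Vectorized<T>((T)1.0) leaves exactly 1.0 in matching lanes and 0 elsewhere. A scalar sketch of that trick for the float case (illustrative only):

#include <cstdint>
#include <cstring>

// Scalar model of "(a op b) & Vectorized<float>(1.0f)": a true comparison
// yields an all-ones mask, which ANDed with the bits of 1.0f reproduces
// 1.0f; a false comparison yields all zeros, i.e. 0.0f.
inline float mask_and_one(bool cmp) {
  uint32_t mask = cmp ? 0xFFFFFFFFu : 0u;
  float one = 1.0f;
  uint32_t one_bits;
  std::memcpy(&one_bits, &one, sizeof(one_bits));
  uint32_t out_bits = mask & one_bits;
  float out;
  std::memcpy(&out, &out_bits, sizeof(out));
  return out;
}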
853
+
854
+ template <
855
+ typename U = T,
856
+ std::enable_if_t<!std::is_unsigned<U>::value, int> = 0>
857
+ Vectorized<U> C10_ALWAYS_INLINE abs() const {
858
+ return {vec_abs(_vec0), vec_abs(_vec1)};
859
+ }
860
+
861
+ template <
862
+ typename U = T,
863
+ std::enable_if_t<std::is_unsigned<U>::value, int> = 0>
864
+ Vectorized<U> C10_ALWAYS_INLINE abs() const {
865
+ return {_vec0, _vec1};
866
+ }
867
+
868
+ Vectorized<T> C10_ALWAYS_INLINE neg() const {
869
+ return {-_vec0, -_vec1};
870
+ }
871
+
872
+ Vectorized<T> isnan() const {
873
+ auto x = *this;
874
+ auto ret = (x == x);
875
+ return ret._not();
876
+ }
877
+
878
+ template <
879
+ typename U = T,
880
+ std::enable_if_t<std::is_floating_point<U>::value, int> = 0>
881
+ Vectorized<U> angle() const {
882
+ auto tmp = blendv(
883
+ Vectorized<U>(0), Vectorized<U>(c10::pi<U>), *this < Vectorized<U>(0));
884
+ return blendv(tmp, *this, isnan());
885
+ }
886
+
887
+ template <
888
+ typename U = T,
889
+ std::enable_if_t<!std::is_floating_point<U>::value, int> = 0>
890
+ Vectorized<U> angle() const {
891
+ return blendv(
892
+ Vectorized<U>(0), Vectorized<U>(c10::pi<U>), *this < Vectorized<U>(0));
893
+ }
894
+
895
+ Vectorized<T> real() const {
896
+ return *this;
897
+ }
898
+ Vectorized<T> imag() const {
899
+ return Vectorized<T>{0};
900
+ }
901
+ Vectorized<T> conj() const {
902
+ return *this;
903
+ }
904
+
905
+ template <
906
+ typename U = T,
907
+ std::enable_if_t<std::is_floating_point<U>::value, int> = 0>
908
+ int zero_mask() const {
909
+ auto cmp = (*this == Vectorized<U>(0));
910
+ constexpr auto mask_zero_bits = GetBpermZeroMask<U>();
911
+ ZSimdVectBinary<uint64_t> result0 =
912
+ vec_bperm_u128((ZSimdVectBinary<uint8_t>)cmp.vecb0(), mask_zero_bits);
913
+ ZSimdVectBinary<uint64_t> result1 =
914
+ vec_bperm_u128((ZSimdVectBinary<uint8_t>)cmp.vecb1(), mask_zero_bits);
915
+ return (result0[0] | (result1[0] << (size() / 2)));
916
+ }
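zero_mask() above packs the per-lane result of (*this == 0) into an integer bit mask, one bit per lane, via vec_bperm_u128. A portable scalar sketch of the value it computes (helper name and the fixed-size array are illustrative):

#include <cstddef>

// Scalar model of zero_mask(): bit i of the result is set exactly
// when element i of the vector equals zero.
template <typename T, std::size_t N>
int zero_mask_ref(const T (&values)[N]) {
  int mask = 0;
  for (std::size_t i = 0; i < N; ++i) {
    if (values[i] == T(0))
      mask |= (1 << i);
  }
  return mask;
}

// e.g. float v[4] = {0.f, 1.f, 0.f, 2.f};  zero_mask_ref(v) == 0b0101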
917
+
918
+ Vectorized<T> C10_ALWAYS_INLINE floor() const {
919
+ return {vec_floor(_vec0), vec_floor(_vec1)};
920
+ }
921
+
922
+ Vectorized<T> C10_ALWAYS_INLINE ceil() const {
923
+ return {vec_ceil(_vec0), vec_ceil(_vec1)};
924
+ }
925
+
926
+ Vectorized<T> C10_ALWAYS_INLINE round() const {
927
+ return {vec_round(_vec0), vec_round(_vec1)};
928
+ }
929
+
930
+ Vectorized<T> C10_ALWAYS_INLINE rint() const {
931
+ return {vec_rint(_vec0), vec_rint(_vec1)};
932
+ }
933
+
934
+ Vectorized<T> C10_ALWAYS_INLINE trunc() const {
935
+ return {vec_trunc(_vec0), vec_trunc(_vec1)};
936
+ }
937
+
938
+ Vectorized<T> C10_ALWAYS_INLINE frac() const {
939
+ return *this - trunc();
940
+ }
941
+
942
+ Vectorized<T> C10_ALWAYS_INLINE sqrt() const {
943
+ return {vec_sqrt(_vec0), vec_sqrt(_vec1)};
944
+ }
945
+ Vectorized<T> C10_ALWAYS_INLINE reciprocal() const {
946
+ return Vectorized<T>((T)1) / (*this);
947
+ }
948
+ Vectorized<T> C10_ALWAYS_INLINE rsqrt() const {
949
+ return sqrt().reciprocal();
950
+ }
951
+
952
+ template <
953
+ typename U = T,
954
+ std::enable_if_t<std::is_same<U, float>::value, int> = 0>
955
+ inline Vectorized<T> mapOrdinary(float (*const f)(float)) const {
956
+ float a00 = f(_vec0[0]);
957
+ float a01 = f(_vec0[1]);
958
+ float a02 = f(_vec0[2]);
959
+ float a03 = f(_vec0[3]);
960
+ float a10 = f(_vec1[0]);
961
+ float a11 = f(_vec1[1]);
962
+ float a12 = f(_vec1[2]);
963
+ float a13 = f(_vec1[3]);
964
+ return Vectorized<T>{a00, a01, a02, a03, a10, a11, a12, a13};
965
+ }
966
+
967
+ template <
968
+ typename U = T,
969
+ std::enable_if_t<std::is_same<U, double>::value, int> = 0>
970
+ inline Vectorized<T> mapOrdinary(double (*const f)(double)) const {
971
+ return Vectorized<T>(f(_vec0[0]), f(_vec0[1]), f(_vec1[0]), f(_vec1[1]));
972
+ }
973
+
974
+ template <
975
+ typename U = T,
976
+ std::enable_if_t<std::is_same<U, float>::value, int> = 0>
977
+ inline Vectorized<T> mapOrdinary(
978
+ float (*const f)(float, float),
979
+ const Vectorized<T>& b) const {
980
+ float a00 = f(_vec0[0], b._vec0[0]);
981
+ float a01 = f(_vec0[1], b._vec0[1]);
982
+ float a02 = f(_vec0[2], b._vec0[2]);
983
+ float a03 = f(_vec0[3], b._vec0[3]);
984
+ float a10 = f(_vec1[0], b._vec1[0]);
985
+ float a11 = f(_vec1[1], b._vec1[1]);
986
+ float a12 = f(_vec1[2], b._vec1[2]);
987
+ float a13 = f(_vec1[3], b._vec1[3]);
988
+ return Vectorized<T>{a00, a01, a02, a03, a10, a11, a12, a13};
989
+ }
990
+
991
+ template <
992
+ typename U = T,
993
+ std::enable_if_t<std::is_same<U, double>::value, int> = 0>
994
+ inline Vectorized<T> mapOrdinary(
995
+ double (*const f)(double, double),
996
+ const Vectorized<T>& b) const {
997
+ return Vectorized<T>(
998
+ f(_vec0[0], b._vec0[0]),
999
+ f(_vec0[1], b._vec0[1]),
1000
+ f(_vec1[0], b._vec1[0]),
1001
+ f(_vec1[1], b._vec1[1]));
1002
+ }
1003
+
1004
+ template <
1005
+ typename FloatOp,
1006
+ typename DoubleOp,
1007
+ typename U = T,
1008
+ std::enable_if_t<std::is_same<U, float>::value, int> = 0>
1009
+ inline Vectorized<T> mapSleef(FloatOp f, DoubleOp d) const {
1010
+ vtype a0 = f(_vec0);
1011
+ vtype a1 = f(_vec1);
1012
+ return Vectorized<T>{a0, a1};
1013
+ }
1014
+
1015
+ template <
1016
+ typename FloatOp,
1017
+ typename DoubleOp,
1018
+ typename U = T,
1019
+ std::enable_if_t<std::is_same<U, double>::value, int> = 0>
1020
+ inline Vectorized<T> mapSleef(FloatOp f, DoubleOp d) const {
1021
+ return Vectorized<T>(d(_vec0), d(_vec1));
1022
+ }
1023
+
1024
+ template <
1025
+ typename FloatOp,
1026
+ typename DoubleOp,
1027
+ typename U = T,
1028
+ std::enable_if_t<std::is_same<U, float>::value, int> = 0>
1029
+ inline Vectorized<T> mapSleef(FloatOp f, DoubleOp d, const Vectorized<T>& b)
1030
+ const {
1031
+ vtype a0 = f(_vec0, b._vec0);
1032
+ vtype a1 = f(_vec1, b._vec1);
1033
+ return Vectorized<T>{a0, a1};
1034
+ }
1035
+
1036
+ template <
1037
+ typename FloatOp,
1038
+ typename DoubleOp,
1039
+ typename U = T,
1040
+ std::enable_if_t<std::is_same<U, double>::value, int> = 0>
1041
+ inline Vectorized<T> mapSleef(FloatOp f, DoubleOp d, const Vectorized<T>& b)
1042
+ const {
1043
+ return Vectorized<T>(d(_vec0, b._vec0), d(_vec1, b._vec1));
1044
+ }
1045
+
1046
+ Vectorized<T> acos() const {
1047
+ return mapSleef(Sleef_acosf4_u10, Sleef_acosd2_u10);
1048
+ }
1049
+ Vectorized<T> asin() const {
1050
+ return mapSleef(Sleef_asinf4_u10, Sleef_asind2_u10);
1051
+ }
1052
+ Vectorized<T> atan() const {
1053
+ return mapSleef(Sleef_atanf4_u10, Sleef_atand2_u10);
1054
+ }
1055
+ Vectorized<T> atanh() const {
1056
+ return mapSleef(Sleef_atanhf4_u10, Sleef_atanhd2_u10);
1057
+ }
1058
+
1059
+ Vectorized<T> erf() const {
1060
+ return mapSleef(Sleef_erff4_u10, Sleef_erfd2_u10);
1061
+ }
1062
+ Vectorized<T> erfc() const {
1063
+ return mapSleef(Sleef_erfcf4_u15, Sleef_erfcd2_u15);
1064
+ }
1065
+
1066
+ Vectorized<T> exp() const {
1067
+ return mapSleef(Sleef_expf4_u10, Sleef_expd2_u10);
1068
+ }
1069
+ Vectorized<T> exp2() const {
1070
+ return mapSleef(Sleef_exp2f4_u10, Sleef_exp2d2_u10);
1071
+ }
1072
+ Vectorized<T> expm1() const {
1073
+ return mapSleef(Sleef_expm1f4_u10, Sleef_expm1d2_u10);
1074
+ }
1075
+
1076
+ Vectorized<T> log() const {
1077
+ return mapSleef(Sleef_logf4_u10, Sleef_logd2_u10);
1078
+ }
1079
+ Vectorized<T> log2() const {
1080
+ return mapSleef(Sleef_log2f4_u10, Sleef_log2d2_u10);
1081
+ }
1082
+ Vectorized<T> log10() const {
1083
+ return mapSleef(Sleef_log10f4_u10, Sleef_log10d2_u10);
1084
+ }
1085
+ Vectorized<T> log1p() const {
1086
+ return mapSleef(Sleef_log1pf4_u10, Sleef_log1pd2_u10);
1087
+ }
1088
+
1089
+ Vectorized<T> sin() const {
1090
+ #ifndef SLEEF_MEMORY_WORKAROUND
1091
+ return mapSleef(Sleef_sinf4_u10, Sleef_sind2_u10);
1092
+ #else
1093
+ return mapOrdinary(std::sin);
1094
+ #endif
1095
+ }
1096
+ Vectorized<T> sinh() const {
1097
+ return mapSleef(Sleef_sinhf4_u10, Sleef_sinhd2_u10);
1098
+ }
1099
+ Vectorized<T> cos() const {
1100
+ #ifndef SLEEF_MEMORY_WORKAROUND
1101
+ return mapSleef(Sleef_cosf4_u10, Sleef_cosd2_u10);
1102
+ #else
1103
+ return mapOrdinary(std::cos);
1104
+ #endif
1105
+ }
1106
+ Vectorized<T> cosh() const {
1107
+ return mapSleef(Sleef_coshf4_u10, Sleef_coshd2_u10);
1108
+ }
1109
+
1110
+ Vectorized<T> tan() const {
1111
+ #ifndef SLEEF_MEMORY_WORKAROUND
1112
+ return mapSleef(Sleef_tanf4_u10, Sleef_tand2_u10);
1113
+ #else
1114
+ return mapOrdinary(std::tan);
1115
+ #endif
1116
+ }
1117
+ Vectorized<T> tanh() const {
1118
+ return mapSleef(Sleef_tanhf4_u10, Sleef_tanhd2_u10);
1119
+ }
1120
+
1121
+ Vectorized<T> lgamma() const {
1122
+ return mapSleef(Sleef_lgammaf4_u10, Sleef_lgammad2_u10);
1123
+ }
1124
+
1125
+ Vectorized<T> atan2(const Vectorized<T>& b) const {
1126
+ return mapSleef(Sleef_atan2f4_u10, Sleef_atan2d2_u10, b);
1127
+ }
1128
+ Vectorized<T> copysign(const Vectorized<T>& sign) const {
1129
+ return mapSleef(Sleef_copysignf4, Sleef_copysignd2, sign);
1130
+ }
1131
+ Vectorized<T> fmod(const Vectorized<T>& q) const {
1132
+ return mapSleef(Sleef_fmodf4, Sleef_fmodd2, q);
1133
+ }
1134
+
1135
+ Vectorized<T> hypot(const Vectorized<T>& b) const {
1136
+ return mapSleef(Sleef_hypotf4_u05, Sleef_hypotd2_u05, b);
1137
+ }
1138
+
1139
+ Vectorized<T> pow(const Vectorized<T>& b) const {
1140
+ return mapSleef(Sleef_powf4_u10, Sleef_powd2_u10, b);
1141
+ }
1142
+
1143
+ Vectorized<T> nextafter(const Vectorized<T>& b) const {
1144
+ return mapSleef(Sleef_nextafterf4, Sleef_nextafterd2, b);
1145
+ }
1146
+
1147
+ Vectorized<T> erfinv() const {
1148
+ return mapOrdinary(calc_erfinv);
1149
+ }
1150
+
1151
+ Vectorized<T> digamma() const {
1152
+ return mapOrdinary(calc_digamma);
1153
+ }
1154
+
1155
+ Vectorized<T> igamma(const Vectorized<T>& x) const {
1156
+ return mapOrdinary(calc_igamma, x);
1157
+ }
1158
+
1159
+ Vectorized<T> igammac(const Vectorized<T>& x) const {
1160
+ return mapOrdinary(calc_igammac, x);
1161
+ }
1162
+
1163
+ Vectorized<T> i0() const {
1164
+ return mapOrdinary(calc_i0);
1165
+ }
1166
+
1167
+ Vectorized<T> i0e() const {
1168
+ return mapOrdinary(calc_i0e);
1169
+ }
1170
+
1171
+ template <
1172
+ typename U = T,
1173
+ std::enable_if_t<!std::is_floating_point<U>::value, int> = 0>
1174
+ Vectorized<T> minimum(const Vectorized<T>& other) const {
1175
+ return {vec_min(_vec0, other._vec0), vec_min(_vec1, other._vec1)};
1176
+ }
1177
+
1178
+ /* Propagates NaN if either input is a NaN. */
1179
+ template <
1180
+ typename U = T,
1181
+ std::enable_if_t<std::is_floating_point<U>::value, int> = 0>
1182
+ Vectorized<T> minimum(const Vectorized<T>& other) const {
1183
+ Vectorized<T> tmp = {vec_min(_vec0, other._vec0), vec_min(_vec1, other._vec1)};
1184
+ tmp = blendv(tmp, *this, isnan());
1185
+ return blendv(tmp, other, other.isnan());
1186
+ }
1187
+
1188
+ template <
1189
+ typename U = T,
1190
+ std::enable_if_t<!std::is_floating_point<U>::value, int> = 0>
1191
+ Vectorized<T> maximum(const Vectorized<T>& other) const {
1192
+ return {vec_max(_vec0, other._vec0), vec_max(_vec1, other._vec1)};
1193
+ }
1194
+
1195
+ /* Propagates NaN if either input is a NaN. */
1196
+ template <
1197
+ typename U = T,
1198
+ std::enable_if_t<std::is_floating_point<U>::value, int> = 0>
1199
+ Vectorized<T> maximum(const Vectorized<T>& other) const {
1200
+ Vectorized<T> tmp = {vec_max(_vec0, other._vec0), vec_max(_vec1, other._vec1)};
1201
+ tmp = blendv(tmp, *this, isnan());
1202
+ return blendv(tmp, other, other.isnan());
1203
+ }
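As the comments note, the floating-point minimum/maximum overloads propagate NaN from either operand: vec_min/vec_max produce the candidate, then two blendv passes overwrite lanes where either input is NaN. A scalar sketch of the resulting rule (name is illustrative):

#include <cmath>

// Scalar model of the NaN-propagating minimum above: if either input is
// NaN the result is NaN, otherwise the smaller of the two values.
template <typename T>
T propagating_minimum(T a, T b) {
  if (std::isnan(a)) return a;   // blendv(tmp, *this, isnan())
  if (std::isnan(b)) return b;   // blendv(tmp, other, other.isnan())
  return a < b ? a : b;          // vec_min on the NaN-free path
}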
1204
+
1205
+ template <
1206
+ typename U = T,
1207
+ std::enable_if_t<!std::is_floating_point<U>::value, int> = 0>
1208
+ Vectorized<T> clamp_min(const Vectorized<T>& min) const {
1209
+ return {vec_max(_vec0, min._vec0), vec_max(_vec1, min._vec1)};
1210
+ }
1211
+
1212
+ /* Keeps NaN if actual value is NaN */
1213
+ template <
1214
+ typename U = T,
1215
+ std::enable_if_t<std::is_floating_point<U>::value, int> = 0>
1216
+ Vectorized<T> clamp_min(const Vectorized<T>& min) const {
1217
+ Vectorized<T> tmp = {vec_max(_vec0, min._vec0), vec_max(_vec1, min._vec1)};
1218
+ return blendv(tmp, *this, isnan());
1219
+ }
1220
+
1221
+ template <
1222
+ typename U = T,
1223
+ std::enable_if_t<!std::is_floating_point<U>::value, int> = 0>
1224
+ Vectorized<T> clamp_max(const Vectorized<T>& max) const {
1225
+ return {vec_min(_vec0, max._vec0), vec_min(_vec1, max._vec1)};
1226
+ }
1227
+
1228
+ /* Keeps NaN if actual value is NaN */
1229
+ template <
1230
+ typename U = T,
1231
+ std::enable_if_t<std::is_floating_point<U>::value, int> = 0>
1232
+ Vectorized<T> clamp_max(const Vectorized<T>& max) const {
1233
+ Vectorized<T> tmp = {vec_min(_vec0, max._vec0), vec_min(_vec1, max._vec1)};
1234
+ return blendv(tmp, *this, isnan());
1235
+ }
1236
+
1237
+ template <
1238
+ typename U = T,
1239
+ std::enable_if_t<std::is_same<U, float>::value, int> = 0>
1240
+ Vectorized<T> swapped() const {
1241
+ auto swap_mask = GetSwapMaskFloat();
1242
+ vtype v0 = vec_perm(_vec0, _vec0, swap_mask);
1243
+ vtype v1 = vec_perm(_vec1, _vec1, swap_mask);
1244
+ return {v0, v1};
1245
+ }
1246
+
1247
+ template <
1248
+ typename U = T,
1249
+ std::enable_if_t<std::is_same<U, double>::value, int> = 0>
1250
+ Vectorized<T> swapped() const {
1251
+ vtype v0 = vec_permi(_vec0, _vec0, 2);
1252
+ vtype v1 = vec_permi(_vec1, _vec1, 2);
1253
+ return {v0, v1};
1254
+ }
1255
+
1256
+ template <
1257
+ typename U = T,
1258
+ std::enable_if_t<std::is_floating_point<U>::value, int> = 0>
1259
+ static Vectorized<T> mergee(Vectorized<T>& first, Vectorized<T>& second) {
1260
+ return {
1261
+ vec_mergee(first._vec0, second._vec0),
1262
+ vec_mergee(first._vec1, second._vec1)};
1263
+ }
1264
+
1265
+ template <
1266
+ typename U = T,
1267
+ std::enable_if_t<std::is_floating_point<U>::value, int> = 0>
1268
+ static Vectorized<T> mergeo(Vectorized<T>& first, Vectorized<T>& second) {
1269
+ return {
1270
+ vec_mergeo(first._vec0, second._vec0),
1271
+ vec_mergeo(first._vec1, second._vec1)};
1272
+ }
1273
+
1274
+ static Vectorized<T> horizontal_add_perm(
1275
+ Vectorized<T>& first,
1276
+ Vectorized<T>& second) {
1277
+ // we simulate the horizontal add with 6 instructions in total
1278
+ // let's permute so that adding the swapped copy yields horizontal sums
1279
+ auto first_perm = first.swapped(); // 2perm
1280
+ auto second_perm = second.swapped(); // 2perm
1281
+ // sum
1282
+ auto first_ret = first + first_perm; // 2add
1283
+ auto second_ret = second + second_perm; // 2 add
1284
+ // now let's choose the even elements
1285
+ return mergee(first_ret, second_ret); // 2 mergee's
1286
+ }
1287
+
1288
+ static Vectorized<T> horizontal_sub_perm(
1289
+ Vectorized<T>& first,
1290
+ Vectorized<T>& second) {
1291
+ // we will simulate it differently with 6 instructions total
1292
+ // lets permute second so that we can add it getting horizontal sums
1293
+ auto first_perm = first.swapped(); // 2perm
1294
+ auto second_perm = second.swapped(); // 2perm
1295
+ // subtract
1296
+ auto first_ret = first - first_perm; // 2sub
1297
+ auto second_ret = second - second_perm; // 2 sub
1298
+ // now let's choose the even elements
1299
+ return mergee(first_ret, second_ret); // 2 mergee's
1300
+ }
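horizontal_add_perm / horizontal_sub_perm above combine each operand with a lane-swapped copy of itself and then merge the even lanes of the two intermediate results. A scalar sketch of the additive variant for one 2-lane double chunk of each operand (array-based, names illustrative):

#include <array>

// Scalar model of horizontal_add_perm on one 2-lane double chunk per operand:
// each input {x0, x1} contributes its horizontal sum, and the even-lane merge
// (mergee) interleaves the two results.
inline std::array<double, 2> horizontal_add_ref(std::array<double, 2> first,
                                                std::array<double, 2> second) {
  // swapped(): {x1, x0}; adding gives {x0 + x1, x1 + x0}
  std::array<double, 2> first_ret  = {first[0] + first[1],   first[1] + first[0]};
  std::array<double, 2> second_ret = {second[0] + second[1], second[1] + second[0]};
  // mergee(): keep lane 0 of each intermediate result
  return {first_ret[0], second_ret[0]};
}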
1301
+
1302
+ template <
1303
+ typename U = T,
1304
+ std::enable_if_t<std::is_floating_point<U>::value, int> = 0>
1305
+ Vectorized<T> mergee() const {
1306
+ return {vec_mergee(_vec0, _vec0), vec_mergee(_vec1, _vec1)};
1307
+ }
1308
+
1309
+ template <
1310
+ typename U = T,
1311
+ std::enable_if_t<std::is_floating_point<U>::value, int> = 0>
1312
+ Vectorized<T> mergeo() const {
1313
+ return {vec_mergeo(_vec0, _vec0), vec_mergeo(_vec1, _vec1)};
1314
+ }
1315
+
1316
+ template <
1317
+ typename U = T,
1318
+ std::enable_if_t<std::is_same<U, uint8_t>::value, int> = 0>
1319
+ Vectorized<int32_t> to_vec_float_helper() const {
1320
+ int32_t values[8] = {
1321
+ _vec0[0],
1322
+ _vec0[1],
1323
+ _vec0[2],
1324
+ _vec0[3],
1325
+ _vec0[4],
1326
+ _vec0[5],
1327
+ _vec0[6],
1328
+ _vec0[7],
1329
+ };
1330
+
1331
+ return Vectorized<int32_t>{
1332
+ values[0], values[1], values[2], values[3],
1333
+ values[4], values[5], values[6], values[7]
1334
+ };
1335
+ }
1336
+
1337
+ template <
1338
+ typename U = T,
1339
+ std::enable_if_t<std::is_same<U, int32_t>::value, int> = 0>
1340
+ Vectorized<uint8_t> to_vec_uint8_helper() const {
1341
+ // helper for the int32_t -> uint8_t step of the float -> uint8_t conversion
1342
+ uint8_t values[8] = {
1343
+ static_cast<uint8_t>(_vec0[0]),
1344
+ static_cast<uint8_t>(_vec0[1]),
1345
+ static_cast<uint8_t>(_vec0[2]),
1346
+ static_cast<uint8_t>(_vec0[3]),
1347
+ static_cast<uint8_t>(_vec1[0]),
1348
+ static_cast<uint8_t>(_vec1[1]),
1349
+ static_cast<uint8_t>(_vec1[2]),
1350
+ static_cast<uint8_t>(_vec1[3]),
1351
+ };
1352
+
1353
+ return Vectorized<uint8_t>{
1354
+ values[0], values[1], values[2], values[3],
1355
+ values[4], values[5], values[6], values[7],
1356
+ 0, 0, 0, 0,
1357
+ 0, 0, 0, 0,
1358
+ 0, 0, 0, 0,
1359
+ 0, 0, 0, 0,
1360
+ 0, 0, 0, 0,
1361
+ 0, 0, 0, 0,
1362
+ };
1363
+ }
1364
+ };
1365
+
1366
+ template <>
1367
+ inline Vectorized<int64_t> operator~(const Vectorized<int64_t>& a) {
1368
+ return a._not();
1369
+ }
1370
+
1371
+ template <>
1372
+ inline Vectorized<int32_t> operator~(const Vectorized<int32_t>& a) {
1373
+ return a._not();
1374
+ }
1375
+
1376
+ template <>
1377
+ inline Vectorized<int16_t> operator~(const Vectorized<int16_t>& a) {
1378
+ return a._not();
1379
+ }
1380
+
1381
+ template <>
1382
+ inline Vectorized<int8_t> operator~(const Vectorized<int8_t>& a) {
1383
+ return a._not();
1384
+ }
1385
+
1386
+ template <>
1387
+ inline Vectorized<uint8_t> operator~(const Vectorized<uint8_t>& a) {
1388
+ return a._not();
1389
+ }
1390
+
1391
+ #define DEFINE_MAXMIN_FUNCS(operand_type) \
1392
+ template <> \
1393
+ Vectorized<operand_type> inline maximum( \
1394
+ const Vectorized<operand_type>& a, const Vectorized<operand_type>& b) { \
1395
+ return a.maximum(b); \
1396
+ } \
1397
+ template <> \
1398
+ Vectorized<operand_type> inline minimum( \
1399
+ const Vectorized<operand_type>& a, const Vectorized<operand_type>& b) { \
1400
+ return a.minimum(b); \
1401
+ }
1402
+
1403
+ #define DEFINE_CLAMP_MAXMIN_FUNCS(typex) \
1404
+ DEFINE_MAXMIN_FUNCS(typex) \
1405
+ template <> \
1406
+ Vectorized<typex> C10_ALWAYS_INLINE clamp_min( \
1407
+ const Vectorized<typex>& a, const Vectorized<typex>& min) { \
1408
+ return a.clamp_min(min); \
1409
+ } \
1410
+ template <> \
1411
+ Vectorized<typex> C10_ALWAYS_INLINE clamp_max( \
1412
+ const Vectorized<typex>& a, const Vectorized<typex>& max) { \
1413
+ return a.clamp_max(max); \
1414
+ } \
1415
+ template <> \
1416
+ Vectorized<typex> C10_ALWAYS_INLINE clamp( \
1417
+ const Vectorized<typex>& a, \
1418
+ const Vectorized<typex>& min, \
1419
+ const Vectorized<typex>& max) { \
1420
+ return clamp_max(clamp_min(a, min), max); \
1421
+ }
1422
+
1423
+ DEFINE_CLAMP_MAXMIN_FUNCS(int8_t)
1424
+ DEFINE_CLAMP_MAXMIN_FUNCS(uint8_t)
1425
+ DEFINE_CLAMP_MAXMIN_FUNCS(int16_t)
1426
+ DEFINE_CLAMP_MAXMIN_FUNCS(int32_t)
1427
+ DEFINE_CLAMP_MAXMIN_FUNCS(int64_t)
1428
+ DEFINE_CLAMP_MAXMIN_FUNCS(float)
1429
+ DEFINE_CLAMP_MAXMIN_FUNCS(double)
1430
+
1431
+ namespace { /* unnamed namespace */
1432
+
1433
+ #if !defined(vec_float) || __ARCH__ < 13
1434
+ #warning \
1435
+ "float->int and int->float conversion is simulated. compile for z15 for improved performance"
1436
+ inline ZSimdVect<float> vec_int_flt(const ZSimdVect<int> x) {
1437
+ return ZSimdVect<float>{float(x[0]), float(x[1]), float(x[2]), float(x[3])};
1438
+ }
1439
+ inline ZSimdVect<int> vec_flt_int(const ZSimdVect<float> x) {
1440
+ return ZSimdVect<int>{int(x[0]), int(x[1]), int(x[2]), int(x[3])};
1441
+ }
1442
+ #else
1443
+ #define vec_int_flt vec_float
1444
+ #define vec_flt_int vec_signed
1445
+ #endif
1446
+
1447
+ Vectorized<float> convert_to_float(const Vectorized<int32_t>& x) {
1448
+ return {vec_int_flt(x.vec0()), vec_int_flt(x.vec1())};
1449
+ }
1450
+
1451
+ Vectorized<int32_t> convert_to_int(const Vectorized<float>& x) {
1452
+ return {vec_flt_int(x.vec0()), vec_flt_int(x.vec1())};
1453
+ }
1454
+
1455
+ Vectorized<double> convert_to_float(const Vectorized<int64_t>& x) {
1456
+ return {vec_double(x.vec0()), vec_double(x.vec1())};
1457
+ }
1458
+
1459
+ Vectorized<int64_t> convert_to_int(const Vectorized<double>& x) {
1460
+ return {vec_signed(x.vec0()), vec_signed(x.vec1())};
1461
+ }
1462
+
1463
+ } /* unnamed namespace */
1464
+
1465
+ template <typename T, typename V>
1466
+ Vectorized<V> cast_zvector(const Vectorized<T>& x) {
1467
+ using cast_type = typename Vectorized<V>::vtype;
1468
+ return Vectorized<V>{(cast_type)x.vec0(), (cast_type)x.vec1()};
1469
+ }
1470
+
1471
+ template <>
1472
+ Vectorized<float> C10_ALWAYS_INLINE fmadd(
1473
+ const Vectorized<float>& a,
1474
+ const Vectorized<float>& b,
1475
+ const Vectorized<float>& c) {
1476
+ return Vectorized<float>{
1477
+ __builtin_s390_vfmasb(a.vec0(), b.vec0(), c.vec0()),
1478
+ __builtin_s390_vfmasb(a.vec1(), b.vec1(), c.vec1())};
1479
+ }
1480
+ template <>
1481
+ Vectorized<double> C10_ALWAYS_INLINE fmadd(
1482
+ const Vectorized<double>& a,
1483
+ const Vectorized<double>& b,
1484
+ const Vectorized<double>& c) {
1485
+ return Vectorized<double>{
1486
+ __builtin_s390_vfmadb(a.vec0(), b.vec0(), c.vec0()),
1487
+ __builtin_s390_vfmadb(a.vec1(), b.vec1(), c.vec1())};
1488
+ }
1489
+ template <>
1490
+ Vectorized<int16_t> C10_ALWAYS_INLINE fmadd(
1491
+ const Vectorized<int16_t>& a,
1492
+ const Vectorized<int16_t>& b,
1493
+ const Vectorized<int16_t>& c) {
1494
+ return Vectorized<int16_t>{
1495
+ a.vec0() * b.vec0() + c.vec0(), a.vec1() * b.vec1() + c.vec1()};
1496
+ }
1497
+ template <>
1498
+ Vectorized<int32_t> C10_ALWAYS_INLINE fmadd(
1499
+ const Vectorized<int32_t>& a,
1500
+ const Vectorized<int32_t>& b,
1501
+ const Vectorized<int32_t>& c) {
1502
+ return Vectorized<int32_t>{
1503
+ a.vec0() * b.vec0() + c.vec0(), a.vec1() * b.vec1() + c.vec1()};
1504
+ }
1505
+ template <>
1506
+ Vectorized<int64_t> C10_ALWAYS_INLINE fmadd(
1507
+ const Vectorized<int64_t>& a,
1508
+ const Vectorized<int64_t>& b,
1509
+ const Vectorized<int64_t>& c) {
1510
+ return Vectorized<int64_t>{
1511
+ a.vec0() * b.vec0() + c.vec0(), a.vec1() * b.vec1() + c.vec1()};
1512
+ }
1513
+
1514
+ template <>
1515
+ Vectorized<int64_t> C10_ALWAYS_INLINE
1516
+ convert_to_int_of_same_size<double>(const Vectorized<double>& src) {
1517
+ return convert_to_int(src);
1518
+ }
1519
+
1520
+ template <>
1521
+ Vectorized<int32_t> C10_ALWAYS_INLINE
1522
+ convert_to_int_of_same_size<float>(const Vectorized<float>& src) {
1523
+ return convert_to_int(src);
1524
+ }
1525
+
1526
+ template <>
1527
+ inline void convert(const int32_t* src, float* dst, int64_t n) {
1528
+ // int32_t and float have same size
1529
+ int64_t i;
1530
+ for (i = 0; i <= (n - Vectorized<float>::size());
1531
+ i += Vectorized<float>::size()) {
1532
+ const int32_t* src_a = src + i;
1533
+ float* dst_a = dst + i;
1534
+ auto input_vec = Vectorized<int32_t>::loadu(src_a);
1535
+ auto output_vec = convert_to_float(input_vec);
1536
+ output_vec.store(dst_a);
1537
+ }
1538
+
1539
+ for (; i < n; i++) {
1540
+ dst[i] = static_cast<float>(src[i]);
1541
+ }
1542
+ }
1543
+
1544
+ template <>
1545
+ inline void convert(const int64_t* src, double* dst, int64_t n) {
1546
+ int64_t i;
1547
+ for (i = 0; i <= (n - Vectorized<double>::size());
1548
+ i += Vectorized<double>::size()) {
1549
+ const int64_t* src_a = src + i;
1550
+ double* dst_a = dst + i;
1551
+ auto input_vec = Vectorized<int64_t>::loadu(src_a);
1552
+ auto output_vec = convert_to_float(input_vec);
1553
+ output_vec.store(dst_a);
1554
+ }
1555
+ for (; i < n; i++) {
1556
+ dst[i] = static_cast<double>(src[i]);
1557
+ }
1558
+ }
1559
+
1560
+ #define DEFINE_REINTERPRET_CAST_FUNCS(Fst, Cst) \
1561
+ template <> \
1562
+ C10_ALWAYS_INLINE Vectorized<Cst> cast<Cst, Fst>( \
1563
+ const Vectorized<Fst>& src) { \
1564
+ return cast_zvector<Fst, Cst>(src); \
1565
+ }
1566
+
1567
+ #define DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(Fst) \
1568
+ DEFINE_REINTERPRET_CAST_FUNCS(Fst, double) \
1569
+ DEFINE_REINTERPRET_CAST_FUNCS(Fst, float) \
1570
+ DEFINE_REINTERPRET_CAST_FUNCS(Fst, int64_t) \
1571
+ DEFINE_REINTERPRET_CAST_FUNCS(Fst, int32_t) \
1572
+ DEFINE_REINTERPRET_CAST_FUNCS(Fst, int16_t)
1573
+
1574
+ DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(float)
1575
+ DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(double)
1576
+ DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(int64_t)
1577
+ DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(int32_t)
1578
+ DEFINE_REINTERPRET_CAST_TO_ALL_FUNCS(int16_t)
1579
+
1580
+ #undef DEFINE_REINTERPRET_CAST_FUNCS
1581
+
1582
+ template <typename T>
1583
+ struct unpack_type {
1584
+ using type = T;
1585
+ };
1586
+ template <>
1587
+ struct unpack_type<int8_t> {
1588
+ using type = int16_t;
1589
+ };
1590
+ template <>
1591
+ struct unpack_type<uint8_t> {
1592
+ using type = int16_t;
1593
+ };
1594
+ template <>
1595
+ struct unpack_type<int16_t> {
1596
+ using type = int32_t;
1597
+ };
1598
+
1599
+ template <typename T>
1600
+ struct pack_type {
1601
+ using type = T;
1602
+ };
1603
+ template <>
1604
+ struct pack_type<int16_t> {
1605
+ using type = int8_t;
1606
+ };
1607
+ template <>
1608
+ struct pack_type<int32_t> {
1609
+ using type = int16_t;
1610
+ };
1611
+
1612
+ namespace { /* unnamed namespace */
1613
+
1614
+ template <typename T, typename V = typename unpack_type<T>::type>
1615
+ std::pair<Vectorized<V>, Vectorized<V>> unpack(const Vectorized<T>& x) {
1616
+ auto vec0 = vec_unpackh(x.vec0());
1617
+ auto vec1 = vec_unpackl(x.vec0());
1618
+ auto vec2 = vec_unpackh(x.vec1());
1619
+ auto vec3 = vec_unpackl(x.vec1());
1620
+ return {Vectorized<V>{vec0, vec1}, Vectorized<V>{vec2, vec3}};
1621
+ }
1622
+
1623
+ template <>
1624
+ std::pair<Vectorized<int16_t>, Vectorized<int16_t>> unpack<uint8_t, int16_t>(
1625
+ const Vectorized<uint8_t>& x) {
1626
+ using typeX = typename Vectorized<uint16_t>::vtype;
1627
+ typeX vec0 = vec_unpackh(x.vec0());
1628
+ typeX vec1 = vec_unpackl(x.vec0());
1629
+ typeX vec2 = vec_unpackh(x.vec1());
1630
+ typeX vec3 = vec_unpackl(x.vec1());
1631
+ // auto mask = Vectorized<uint16_t>(0xFF);
1632
+ // vec0 = vec0 & mask;
1633
+ // vec1 = vec1 & mask;
1634
+ // vec2 = vec2 & mask;
1635
+ // vec3 = vec3 & mask;
1636
+ return {
1637
+ cast_zvector<uint16_t, int16_t>(Vectorized<uint16_t>{vec0, vec1}),
1638
+ cast_zvector<uint16_t, int16_t>(Vectorized<uint16_t>{vec2, vec3})};
1639
+ }
1640
+
1641
+ template <typename T, typename V = typename pack_type<T>::type>
1642
+ Vectorized<V> pack(const Vectorized<T>& first, const Vectorized<T>& second) {
1643
+ auto vec0 = vec_packs(first.vec0(), first.vec1());
1644
+ auto vec1 = vec_packs(second.vec0(), second.vec1());
1645
+ return Vectorized<V>{vec0, vec1};
1646
+ }
1647
+
1648
+ template <>
1649
+ Vectorized<uint8_t> pack(
1650
+ const Vectorized<int16_t>& first,
1651
+ const Vectorized<int16_t>& second) {
1652
+ auto vec0 = vec_packsu(first.vec0(), first.vec1());
1653
+ auto vec1 = vec_packsu(second.vec0(), second.vec1());
1654
+ return Vectorized<uint8_t>{vec0, vec1};
1655
+ }
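pack() above narrows pairs of wider vectors with saturation: vec_packs clamps to the signed range of the narrower type and vec_packsu to its unsigned range. A scalar sketch of the per-element narrowing (helper name is illustrative; needs C++17 for std::clamp):

#include <algorithm>
#include <cstdint>
#include <limits>

// Scalar model of the saturating narrowing performed by vec_packs/vec_packsu:
// values outside the destination range are clamped to its min/max.
template <typename To, typename From>
To saturate_narrow(From value) {
  const From lo = static_cast<From>(std::numeric_limits<To>::min());
  const From hi = static_cast<From>(std::numeric_limits<To>::max());
  return static_cast<To>(std::clamp(value, lo, hi));
}

// e.g. saturate_narrow<uint8_t>(int16_t(300))   == 255
//      saturate_narrow<uint8_t>(int16_t(-5))    == 0
//      saturate_narrow<int16_t>(int32_t(70000)) == 32767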
1656
+
1657
+ } /* unnamed namespace */
1658
+
1659
+ //////////////////////////////////QUANT///////////////////////////////////////////
1660
+ template <typename T>
1661
+ struct Vectorized<T, std::enable_if_t<is_zarch_implemented_quant<T>()>> {
1662
+ public:
1663
+ using value_type = typename T::underlying;
1664
+ using vtype = ZSimdVect<value_type>;
1665
+ using vmaskType = ZSimdVectBinary<value_type>;
1666
+ using vinner_type = Vectorized<value_type>;
1667
+ using size_type = int;
1668
+
1669
+ static constexpr size_type size() {
1670
+ return VECTOR_WIDTH / sizeof(value_type);
1671
+ }
1672
+
1673
+ static constexpr size_t float_num_vecs() {
1674
+ return size() / Vectorized<float>::size();
1675
+ }
1676
+ static constexpr int int_num_vecs() {
1677
+ return float_num_vecs();
1678
+ }
1679
+ using float_vec_return_type = std::array<Vectorized<float>, float_num_vecs()>;
1680
+ using int_vec_return_type =
1681
+ std::array<Vectorized<c10::qint32>, int_num_vecs()>;
1682
+
1683
+ private:
1684
+ vinner_type _vec;
1685
+
1686
+ public:
1687
+ Vectorized() {}
1688
+
1689
+ explicit C10_ALWAYS_INLINE Vectorized(vinner_type v) : _vec{v} {}
1690
+ Vectorized(const T& val) : _vec(val.val_) {}
1691
+
1692
+ C10_ALWAYS_INLINE const vinner_type& vec() const {
1693
+ return _vec;
1694
+ }
1695
+
1696
+ static Vectorized<T> C10_ALWAYS_INLINE
1697
+ loadu(const void* ptr, int count = size()) {
1698
+ return Vectorized<T>{vinner_type::loadu(ptr, count)};
1699
+ }
1700
+
1701
+ void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
1702
+ _vec.store(ptr, count);
1703
+ }
1704
+
1705
+ Vectorized<T> relu(Vectorized<T> zero_point) const {
1706
+ return Vectorized<T>{_vec.maximum(zero_point._vec)};
1707
+ }
1708
+
1709
+ Vectorized<T> relu6(Vectorized<T> zero_point, Vectorized<T> q_six) const {
1710
+ auto ret_max = _vec.maximum(zero_point._vec);
1711
+ auto ret_min = ret_max.minimum(q_six._vec);
1712
+ return Vectorized<T>{ret_min};
1713
+ }
1714
+
1715
+ template <
1716
+ typename U = T,
1717
+ std::enable_if_t<Vectorized<U>::float_num_vecs() == 1, int> = 0>
1718
+ int_vec_return_type widening_subtract(Vectorized<T> b) const {
1719
+ return {*this - b};
1720
+ }
1721
+
1722
+ template <
1723
+ typename U = T,
1724
+ std::enable_if_t<Vectorized<U>::float_num_vecs() == 1, int> = 0>
1725
+ float_vec_return_type dequantize(
1726
+ Vectorized<float> scale,
1727
+ Vectorized<float> zero_point,
1728
+ Vectorized<float> scale_zp_premul) const {
1729
+ auto float_val = convert_to_float(_vec);
1730
+ return {fmadd(scale, float_val, scale_zp_premul)};
1731
+ }
1732
+
1733
+ template <
1734
+ typename U = T,
1735
+ std::enable_if_t<Vectorized<U>::float_num_vecs() == 1, int> = 0>
1736
+ float_vec_return_type dequantize(
1737
+ Vectorized<float> scale,
1738
+ Vectorized<float> zero_point) const {
1739
+ auto float_val = convert_to_float(_vec);
1740
+ return {(float_val - zero_point) * scale};
1741
+ }
1742
+
1743
+ template <
1744
+ typename U = T,
1745
+ std::enable_if_t<Vectorized<U>::float_num_vecs() == 1, int> = 0>
1746
+ static Vectorized<T> quantize(
1747
+ const float_vec_return_type& rhs,
1748
+ float scale,
1749
+ int32_t zero_point,
1750
+ float inverse_scale) {
1751
+ Vectorized<float> vecf = rhs[0];
1752
+ vecf = vecf * Vectorized<float>(inverse_scale);
1753
+ vecf = vecf.rint() + Vectorized<float>((float)(zero_point));
1754
+ auto veci = convert_to_int(vecf);
1755
+
1756
+ return Vectorized<T>{veci};
1757
+ }
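The single-vector dequantize/quantize paths above implement the usual affine scheme: q = round(x * inverse_scale) + zero_point on the way in, x ~= (q - zero_point) * scale on the way out, with the premultiplied variant folding the subtraction into one fmadd (scale_zp_premul is assumed here to be -zero_point * scale, which is what makes the fmadd form equivalent). A scalar sketch (names are illustrative, not the ATen API):

#include <cmath>
#include <cstdint>

// Scalar model of the affine quantization used above.
inline int32_t quantize_ref(float x, float inverse_scale, int32_t zero_point) {
  // mirrors: vecf * inverse_scale, rint(), + zero_point, convert_to_int
  return static_cast<int32_t>(std::nearbyint(x * inverse_scale)) + zero_point;
}

inline float dequantize_ref(int32_t q, float scale, int32_t zero_point) {
  // mirrors: (float_val - zero_point) * scale
  return (static_cast<float>(q) - static_cast<float>(zero_point)) * scale;
}

// Premultiplied variant, assuming scale_zp_premul == -zero_point * scale.
inline float dequantize_premul_ref(int32_t q, float scale, float scale_zp_premul) {
  return std::fma(scale, static_cast<float>(q), scale_zp_premul);
}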
1758
+
1759
+ template <
1760
+ typename U = T,
1761
+ std::enable_if_t<Vectorized<U>::int_num_vecs() == 1, int> = 0>
1762
+ static Vectorized<T> requantize_from_int(
1763
+ const int_vec_return_type& inp,
1764
+ float multiplier,
1765
+ int32_t zero_point) {
1766
+ Vectorized<T> vi = inp[0];
1767
+ auto vecf = convert_to_float(vi.vec());
1768
+ vecf = vecf * Vectorized<float>(multiplier);
1769
+ vecf = vecf.rint();
1770
+ auto veci = convert_to_int(vecf) + Vectorized<int>(zero_point);
1771
+
1772
+ return Vectorized<T>{veci};
1773
+ }
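requantize_from_int above maps widened int32 values back to the quantized domain: scale by the floating-point multiplier, round, then re-add the output zero point. A one-line scalar sketch (illustrative name):

#include <cmath>
#include <cstdint>

// Scalar model of requantize_from_int: q_out = round(v * multiplier) + zero_point.
inline int32_t requantize_ref(int32_t v, float multiplier, int32_t zero_point) {
  return static_cast<int32_t>(std::nearbyint(static_cast<float>(v) * multiplier)) +
      zero_point;
}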
1774
+
1775
+ template <
1776
+ typename U = T,
1777
+ std::enable_if_t<Vectorized<U>::int_num_vecs() == 4, int> = 0>
1778
+ int_vec_return_type widening_subtract(Vectorized<U> b) const {
1779
+ auto ret16 = unpack(_vec);
1780
+ auto ret16B = unpack(b.vec());
1781
+ auto ret32_0 = unpack(ret16.first);
1782
+ auto ret32_1 = unpack(ret16.second);
1783
+ auto ret32B_0 = unpack(ret16B.first);
1784
+ auto ret32B_1 = unpack(ret16B.second);
1785
+
1786
+ return {
1787
+ Vectorized<c10::qint32>(ret32_0.first - ret32B_0.first),
1788
+ Vectorized<c10::qint32>(ret32_0.second - ret32B_0.second),
1789
+ Vectorized<c10::qint32>(ret32_1.first - ret32B_1.first),
1790
+ Vectorized<c10::qint32>(ret32_1.second - ret32B_1.second)};
1791
+ }
1792
+
1793
+ template <
1794
+ typename U = T,
1795
+ std::enable_if_t<Vectorized<U>::float_num_vecs() == 4, int> = 0>
1796
+ float_vec_return_type C10_ALWAYS_INLINE dequantize(
1797
+ Vectorized<float> scale,
1798
+ Vectorized<float> zero_point,
1799
+ Vectorized<float> scale_zp_premul) const {
1800
+ // unpacking unsigned as signed
1801
+ auto ret16 = unpack(_vec);
1802
+ auto ret32_0 = unpack(ret16.first);
1803
+ auto ret32_1 = unpack(ret16.second);
1804
+
1805
+ auto vecf_0 = convert_to_float(ret32_0.first);
1806
+ auto vecf_1 = convert_to_float(ret32_0.second);
1807
+
1808
+ auto vecf_2 = convert_to_float(ret32_1.first);
1809
+ auto vecf_3 = convert_to_float(ret32_1.second);
1810
+ return {
1811
+ fmadd(scale, vecf_0, scale_zp_premul),
1812
+ fmadd(scale, vecf_1, scale_zp_premul),
1813
+ fmadd(scale, vecf_2, scale_zp_premul),
1814
+ fmadd(scale, vecf_3, scale_zp_premul)};
1815
+ }
1816
+
1817
+ template <
1818
+ typename U = T,
1819
+ std::enable_if_t<Vectorized<U>::float_num_vecs() == 4, int> = 0>
1820
+ float_vec_return_type dequantize(
1821
+ Vectorized<float> scale,
1822
+ Vectorized<float> zero_point) const {
1823
+ // unpacking unsigned as signed
1824
+ auto ret16 = unpack(_vec);
1825
+ auto ret32_0 = unpack(ret16.first);
1826
+ auto ret32_1 = unpack(ret16.second);
1827
+
1828
+ auto vecf_0 = convert_to_float(ret32_0.first);
1829
+ auto vecf_1 = convert_to_float(ret32_0.second);
1830
+
1831
+ auto vecf_2 = convert_to_float(ret32_1.first);
1832
+ auto vecf_3 = convert_to_float(ret32_1.second);
1833
+
1834
+ return {
1835
+ (vecf_0 - zero_point) * scale,
1836
+ (vecf_1 - zero_point) * scale,
1837
+ (vecf_2 - zero_point) * scale,
1838
+ (vecf_3 - zero_point) * scale };
1839
+ }
1840
+
1841
+ template <
1842
+ typename U = T,
1843
+ std::enable_if_t<Vectorized<U>::float_num_vecs() == 4, int> = 0>
1844
+ static Vectorized<T> quantize(
1845
+ const float_vec_return_type& rhs,
1846
+ float scale,
1847
+ int32_t zero_point,
1848
+ float inverse_scale) {
1849
+ auto vec_inverse = Vectorized<float>(inverse_scale);
1850
+ auto vec_zero_point = Vectorized<float>((float)zero_point);
1851
+
1852
+ auto vecf0 = rhs[0];
1853
+ auto vecf2 = rhs[1];
1854
+ auto vecf4 = rhs[2];
1855
+ auto vecf6 = rhs[3];
1856
+
1857
+ vecf0 = vecf0 * vec_inverse;
1858
+ vecf2 = vecf2 * vec_inverse;
1859
+ vecf4 = vecf4 * vec_inverse;
1860
+ vecf6 = vecf6 * vec_inverse;
1861
+
1862
+ vecf0 = vecf0.rint() + vec_zero_point;
1863
+ vecf2 = vecf2.rint() + vec_zero_point;
1864
+ vecf4 = vecf4.rint() + vec_zero_point;
1865
+ vecf6 = vecf6.rint() + vec_zero_point;
1866
+
1867
+ auto veci0 = convert_to_int(vecf0);
1868
+ auto veci2 = convert_to_int(vecf2);
1869
+ auto veci4 = convert_to_int(vecf4);
1870
+ auto veci6 = convert_to_int(vecf6);
1871
+
1872
+ auto vecshi0 = pack(veci0, veci2);
1873
+ auto vecshi2 = pack(veci4, veci6);
1874
+ auto ret = pack<int16_t, typename U::underlying>(vecshi0, vecshi2);
1875
+
1876
+ return Vectorized<T>{ret};
1877
+ }
1878
+
1879
+ template <
1880
+ typename U = T,
1881
+ std::enable_if_t<Vectorized<U>::int_num_vecs() == 4, int> = 0>
1882
+ static Vectorized<U> requantize_from_int(
1883
+ const int_vec_return_type& inp,
1884
+ float multiplier,
1885
+ int32_t zero_point) {
1886
+ Vectorized<float> vec_multiplier = Vectorized<float>(multiplier);
1887
+ Vectorized<int32_t> vec_zero_point = Vectorized<int32_t>(zero_point);
1888
+
1889
+ Vectorized<c10::qint32> vi0 = inp[0];
1890
+ Vectorized<c10::qint32> vi1 = inp[1];
1891
+ Vectorized<c10::qint32> vi2 = inp[2];
1892
+ Vectorized<c10::qint32> vi3 = inp[3];
1893
+
1894
+ auto vecf0 = convert_to_float(vi0.vec());
1895
+ auto vecf2 = convert_to_float(vi1.vec());
1896
+
1897
+ auto vecf4 = convert_to_float(vi2.vec());
1898
+ auto vecf6 = convert_to_float(vi3.vec());
1899
+
1900
+ vecf0 = vecf0 * vec_multiplier;
1901
+ vecf2 = vecf2 * vec_multiplier;
1902
+
1903
+ vecf4 = vecf4 * vec_multiplier;
1904
+ vecf6 = vecf6 * vec_multiplier;
1905
+
1906
+ vecf0 = vecf0.rint();
1907
+ vecf2 = vecf2.rint();
1908
+ vecf4 = vecf4.rint();
1909
+ vecf6 = vecf6.rint();
1910
+
1911
+ auto veci0 = convert_to_int(vecf0);
1912
+ auto veci2 = convert_to_int(vecf2);
1913
+ auto veci4 = convert_to_int(vecf4);
1914
+ auto veci6 = convert_to_int(vecf6);
1915
+
1916
+ veci0 = veci0 + vec_zero_point;
1917
+ veci2 = veci2 + vec_zero_point;
1918
+
1919
+ veci4 = veci4 + vec_zero_point;
1920
+ veci6 = veci6 + vec_zero_point;
1921
+
1922
+ auto vecshi0 = pack<int32_t, int16_t>(veci0, veci2);
1923
+ auto vecshi2 = pack<int32_t, int16_t>(veci4, veci6);
1924
+
1925
+ auto ret = pack<int16_t, typename U::underlying>(vecshi0, vecshi2);
1926
+
1927
+ return Vectorized<U>{ret};
1928
+ }
1929
+
1930
+ Vectorized<T> C10_ALWAYS_INLINE operator+(const Vectorized<T>& other) const {
1931
+ return Vectorized<T>{_vec + other._vec};
1932
+ }
1933
+
1934
+ Vectorized<T> C10_ALWAYS_INLINE operator-(const Vectorized<T>& other) const {
1935
+ return Vectorized<T>{_vec - other._vec};
1936
+ }
1937
+
1938
+ Vectorized<T> C10_ALWAYS_INLINE operator*(const Vectorized<T>& other) const {
1939
+ return Vectorized<T>{_vec * other._vec};
1940
+ }
1941
+
1942
+ Vectorized<T> C10_ALWAYS_INLINE operator/(const Vectorized<T>& other) const {
1943
+ return Vectorized<T>{_vec / other._vec};
1944
+ }
1945
+
1946
+ Vectorized<T> C10_ALWAYS_INLINE operator&(const Vectorized<T>& other) const {
1947
+ return Vectorized<T>{_vec & other._vec};
1948
+ }
1949
+
1950
+ Vectorized<T> C10_ALWAYS_INLINE operator|(const Vectorized<T>& other) const {
1951
+ return Vectorized<T>{_vec | other._vec};
1952
+ }
1953
+
1954
+ Vectorized<T> C10_ALWAYS_INLINE operator^(const Vectorized<T>& other) const {
1955
+ return Vectorized<T>{_vec ^ other._vec};
1956
+ }
1957
+ Vectorized<T> C10_ALWAYS_INLINE operator==(const Vectorized<T>& other) const {
1958
+ return Vectorized<T>{_vec == other._vec};
1959
+ }
1960
+
1961
+ Vectorized<T> C10_ALWAYS_INLINE operator!=(const Vectorized<T>& other) const {
1962
+ return Vectorized<T>{_vec != other._vec};
1963
+ }
1964
+ Vectorized<T> C10_ALWAYS_INLINE operator>(const Vectorized<T>& other) const {
1965
+ return Vectorized<T>{_vec > other._vec};
1966
+ }
1967
+ Vectorized<T> C10_ALWAYS_INLINE operator>=(const Vectorized<T>& other) const {
1968
+ return Vectorized<T>{_vec >= other._vec};
1969
+ }
1970
+
1971
+ Vectorized<T> C10_ALWAYS_INLINE operator<(const Vectorized<T>& other) const {
1972
+ return Vectorized<T>{_vec < other._vec};
1973
+ }
1974
+
1975
+ Vectorized<T> C10_ALWAYS_INLINE operator<=(const Vectorized<T>& other) const {
1976
+ return Vectorized<T>{_vec <= other._vec};
1977
+ }
1978
+
1979
+ Vectorized<T> C10_ALWAYS_INLINE eq(const Vectorized<T>& other) const {
1980
+ return Vectorized<T>{_vec.eq(other._vec)};
1981
+ }
1982
+ Vectorized<T> C10_ALWAYS_INLINE ne(const Vectorized<T>& other) const {
1983
+ return Vectorized<T>{_vec.ne(other._vec)};
1984
+ }
1985
+ Vectorized<T> C10_ALWAYS_INLINE gt(const Vectorized<T>& other) const {
1986
+ return Vectorized<T>{_vec.gt(other._vec)};
1987
+ }
1988
+ Vectorized<T> C10_ALWAYS_INLINE ge(const Vectorized<T>& other) const {
1989
+ return Vectorized<T>{_vec.ge(other._vec)};
1990
+ }
1991
+ Vectorized<T> C10_ALWAYS_INLINE lt(const Vectorized<T>& other) const {
1992
+ return Vectorized<T>{_vec.lt(other._vec)};
1993
+ }
1994
+ Vectorized<T> C10_ALWAYS_INLINE le(const Vectorized<T>& other) const {
1995
+ return Vectorized<T>{_vec.le(other._vec)};
1996
+ }
1997
+
1998
+ Vectorized<T> clamp_min(const Vectorized<T>& min) const {
1999
+ return Vectorized<T>{_vec.clamp_min(min._vec)};
2000
+ }
2001
+
2002
+ Vectorized<T> clamp_max(const Vectorized<T>& max) const {
2003
+ return Vectorized<T>{_vec.clamp_max(max._vec)};
2004
+ }
2005
+
2006
+ Vectorized<T> minimum(const Vectorized<T>& other) const {
2007
+ return Vectorized<T>{_vec.minimum(other._vec)};
2008
+ }
2009
+
2010
+ Vectorized<T> maximum(const Vectorized<T>& other) const {
2011
+ return Vectorized<T>{_vec.maximum(other._vec)};
2012
+ }
2013
+ };
2014
+
2015
+ DEFINE_CLAMP_MAXMIN_FUNCS(c10::quint8)
2016
+ DEFINE_CLAMP_MAXMIN_FUNCS(c10::qint8)
2017
+ DEFINE_CLAMP_MAXMIN_FUNCS(c10::qint32)
2018
+
2019
+ template <typename U = float>
2020
+ constexpr auto real_mask() {
2021
+ return (ZSimdVect<U>)ZSimdVectBinary<float>{0xFFFFFFFF, 0, 0xFFFFFFFF, 0};
2022
+ }
2023
+
2024
+ template <>
2025
+ constexpr auto real_mask<double>() {
2026
+ return (ZSimdVect<double>)ZSimdVectBinary<double>{0xFFFFFFFFFFFFFFFF, 0};
2027
+ }
2028
+
2029
+ template <typename U = float>
2030
+ constexpr auto image_mask() {
2031
+ return (ZSimdVect<U>)ZSimdVectBinary<U>{0, 0xFFFFFFFF, 0, 0xFFFFFFFF};
2032
+ }
2033
+
2034
+ template <>
2035
+ constexpr auto image_mask<double>() {
2036
+ return (ZSimdVect<double>)ZSimdVectBinary<double>{0, 0xFFFFFFFFFFFFFFFF};
2037
+ }
2038
+
2039
+ template <typename U = float>
2040
+ constexpr auto rsign_mask() {
2041
+ return ZSimdVect<U>{-0.f, 0.f, -0.f, 0.f};
2042
+ }
2043
+
2044
+ template <>
2045
+ constexpr auto rsign_mask<double>() {
2046
+ return ZSimdVect<double>{-0.0, 0.f};
2047
+ }
2048
+
2049
+ template <typename U = float>
2050
+ constexpr auto isign_mask() {
2051
+ return ZSimdVect<U>{0.0, -0.f, 0.0, -0.f};
2052
+ }
2053
+
2054
+ template <>
2055
+ constexpr auto isign_mask<double>() {
2056
+ return ZSimdVect<double>{0.0, -0.0};
2057
+ }
2058
+
2059
+ template <typename U = float>
2060
+ constexpr auto image_one() {
2061
+ return ZSimdVect<U>{0, 1.f, 0, 1.f};
2062
+ }
2063
+
2064
+ template <>
2065
+ constexpr auto image_one<double>() {
2066
+ return ZSimdVect<double>{0.0, 1.0};
2067
+ }
2068
+
2069
+ template <typename U = float>
2070
+ constexpr auto pi_half() {
2071
+ return ZSimdVect<U>{(float)(M_PI / 2.0), 0.f, (float)(M_PI / 2.0), 0.f};
2072
+ }
2073
+
2074
+ template <>
2075
+ constexpr auto pi_half<double>() {
2076
+ return ZSimdVect<double>{M_PI / 2.0, 0.0};
2077
+ }
2078
+
2079
+ template <typename U = float>
2080
+ constexpr auto image_half() {
2081
+ return ZSimdVect<U>{0, 0.5f, 0, 0.5f};
2082
+ }
2083
+
2084
+ template <>
2085
+ constexpr auto image_half<double>() {
2086
+ return ZSimdVect<double>{0.0, 0.5};
2087
+ }
2088
+
2089
+ template <typename U>
2090
+ constexpr U log2e_inv() {
2091
+ return static_cast<U>(1.4426950408889634);
2092
+ }
2093
+
2094
+ template <typename U>
2095
+ constexpr U log10e_inv() {
2096
+ return static_cast<U>(0.43429448190325176);
2097
+ }
2098
+
2099
+ template <typename T>
2100
+ struct Vectorized<T, std::enable_if_t<is_zarch_implemented_complex<T>()>> {
2101
+ public:
2102
+ using underline_type = decltype(std::declval<T>().imag());
2103
+ using value_type = T;
2104
+ using vtype = ZSimdVect<underline_type>;
2105
+ using vmaskType = ZSimdVectBinary<underline_type>;
2106
+ using vinner_type = Vectorized<underline_type>;
2107
+ using size_type = int;
2108
+ using vinner_data = typename Vectorized<underline_type>::vinner_data;
2109
+
2110
+ static constexpr size_type size() {
2111
+ return VECTOR_WIDTH / sizeof(value_type);
2112
+ }
2113
+
2114
+ private:
2115
+ vinner_type _vec;
2116
+
2117
+ public:
2118
+ Vectorized() {}
2119
+
2120
+ C10_ALWAYS_INLINE Vectorized(const vinner_data &v) : _vec{v.first, v.second} {}
2121
+
2122
+ template <typename U = T, std::enable_if_t<(sizeof(U) == 16), int> = 0>
2123
+ C10_ALWAYS_INLINE Vectorized(T s1, T s2)
2124
+ : _vec{s1.real(), s1.imag(), s2.real(), s2.imag()} {}
2125
+
2126
+ template <typename U = T, std::enable_if_t<(sizeof(U) == 8), int> = 0>
2127
+ C10_ALWAYS_INLINE Vectorized(T s1, T s2, T s3, T s4)
2128
+ : _vec{
2129
+ s1.real(),
2130
+ s1.imag(),
2131
+ s2.real(),
2132
+ s2.imag(),
2133
+ s3.real(),
2134
+ s3.imag(),
2135
+ s4.real(),
2136
+ s4.imag()} {}
2137
+
2138
+ template <typename U = T, std::enable_if_t<(sizeof(U) == 16), int> = 0>
2139
+ C10_ALWAYS_INLINE Vectorized(T s) : Vectorized<T>(s, s) {}
2140
+
2141
+ template <typename U = T, std::enable_if_t<(sizeof(U) == 8), int> = 0>
2142
+ C10_ALWAYS_INLINE Vectorized(T s) : Vectorized<T>(s, s, s, s) {}
2143
+
2144
+ C10_ALWAYS_INLINE operator vinner_type() const {
2145
+ return _vec;
2146
+ }
2147
+
2148
+ C10_ALWAYS_INLINE const vinner_type& vec() const {
2149
+ return _vec;
2150
+ }
2151
+
2152
+ C10_ALWAYS_INLINE operator vinner_data() const {
2153
+ return _vec.data();
2154
+ }
2155
+
2156
+ C10_ALWAYS_INLINE vinner_data data() const {
2157
+ return _vec.data();
2158
+ }
2159
+
2160
+ static Vectorized<T> C10_ALWAYS_INLINE
2161
+ loadu(const void* ptr, int count = size()) {
2162
+ return Vectorized<T>{vinner_type::loadu(ptr, 2 * count)};
2163
+ }
2164
+
2165
+ void C10_ALWAYS_INLINE store(void* ptr, int count = size()) const {
2166
+ return _vec.store(ptr, 2 * count);
2167
+ }
2168
+
2169
+ static Vectorized<T> blendv(
2170
+ const Vectorized<T>& a,
2171
+ const Vectorized<T>& b,
2172
+ const Vectorized<T>& mask) {
2173
+ // convert std::complex<V> index mask to V index mask: xy -> xxyy
2174
+ vinner_type vmask = mask.vec();
2175
+ auto mask_complex = vinner_type(
2176
+ vec_mergeh(vmask.vec0(), vmask.vec0()),
2177
+ vec_mergeh(vmask.vec1(), vmask.vec1()));
2178
+ return Vectorized<T>{vinner_type::blendv(a.vec(), b.vec(), mask_complex)};
2179
+ }
2180
+
2181
+ template <int64_t mask>
2182
+ static auto C10_ALWAYS_INLINE
2183
+ blend(const Vectorized<T>& a, const Vectorized<T>& b) {
2184
+ constexpr int mask_complex = maskForComplex<sizeof(T)>(mask);
2185
+ return Vectorized<T>{
2186
+ vinner_type::template blend<mask_complex>(a.vec(), b.vec())};
2187
+ }
2188
+
2189
+ template <typename step_t, typename U = T>
2190
+ static std::enable_if_t<sizeof(U) == 16, Vectorized<T>> arange(
2191
+ T base = 0,
2192
+ step_t step = static_cast<step_t>(1)) {
2193
+ return Vectorized<T>(base, base + step);
2194
+ }
2195
+
2196
+ template <typename step_t, typename U = T>
2197
+ static std::enable_if_t<sizeof(U) == 8, Vectorized<T>> arange(
2198
+ T base = 0,
2199
+ step_t step = static_cast<step_t>(1)) {
2200
+ return Vectorized<T>(
2201
+ base,
2202
+ base + step,
2203
+ base + value_type(2) * step,
2204
+ base + value_type(3) * step);
2205
+ }
2206
+
2207
+ template <int16_t Z, int16_t C>
2208
+ static inline std::enable_if_t<(Z >= C), Vectorized<T>> set_inner(
2209
+ const Vectorized<T>& a,
2210
+ const Vectorized<T>& b,
2211
+ size_t count) {
2212
+ return b;
2213
+ }
2214
+
2215
+ template <int16_t Z, int16_t C>
2216
+ static inline std::enable_if_t<(Z < C), Vectorized<T>> set_inner(
2217
+ const Vectorized<T>& a,
2218
+ const Vectorized<T>& b,
2219
+ size_t count) {
2220
+ if (count == Z)
2221
+ return blend<allbitset(Z)>(a, b);
2222
+ else
2223
+ return set_inner<Z + 1, C>(a, b, count);
2224
+ }
2225
+
2226
+ static Vectorized<T> set(
2227
+ const Vectorized<T>& a,
2228
+ const Vectorized<T>& b,
2229
+ size_t count = size()) {
2230
+ if (count == 0)
2231
+ return a;
2232
+ return set_inner<1, size()>(a, b, count);
2233
+ }
2234
+
2235
+ const T& operator[](int idx) const = delete;
2236
+ T& operator[](int idx) = delete;
2237
+
2238
+ template <
2239
+ typename U = T,
2240
+ std::enable_if_t<std::is_same<U, c10::complex<float>>::value, int> = 0>
2241
+ Vectorized<T> mapOrdinary(T (*const f)(const T&)) const {
2242
+ auto v0 = _vec.vec0();
2243
+ auto v1 = _vec.vec1();
2244
+ return Vectorized<T>{
2245
+ f(T(v0[0], v0[1])),
2246
+ f(T(v0[2], v0[3])),
2247
+ f(T(v1[0], v1[1])),
2248
+ f(T(v1[2], v1[3]))};
2249
+ }
2250
+
2251
+ template <
2252
+ typename U = T,
2253
+ std::enable_if_t<std::is_same<U, c10::complex<double>>::value, int> = 0>
2254
+ Vectorized<U> mapOrdinary(T (*const f)(const T&)) const {
2255
+ auto v0 = _vec.vec0();
2256
+ auto v1 = _vec.vec1();
2257
+ return Vectorized<T>{f(T(v0[0], v0[1])), f(T(v1[0], v1[1]))};
2258
+ }
2259
+
2260
+ template <
2261
+ typename U = T,
2262
+ std::enable_if_t<std::is_same<U, c10::complex<float>>::value, int> = 0>
2263
+ Vectorized<T> mapOrdinary(T (*const f)(T)) const {
2264
+ auto v0 = _vec.vec0();
2265
+ auto v1 = _vec.vec1();
2266
+ return Vectorized<T>{
2267
+ f(T(v0[0], v0[1])),
2268
+ f(T(v0[2], v0[3])),
2269
+ f(T(v1[0], v1[1])),
2270
+ f(T(v1[2], v1[3]))};
2271
+ }
2272
+
2273
+ template <
2274
+ typename U = T,
2275
+ std::enable_if_t<std::is_same<U, c10::complex<double>>::value, int> = 0>
2276
+ Vectorized<T> mapOrdinary(T (*const f)(T)) const {
2277
+ auto v0 = _vec.vec0();
2278
+ auto v1 = _vec.vec1();
2279
+ return Vectorized<T>{f(T(v0[0], v0[1])), f(T(v1[0], v1[1]))};
2280
+ }
2281
+
2282
+ template <
2283
+ typename U = T,
2284
+ std::enable_if_t<std::is_same<U, c10::complex<float>>::value, int> = 0>
2285
+ inline Vectorized<T> mapOrdinary(
2286
+ T (*const f)(const T&, const T&),
2287
+ const Vectorized<T>& b) const {
2288
+ auto v0 = _vec.vec0();
2289
+ auto v1 = _vec.vec1();
2290
+ auto bvec = b.vec();
2291
+ auto b0 = bvec.vec0();
2292
+ auto b1 = bvec.vec1();
2293
+ T a00 = f(T(v0[0], v0[1]), T(b0[0], b0[1]));
2294
+ T a01 = f(T(v0[2], v0[3]), T(b0[2], b0[3]));
2295
+ T a02 = f(T(v1[0], v1[1]), T(b1[0], b1[1]));
2296
+ T a03 = f(T(v1[2], v1[3]), T(b1[2], b1[3]));
2297
+ return Vectorized<T>{a00, a01, a02, a03};
2298
+ }
2299
+
2300
+ template <
2301
+ typename U = T,
2302
+ std::enable_if_t<std::is_same<U, c10::complex<double>>::value, int> = 0>
2303
+ inline Vectorized<T> mapOrdinary(
2304
+ T (*const f)(const T&, const T&),
2305
+ const Vectorized<T>& b) const {
2306
+ auto v0 = _vec.vec0();
2307
+ auto v1 = _vec.vec1();
2308
+ auto bvec = b.vec();
2309
+ auto b0 = bvec.vec0();
2310
+ auto b1 = bvec.vec1();
2311
+ U a00 = f(U(v0[0], v0[1]), U(b0[0], b0[1]));
2312
+ U a01 = f(U(v1[0], v1[1]), U(b1[0], b1[1]));
2313
+ return Vectorized<T>{a00, a01};
2314
+ }
2315
+
2316
+ Vectorized<T> C10_ALWAYS_INLINE operator+(const Vectorized<T>& other) const {
2317
+ return Vectorized<T>{_vec + other._vec};
2318
+ }
2319
+
2320
+ Vectorized<T> C10_ALWAYS_INLINE operator-(const Vectorized<T>& other) const {
2321
+ return Vectorized<T>{_vec - other._vec};
2322
+ }
2323
+
2324
+ Vectorized<T> inline operator*(const Vectorized<T>& b) const {
2325
+ // (a + bi) * (c + di) = (ac - bd) + (ad + bc)i
2326
+ vinner_type bv = b.vec();
2327
+ #if !defined(ZVECTOR_SIMULATE_X86_MULT)
2328
+ // this is more z/Architecture-friendly than simulating the x86 horizontal approach
2329
+ vinner_type vi = bv.mergeo();
2330
+ vinner_type vr = bv.mergee();
2331
+ vi = vi ^ rsign_mask<underline_type>();
2332
+ vinner_type ret = _vec * vr;
2333
+ vinner_type vx_swapped = _vec.swapped();
2334
+ ret = fmadd(vx_swapped, vi, ret);
2335
+ #else
2336
+ vinner_type ac_bd = _vec * b;
2337
+ vinner_type d_c = bv.swapped();
2338
+ d_c = d_c ^ isign_mask<underline_type>();
2339
+ vinner_type ad_bc = _vec * d_c;
2340
+ vinner_type ret = vinner_type::horizontal_sub_perm(ac_bd, ad_bc);
2341
+ #endif
2342
+ return Vectorized<T>{ret};
2343
+ }
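In the non-simulated branch above, operator* evaluates (a + bi)(c + di) = (ac - bd) + (ad + bc)i without a horizontal step: mergee broadcasts b's real parts, mergeo ^ rsign_mask produces the imaginary parts with the even lane negated, and one multiply plus one fmadd on the swapped operand finishes the job. A scalar sketch of that lane arithmetic for a single complex value (plain doubles, illustrative name):

#include <array>

// Scalar model of the lane arithmetic in operator* for one complex value
// stored as {real, imag}; the result is (ac - bd) + (ad + bc)i.
inline std::array<double, 2> complex_mul_ref(std::array<double, 2> x,   // {a, b}
                                             std::array<double, 2> y) { // {c, d}
  // vr = mergee(y): broadcast real part            -> {c, c}
  // vi = mergeo(y) ^ rsign_mask: negate even lane  -> {-d, d}
  std::array<double, 2> vr = {y[0], y[0]};
  std::array<double, 2> vi = {-y[1], y[1]};
  std::array<double, 2> swapped = {x[1], x[0]};               // {b, a}
  // ret = x * vr; ret = fmadd(swapped, vi, ret)
  return {x[0] * vr[0] + swapped[0] * vi[0],                  // a*c - b*d
          x[1] * vr[1] + swapped[1] * vi[1]};                 // b*c + a*d
}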
2344
+
2345
+ template <
2346
+ typename U = T,
2347
+ std::enable_if_t<std::is_same<U, c10::complex<float>>::value, int> = 0>
2348
+ static typename Vectorized<T>::vinner_type real_neg(const typename Vectorized<T>::vinner_type &a)
2349
+ {
2350
+ const auto swap_mask = ZSimdVectBinary<uint8_t>{
2351
+ 0, 1, 2, 3, 20, 21, 22, 23, 8, 9, 10, 11, 28, 29, 30, 31};
2352
+
2353
+ auto a_neg = a.neg();
2354
+ vtype v0 = vec_perm(a_neg.vec0(), a.vec0(), swap_mask);
2355
+ vtype v1 = vec_perm(a_neg.vec1(), a.vec1(), swap_mask);
2356
+ return {v0, v1};
2357
+ }
2358
+
2359
+ template <
2360
+ typename U = T,
2361
+ std::enable_if_t<std::is_same<U, c10::complex<double>>::value, int> = 0>
2362
+ static typename Vectorized<T>::vinner_type real_neg(const typename Vectorized<T>::vinner_type &a)
2363
+ {
2364
+ auto a_neg = a.neg();
2365
+ auto v0 = vec_permi(a_neg.vec0(), a.vec0(), 1);
2366
+ auto v1 = vec_permi(a_neg.vec1(), a.vec1(), 1);
2367
+ return { v0, v1 };
2368
+ }
2369
+
2370
+ Vectorized<T> inline operator/(const Vectorized<T>& b) const {
2371
+ // Unfortunately, this breaks some tests
2372
+ // Implement it like it's done for avx2
2373
+ auto fabs_cd = b.vec().abs(); // |c| |d|
2374
+ auto fabs_dc = fabs_cd.swapped(); // |d| |c|
2375
+ auto scale = vinner_type {1.0} / maximum(fabs_cd, fabs_dc); // 1/sc 1/sc
2376
+ auto a2 = vec() * scale; // a/sc b/sc
2377
+ auto b2 = b.vec() * scale; // c/sc d/sc
2378
+ auto acbd2 = a2 * b2; // ac/sc^2 bd/sc^2
2379
+
2380
+ auto dc2 = b2.swapped(); // d/sc c/sc
2381
+ dc2 = Vectorized<T>::real_neg(dc2); // -d/sc c/sc
2382
+ auto adbc2 = a2 * dc2; // -ad/sc^2 bc/sc^2
2383
+ auto sum1 = acbd2 + acbd2.swapped(); // (ac+bd)/sc^2 (ac+bd)/sc^2
2384
+ auto sum2 = adbc2 + adbc2.swapped(); // (bc-ad)/sc^2 (bc-ad)/sc^2
2385
+ auto res2 = vinner_type::mergee(sum1, sum2); // (ac+bd)/sc^2 (bc-ad)/sc^2
2386
+
2387
+ // get the denominator
2388
+ auto denom2 = Vectorized<T>{b2}.abs_2_(); // (c^2+d^2)/sc^2 (c^2+d^2)/sc^2
2389
+ res2 = res2 / denom2;
2390
+ return Vectorized<T>{ res2 };
2391
+ }
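operator/ above avoids overflow and underflow in the textbook (ac + bd, bc - ad)/(c^2 + d^2) formula by first scaling both operands by 1/max(|c|, |d|), much like the avx2 backends. A scalar sketch of the same computation for one complex value (plain doubles, illustrative name):

#include <algorithm>
#include <array>
#include <cmath>

// Scalar model of the scaled complex division used above:
// (a + bi) / (c + di) with both operands pre-scaled by 1/max(|c|, |d|).
inline std::array<double, 2> complex_div_ref(std::array<double, 2> x,   // {a, b}
                                             std::array<double, 2> y) { // {c, d}
  double sc = std::max(std::fabs(y[0]), std::fabs(y[1]));   // max(|c|, |d|)
  double a2 = x[0] / sc, b2 = x[1] / sc;                    // a/sc, b/sc
  double c2 = y[0] / sc, d2 = y[1] / sc;                    // c/sc, d/sc
  double denom = c2 * c2 + d2 * d2;                         // (c^2 + d^2) / sc^2
  return {(a2 * c2 + b2 * d2) / denom,                      // real part
          (b2 * c2 - a2 * d2) / denom};                     // imaginary part
}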
2392
+
2393
+ Vectorized<T> angle2_() const {
2394
+ auto b_a = _vec.swapped(); // b a
2395
+ return Vectorized<T>{_vec.atan2(b_a).swapped()};
2396
+ }
2397
+
2398
+ Vectorized<T> angle() const {
2399
+ return angle2_().real();
2400
+ }
2401
+
2402
+ Vectorized<T> atan() const {
2403
+ // atan(x) = i/2 * ln((i + z)/(i - z))
2404
+ auto ione = Vectorized<T>{vinner_type(image_one<underline_type>())};
2405
+ auto sum = ione + *this;
2406
+ auto sub = ione - *this;
2407
+ auto ln = (sum / sub).log(); // ln((i + z)/(i - z))
2408
+ return ln *
2409
+ Vectorized<T>{vinner_type(image_half<underline_type>())}; // i/2*ln()
2410
+ }
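The identity used by atan() above, atan(z) = (i/2) * ln((i + z)/(i - z)), can be checked directly against std::atan on std::complex. A small standalone check (illustrative only, not part of the header):

#include <complex>
#include <cstdio>

// Verify atan(z) == (i/2) * log((i + z) / (i - z)) at a sample point.
int main() {
  std::complex<double> z(0.3, -0.7);
  std::complex<double> i(0.0, 1.0);
  std::complex<double> lhs = std::atan(z);
  std::complex<double> rhs = (i / 2.0) * std::log((i + z) / (i - z));
  std::printf("std::atan: %.15f %+.15fi\n", lhs.real(), lhs.imag());
  std::printf("identity:  %.15f %+.15fi\n", rhs.real(), rhs.imag());
}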
2411
+
2412
+ Vectorized<T> atanh() const {
2413
+ return mapOrdinary(std::atanh);
2414
+ }
2415
+
2416
+ Vectorized<T> asin() const {
2417
+ // asin(x)
2418
+ // = -i*ln(iz + sqrt(1 - z^2))
2419
+ // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi)))
2420
+ // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi))
2421
+ #if 1
2422
+ vinner_type cnj = conj().vec();
2423
+ vinner_type b_a = cnj.swapped();
2424
+ vinner_type ab = cnj * b_a;
2425
+ vinner_type im = ab + ab;
2426
+ vinner_type val_2 = _vec * _vec;
2427
+ vinner_type val_2_swapped = val_2.swapped();
2428
+ vinner_type re = vinner_type::horizontal_sub_perm(val_2, val_2_swapped);
2429
+ re = vinner_type(static_cast<underline_type>(1)) - re;
2430
+ constexpr int blend_mask =
2431
+ blend_choice<T>(); // 0x0A for complex<double> , 0xAA for complex<float>
2432
+ vinner_type blendx = vinner_type::template blend<blend_mask>(re, im);
2433
+ auto root = Vectorized<T>(blendx).sqrt();
2434
+ auto ln = Vectorized<T>(Vectorized<T>(b_a) + root).log();
2435
+ return Vectorized<T>(ln.vec().swapped()).conj();
2436
+ #else
2437
+ return mapOrdinary(std::asin);
2438
+ #endif
2439
+ }
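The vectorized branch above builds 1 - z^2 lane-wise, with real part 1 - (a^2 - b^2) and imaginary part -2ab, before taking the vector sqrt and log of asin(z) = -i * ln(iz + sqrt(1 - z^2)). The identity itself can be sanity-checked with std::complex (illustrative only):

#include <complex>
#include <cstdio>

// Verify asin(z) == -i * log(i*z + sqrt(1 - z*z)) at a sample point.
int main() {
  std::complex<double> z(0.4, 0.25);
  std::complex<double> i(0.0, 1.0);
  std::complex<double> lhs = std::asin(z);
  std::complex<double> rhs = -i * std::log(i * z + std::sqrt(1.0 - z * z));
  std::printf("std::asin: %.15f %+.15fi\n", lhs.real(), lhs.imag());
  std::printf("identity:  %.15f %+.15fi\n", rhs.real(), rhs.imag());
}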
2440
+
2441
+ Vectorized<T> acos() const {
2442
+ // acos(x) = pi/2 - asin(x)
2443
+ return Vectorized<T>(vinner_type(pi_half<underline_type>())) - asin();
2444
+ }
2445
+
2446
+ Vectorized<T> sin() const {
2447
+ return mapOrdinary(std::sin);
2448
+ }
2449
+ Vectorized<T> sinh() const {
2450
+ return mapOrdinary(std::sinh);
2451
+ }
2452
+ Vectorized<T> cos() const {
2453
+ return mapOrdinary(std::cos);
2454
+ }
2455
+ Vectorized<T> cosh() const {
2456
+ return mapOrdinary(std::cosh);
2457
+ }
2458
+ Vectorized<T> ceil() const {
2459
+ return Vectorized<T>{_vec.ceil()};
2460
+ }
2461
+ Vectorized<T> floor() const {
2462
+ return Vectorized<T>{_vec.floor()};
2463
+ }
2464
+ Vectorized<T> neg() const {
2465
+ return Vectorized<T>(_vec.neg());
2466
+ }
2467
+ Vectorized<T> round() const {
2468
+ return Vectorized<T>{_vec.round()};
2469
+ }
2470
+ Vectorized<T> tan() const {
2471
+ return mapOrdinary(std::tan);
2472
+ }
2473
+ Vectorized<T> tanh() const {
2474
+ return mapOrdinary(std::tanh);
2475
+ }
2476
+ Vectorized<T> trunc() const {
2477
+ return Vectorized<T>{_vec.trunc()};
2478
+ }
2479
+
2480
+ Vectorized<T> C10_ALWAYS_INLINE operator&(const Vectorized<T>& other) const {
2481
+ return Vectorized<T>{_vec & other._vec};
2482
+ }
2483
+
2484
+ Vectorized<T> C10_ALWAYS_INLINE operator|(const Vectorized<T>& other) const {
2485
+ return Vectorized<T>{_vec | other._vec};
2486
+ }
2487
+
2488
+ Vectorized<T> C10_ALWAYS_INLINE operator^(const Vectorized<T>& other) const {
2489
+ return Vectorized<T>{_vec ^ other._vec};
2490
+ }
2491
+ Vectorized<T> C10_ALWAYS_INLINE operator==(const Vectorized<T>& other) const {
2492
+ return Vectorized<T>{_vec == other._vec};
2493
+ }
2494
+
2495
+ Vectorized<T> C10_ALWAYS_INLINE operator!=(const Vectorized<T>& other) const {
2496
+ return Vectorized<T>{_vec != other._vec};
2497
+ }
2498
+
2499
+ Vectorized<T> C10_ALWAYS_INLINE eq(const Vectorized<T>& other) const {
2500
+ auto eq = _vec.eq(other._vec); // compares real and imag individually
2501
+ // If both real numbers and imag numbers are equal, then the complex numbers are equal
2502
+ auto real = eq & vinner_type(real_mask<underline_type>());
2503
+ auto imag = (eq & vinner_type(image_mask<underline_type>())).swapped();
2504
+ return Vectorized<T>{real & imag};
2505
+ }
2506
+ Vectorized<T> C10_ALWAYS_INLINE ne(const Vectorized<T>& other) const {
2507
+ auto ne = _vec.ne(other._vec); // compares real and imag individually
2508
+ // If either real numbers or imag numbers are not equal, then the complex numbers are not equal
2509
+ auto real = ne & vinner_type(real_mask<underline_type>());
2510
+ auto imag = (ne & vinner_type(image_mask<underline_type>())).swapped();
2511
+ return Vectorized<T>{real | imag};
2512
+ }
2513
+
2514
+ Vectorized<T> real() const {
2515
+ return Vectorized<T>(_vec & vinner_type(real_mask<underline_type>()));
2516
+ }
2517
+ Vectorized<T> imag_() const {
2518
+ return Vectorized<T>(_vec & vinner_type(image_mask<underline_type>()));
2519
+ }
2520
+ Vectorized<T> imag() const {
2521
+ return Vectorized<T>{
2522
+ (_vec & vinner_type(image_mask<underline_type>())).swapped()};
2523
+ }
2524
+
2525
+ Vectorized<T> conj() const {
2526
+ return Vectorized<T>(_vec ^ vinner_type(isign_mask<underline_type>()));
2527
+ }
2528
+
2529
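+ // Squared magnitude a^2 + b^2 of each complex element, returned as inner-vector
+ // data with the same value in both lanes of the pair.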
+ vinner_data abs_2_() const {
2530
+ auto a = _vec * _vec;
2531
+ a = a + a.swapped();
2532
+ return a.mergee().data();
2533
+ }
2534
+
2535
+ static T abs_helper(const T &value)
2536
+ {
2537
+ return T(std::abs(value));
2538
+ }
2539
+
2540
+ Vectorized<T> abs() const {
2541
+ return mapOrdinary(abs_helper);
2542
+ }
2543
+
2544
+ Vectorized<T> exp() const {
2545
+ return mapOrdinary(std::exp);
2546
+ }
2547
+
2548
+ Vectorized<T> exp2() const {
2549
+ return mapOrdinary(exp2_impl);
2550
+ }
2551
+
2552
+ Vectorized<T> expm1() const {
2553
+ return mapOrdinary(std::expm1);
2554
+ }
2555
+
2556
+ Vectorized<T> log() const {
2557
+ return mapOrdinary(std::log);
2558
+ }
2559
+
2560
+ Vectorized<T> log2() const {
2561
+ // log2eB_inv
2562
+ auto ret = log();
2563
+ return Vectorized<T>{ret._vec * vinner_type(log2e_inv<underline_type>())};
2564
+ }
2565
+
2566
+ Vectorized<T> log10() const {
2567
+ auto ret = log();
2568
+ return Vectorized<T>{ret._vec * vinner_type(log10e_inv<underline_type>())};
2569
+ }
2570
+
2571
+ Vectorized<T> log1p() const {
2572
+ return mapOrdinary(std::log1p);
2573
+ }
2574
+
2575
+ Vectorized<T> sgn() const {
2576
+ return mapOrdinary(at::native::sgn_impl);
2577
+ }
2578
+
2579
+ Vectorized<T> pow(const Vectorized<T>& exp) const {
2580
+ return mapOrdinary(std::pow, exp);
2581
+ }
2582
+
2583
+ Vectorized<T> sqrt() const {
2584
+ return mapOrdinary(std::sqrt);
2585
+ }
2586
+
2587
+ Vectorized<T> reciprocal() const {
2588
+ // re + im*i = (a + bi) / (c + di)
2589
+ // re = (ac + bd)/abs_2() = c/abs_2()
2590
+ // im = (bc - ad)/abs_2() = d/abs_2()
2591
+ vinner_type c_d = _vec ^ vinner_type(isign_mask<underline_type>());
2592
+ vinner_type abs = abs_2_();
2593
+ return Vectorized<T>{c_d / abs};
2594
+ }
2595
+
2596
+ Vectorized<T> rsqrt() const {
2597
+ return sqrt().reciprocal();
2598
+ }
2599
+
2600
+ Vectorized<T> operator<(const Vectorized<T>& other) const {
2601
+ TORCH_CHECK(false, "not supported for complex numbers");
2602
+ }
2603
+
2604
+ Vectorized<T> operator<=(const Vectorized<T>& other) const {
2605
+ TORCH_CHECK(false, "not supported for complex numbers");
2606
+ }
2607
+
2608
+ Vectorized<T> operator>(const Vectorized<T>& other) const {
2609
+ TORCH_CHECK(false, "not supported for complex numbers");
2610
+ }
2611
+
2612
+ Vectorized<T> operator>=(const Vectorized<T>& other) const {
2613
+ TORCH_CHECK(false, "not supported for complex numbers");
2614
+ }
2615
+
2616
+ Vectorized<T> lt(const Vectorized<T>& other) const {
2617
+ TORCH_CHECK(false, "not supported for complex numbers");
2618
+ }
2619
+
2620
+ Vectorized<T> le(const Vectorized<T>& other) const {
2621
+ TORCH_CHECK(false, "not supported for complex numbers");
2622
+ }
2623
+
2624
+ Vectorized<T> gt(const Vectorized<T>& other) const {
2625
+ TORCH_CHECK(false, "not supported for complex numbers");
2626
+ }
2627
+
2628
+ Vectorized<T> ge(const Vectorized<T>& other) const {
2629
+ TORCH_CHECK(false, "not supported for complex numbers");
2630
+ }
2631
+ };
2632
+
2633
+ template <typename T, std::enable_if_t<(sizeof(T) == 8), int> = 0>
2634
+ std::pair<Vectorized<T>, Vectorized<T>> inline inner_interleave2(
2635
+ const Vectorized<T>& a,
2636
+ const Vectorized<T>& b) {
2637
+ // inputs:
2638
+ // a = {a0, a1, a2, a3}
2639
+ // b = {b0, b1, b2, b3}
2640
+ using vtype = typename Vectorized<T>::vtype;
2641
+ vtype ab00 = vec_permi(a.vec0(), b.vec0(), 0);
2642
+ vtype ab11 = vec_permi(a.vec0(), b.vec0(), 3);
2643
+ vtype ab2_00 = vec_permi(a.vec1(), b.vec1(), 0);
2644
+ vtype ab2_11 = vec_permi(a.vec1(), b.vec1(), 3);
2645
+ // return {a0, b0, a1, b1}
2646
+ // {a2, b2, a3, b3}
2647
+ return std::make_pair(
2648
+ Vectorized<T>{ab00, ab11}, Vectorized<T>{ab2_00, ab2_11});
2649
+ }
2650
+
2651
+ template <typename T, std::enable_if_t<(sizeof(T) == 8), int> = 0>
2652
+ std::pair<Vectorized<T>, Vectorized<T>> inline inner_deinterleave2(
2653
+ const Vectorized<T>& a,
2654
+ const Vectorized<T>& b) {
2655
+ // inputs:
2656
+ // a = {a0, b0, a1, b1}
2657
+ // b = {a2, b2, a3, b3}
2658
+ using vtype = typename Vectorized<T>::vtype;
2659
+ vtype aa01 = vec_permi(a.vec0(), a.vec1(), 0);
2660
+ vtype aa23 = vec_permi(b.vec0(), b.vec1(), 0);
2661
+
2662
+ vtype bb_01 = vec_permi(a.vec0(), a.vec1(), 3);
2663
+ vtype bb_23 = vec_permi(b.vec0(), b.vec1(), 3);
2664
+
2665
+ // swap lanes:
2666
+ // return {a0, a1, a2, a3}
2667
+ // {b0, b1, b2, b3}
2668
+ return std::make_pair(Vectorized<T>{aa01, aa23}, Vectorized<T>{bb_01, bb_23});
2669
+ }
2670
+
2671
+ template <typename T, std::enable_if_t<(sizeof(T) == 4), int> = 0>
2672
+ std::pair<Vectorized<T>, Vectorized<T>> inline inner_interleave2(
2673
+ const Vectorized<T>& a,
2674
+ const Vectorized<T>& b) {
2675
+ // inputs:
2676
+ // a = {a0, a1, a2, a3,, a4, a5, a6, a7}
2677
+ // b = {b0, b1, b2, b3,, b4, b5, b6, b7}
2678
+ using vtype = typename Vectorized<T>::vtype;
2679
+ vtype ab0011 = vec_mergeh(a.vec0(), b.vec0());
2680
+ vtype ab2233 = vec_mergel(a.vec0(), b.vec0());
2681
+
2682
+ vtype ab2_0011 = vec_mergeh(a.vec1(), b.vec1());
2683
+ vtype ab2_2233 = vec_mergel(a.vec1(), b.vec1());
2684
+ // group cols crossing lanes:
2685
+ // return {a0, b0, a1, b1,, a2, b2, a3, b3}
2686
+ // {a4, b4, a5, b5,, a6, b6, a7, b7}
2687
+
2688
+ return std::make_pair(
2689
+ Vectorized<T>{ab0011, ab2233}, Vectorized<T>{ab2_0011, ab2_2233});
2690
+ }
2691
+
2692
+ template <typename T, std::enable_if_t<(sizeof(T) == 4), int> = 0>
2693
+ std::pair<Vectorized<T>, Vectorized<T>> inline inner_deinterleave2(
2694
+ const Vectorized<T>& a,
2695
+ const Vectorized<T>& b) {
2696
+ // inputs:
2697
+ // a = {a0, b0, a1, b1,, a2, b2, a3, b3}
2698
+ // b = {a4, b4, a5, b5,, a6, b6, a7, b7}
2699
+ using vtype = typename Vectorized<T>::vtype;
2700
+ // {a0,a2,b0,b2} {a1,a3,b1,b3}
2701
+ vtype a0a2b0b2 = vec_mergeh(a.vec0(), a.vec1());
2702
+ vtype a1a3b1b3 = vec_mergel(a.vec0(), a.vec1());
2703
+
2704
+ vtype aa0123 = vec_mergeh(a0a2b0b2, a1a3b1b3);
2705
+ vtype bb0123 = vec_mergel(a0a2b0b2, a1a3b1b3);
2706
+
2707
+ vtype a0a2b0b2_2 = vec_mergeh(b.vec0(), b.vec1());
2708
+ vtype a1a3b1b3_2 = vec_mergel(b.vec0(), b.vec1());
2709
+
2710
+ vtype aa0123_2 = vec_mergeh(a0a2b0b2_2, a1a3b1b3_2);
2711
+ vtype bb0123_2 = vec_mergel(a0a2b0b2_2, a1a3b1b3_2);
2712
+
2713
+ // it could be done with vec_perm, too
2714
+ // swap lanes:
2715
+ // return {a0, a1, a2, a3,, a4, a5, a6, a7}
2716
+ // {b0, b1, b2, b3,, b4, b5, b6, b7}
2717
+
2718
+ return std::make_pair(
2719
+ Vectorized<T>{aa0123, aa0123_2}, Vectorized<T>{bb0123, bb0123_2});
2720
+ }
2721
+
2722
+ template <>
2723
+ std::pair<Vectorized<float>, Vectorized<float>> inline interleave2<float>(
2724
+ const Vectorized<float>& a,
2725
+ const Vectorized<float>& b) {
2726
+ return inner_interleave2<float>(a, b);
2727
+ }
2728
+
2729
+ template <>
2730
+ std::pair<Vectorized<int32_t>, Vectorized<int32_t>> inline interleave2<int32_t>(
2731
+ const Vectorized<int32_t>& a,
2732
+ const Vectorized<int32_t>& b) {
2733
+ return inner_interleave2<int32_t>(a, b);
2734
+ }
2735
+
2736
+ template <>
2737
+ std::pair<Vectorized<double>, Vectorized<double>> inline interleave2<double>(
2738
+ const Vectorized<double>& a,
2739
+ const Vectorized<double>& b) {
2740
+ return inner_interleave2<double>(a, b);
2741
+ }
2742
+
2743
+ template <>
2744
+ std::pair<Vectorized<int64_t>, Vectorized<int64_t>> inline interleave2<int64_t>(
2745
+ const Vectorized<int64_t>& a,
2746
+ const Vectorized<int64_t>& b) {
2747
+ return inner_interleave2<int64_t>(a, b);
2748
+ }
2749
+
2750
+ template <>
2751
+ std::pair<Vectorized<float>, Vectorized<float>> inline deinterleave2<float>(
2752
+ const Vectorized<float>& a,
2753
+ const Vectorized<float>& b) {
2754
+ return inner_deinterleave2<float>(a, b);
2755
+ }
2756
+
2757
+ template <>
2758
+ std::pair<Vectorized<int32_t>, Vectorized<int32_t>> inline deinterleave2<
2759
+ int32_t>(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
2760
+ return inner_deinterleave2<int32_t>(a, b);
2761
+ }
2762
+
2763
+ template <>
2764
+ std::pair<Vectorized<double>, Vectorized<double>> inline deinterleave2<double>(
2765
+ const Vectorized<double>& a,
2766
+ const Vectorized<double>& b) {
2767
+ return inner_deinterleave2<double>(a, b);
2768
+ }
2769
+
2770
+ template <>
2771
+ std::pair<Vectorized<int64_t>, Vectorized<int64_t>> inline deinterleave2<
2772
+ int64_t>(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
2773
+ return inner_deinterleave2<int64_t>(a, b);
2774
+ }
2775
+
2776
+ inline Vectorized<float> convert_uint8_to_float(const Vectorized<uint8_t> &src) {
2777
+ // Note: this function only converts inputs whose number of elements equals at::vec::Vectorized<float>.size()
2778
+ // Only handle first 64 bits
2779
+ auto vec_int = src.to_vec_float_helper();
2780
+
2781
+ return convert_to_float(vec_int);
2782
+ }
2783
+
2784
+ inline Vectorized<uint8_t> convert_float_to_uint8(const Vectorized<float> &src) {
2785
+ constexpr auto min_val = std::numeric_limits<uint8_t>::min();
2786
+ constexpr auto max_val = std::numeric_limits<uint8_t>::max();
2787
+
2788
+ auto vec_int = clamp(convert_to_int(src), Vectorized<int32_t>(min_val), Vectorized<int32_t>(max_val));
2789
+
2790
+ return vec_int.to_vec_uint8_helper();
2791
+ }
2792
+
2793
+ #undef DEFINE_CLAMP_MAXMIN_FUNCS
2794
+ #undef DEFINE_MAXMIN_FUNCS
2795
+ } // namespace
2796
+ } // namespace vec
2797
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512.h ADDED
@@ -0,0 +1,263 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+
8
+ #include <ATen/cpu/vec/vec_base.h>
9
+ #include <ATen/cpu/vec/vec512/vec512_float.h>
10
+ #include <ATen/cpu/vec/vec512/vec512_bfloat16.h>
11
+ #include <ATen/cpu/vec/vec512/vec512_double.h>
12
+ #include <ATen/cpu/vec/vec512/vec512_int.h>
13
+ #include <ATen/cpu/vec/vec512/vec512_qint.h>
14
+ #include <ATen/cpu/vec/vec512/vec512_complex_float.h>
15
+ #include <ATen/cpu/vec/vec512/vec512_complex_double.h>
16
+
17
+ #include <algorithm>
18
+ #include <cstddef>
19
+ #include <cstdint>
20
+ #include <cstring>
21
+ #include <ostream>
22
+
23
+ namespace at {
24
+ namespace vec {
25
+
26
+ // See Note [CPU_CAPABILITY namespace]
27
+ inline namespace CPU_CAPABILITY {
28
+
29
+ inline std::ostream& operator<<(std::ostream& stream, const c10::qint32& val) {
30
+ stream << val.val_;
31
+ return stream;
32
+ }
33
+ inline std::ostream& operator<<(std::ostream& stream, const c10::qint8& val) {
34
+ stream << static_cast<int>(val.val_);
35
+ return stream;
36
+ }
37
+ inline std::ostream& operator<<(std::ostream& stream, const c10::quint8& val) {
38
+ stream << static_cast<unsigned int>(val.val_);
39
+ return stream;
40
+ }
41
+
42
+ template <typename T>
43
+ std::ostream& operator<<(std::ostream& stream, const Vectorized<T>& vec) {
44
+ T buf[Vectorized<T>::size()];
45
+ vec.store(buf);
46
+ stream << "vec[";
47
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
48
+ if (i != 0) {
49
+ stream << ", ";
50
+ }
51
+ stream << buf[i];
52
+ }
53
+ stream << "]";
54
+ return stream;
55
+ }
56
+
57
+
58
+ #if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
59
+
60
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CAST (AVX512) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
61
+
62
+ template<>
63
+ inline Vectorized<float> cast<float, double>(const Vectorized<double>& src) {
64
+ return _mm512_castpd_ps(src);
65
+ }
66
+
67
+ template<>
68
+ inline Vectorized<double> cast<double, float>(const Vectorized<float>& src) {
69
+ return _mm512_castps_pd(src);
70
+ }
71
+
72
+ template<>
73
+ inline Vectorized<float> cast<float, int32_t>(const Vectorized<int32_t>& src) {
74
+ return _mm512_castsi512_ps(src);
75
+ }
76
+
77
+ template<>
78
+ inline Vectorized<double> cast<double, int64_t>(const Vectorized<int64_t>& src) {
79
+ return _mm512_castsi512_pd(src);
80
+ }
81
+
82
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ GATHER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
83
+
84
+ template<int64_t scale = 1>
85
+ std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<double>>
86
+ inline gather(const double* base_addr, const Vectorized<int64_t>& vindex) {
87
+ return _mm512_i64gather_pd(vindex, base_addr, scale);
88
+ }
89
+
90
+ template<int64_t scale = 1>
91
+ std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<float>>
92
+ inline gather(const float* base_addr, const Vectorized<int32_t>& vindex) {
93
+ return _mm512_i32gather_ps(vindex, base_addr, scale);
94
+ }
95
+
96
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ MASK GATHER ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
97
+
98
+ template<int64_t scale = 1>
99
+ std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<double>>
100
+ inline mask_gather(const Vectorized<double>& src, const double* base_addr,
101
+ const Vectorized<int64_t>& vindex, Vectorized<double>& mask) {
102
+ auto all_ones = _mm512_castsi512_pd(_mm512_set1_epi64(0xFFFFFFFFFFFFFFFF));
103
+ auto mask_ = _mm512_cmp_pd_mask(all_ones, mask.values, _CMP_EQ_OQ);
104
+ return _mm512_mask_i64gather_pd(src, mask_, vindex, base_addr, scale);
105
+ }
106
+
107
+ template<int64_t scale = 1>
108
+ std::enable_if_t<scale == 1 || scale == 2 || scale == 4 || scale == 8, Vectorized<float>>
109
+ inline mask_gather(const Vectorized<float>& src, const float* base_addr,
110
+ const Vectorized<int32_t>& vindex, Vectorized<float>& mask) {
111
+ auto all_ones = _mm512_castsi512_ps(_mm512_set1_epi32(0xFFFFFFFF));
112
+ auto mask_ = _mm512_cmp_ps_mask(all_ones, mask.values, _CMP_EQ_OQ);
113
+ return _mm512_mask_i32gather_ps(src, mask_, vindex, base_addr, scale);
114
+ }
115
+
116
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CONVERT ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
117
+
118
+ template<>
119
+ Vectorized<int64_t>
120
+ inline convert_to_int_of_same_size<double>(const Vectorized<double> &src) {
121
+ return _mm512_cvtpd_epi64(src);
122
+ }
123
+
124
+ template<>
125
+ Vectorized<int32_t>
126
+ inline convert_to_int_of_same_size<float>(const Vectorized<float> &src) {
127
+ return _mm512_cvttps_epi32(src);
128
+ }
129
+
130
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ INTERLEAVE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
131
+
132
+ template <>
133
+ std::pair<Vectorized<double>, Vectorized<double>>
134
+ inline interleave2<double>(const Vectorized<double>& a, const Vectorized<double>& b) {
135
+ // inputs:
136
+ // a = {a0, a1, a2, a3, a4, a5, a6, a7}
137
+ // b = {b0, b1, b2, b3, b4, b5, b6, b7}
138
+ // group cols crossing lanes:
139
+ // return {a0, b0, a1, b1, a2, b2, a3, b3}
140
+ // {a4, b4, a5, b5, a6, b6, a7, b7}
141
+ __m512i idx1 = _mm512_set_epi64(11, 3, 10, 2, 9, 1, 8, 0);
142
+ __m512i idx2 = _mm512_set_epi64(15, 7, 14, 6, 13, 5, 12, 4);
143
+ return std::make_pair(_mm512_mask_permutex2var_pd(a, 0xff, idx1, b),
144
+ _mm512_mask_permutex2var_pd(a, 0xff, idx2, b));
145
+ }
146
+
147
+ template <>
148
+ std::pair<Vectorized<float>, Vectorized<float>>
149
+ inline interleave2<float>(const Vectorized<float>& a, const Vectorized<float>& b) {
150
+ // inputs:
151
+ // a = {a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15}
152
+ // b = {b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15}
153
+ //
154
+ // return:
155
+ // {a0, b0, a1, b1, a2, b2, a3, b3, a4, b4, a5, b5, a6, b6, a7, b7}
156
+ // {a8, b8, a9, b9, a10, b10, a11, b11, a12, b12, a13, b13, a14, b14, a15, b15}
157
+ __m512i idx1 = _mm512_set_epi32(23, 7, 22, 6, 21, 5, 20, 4,
158
+ 19, 3, 18, 2, 17, 1, 16, 0);
159
+ __m512i idx2 = _mm512_set_epi32(31, 15, 30, 14, 29, 13, 28, 12,
160
+ 27, 11, 26, 10, 25, 9, 24, 8);
161
+ return std::make_pair(_mm512_mask_permutex2var_ps(a, 0xffff, idx1, b),
162
+ _mm512_mask_permutex2var_ps(a, 0xffff, idx2, b));
163
+ }
164
+
165
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DEINTERLEAVE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
166
+
167
+ template <>
168
+ std::pair<Vectorized<double>, Vectorized<double>>
169
+ inline deinterleave2<double>(const Vectorized<double>& a, const Vectorized<double>& b) {
170
+ // inputs:
171
+ // a = {a0, b0, a1, b1, a2, b2, a3, b3}
172
+ // b = {a4, b4, a5, b5, a6, b6, a7, b7}
173
+ // output:
174
+ // return {a0, a1, a2, a3, a4, a5, a6, a7}
175
+ // {b0, b1, b2, b3, b4, b5, b6, b7}
176
+ // idx1 gathers the even-numbered positions (the a elements) and idx2 the odd-numbered positions (the b elements)
177
+ __m512i idx1 = _mm512_set_epi64(14, 12, 10, 8, 6, 4, 2, 0);
178
+ __m512i idx2 = _mm512_set_epi64(15, 13, 11, 9, 7, 5, 3, 1);
179
+
180
+ return std::make_pair(_mm512_mask_permutex2var_pd(a, 0xff, idx1, b),
181
+ _mm512_mask_permutex2var_pd(a, 0xff, idx2, b));
182
+ }
183
+
184
+ template <>
185
+ std::pair<Vectorized<float>, Vectorized<float>>
186
+ inline deinterleave2<float>(const Vectorized<float>& a, const Vectorized<float>& b) {
187
+ // inputs:
188
+ // a = {a0, b0, a1, b1, a2, b2, a3, b3, a4, b4, a5, b5, a6, b6, a7, b7}
189
+ // b = {a8, b8, a9, b9, a10, b10, a11, b11, a12, b12, a13, b13, a14, b14, a15, b15}
190
+ // output:
191
+ // return {a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15}
192
+ // {b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15}
193
+ __m512i idx1 = _mm512_set_epi32(30, 28, 26, 24, 22, 20, 18, 16,
194
+ 14, 12, 10, 8, 6, 4, 2, 0);
195
+ __m512i idx2 = _mm512_set_epi32(31, 29, 27, 25, 23, 21, 19, 17,
196
+ 15, 13, 11, 9, 7, 5, 3, 1);
197
+
198
+ return std::make_pair(_mm512_mask_permutex2var_ps(a, 0xffff, idx1, b),
199
+ _mm512_mask_permutex2var_ps(a, 0xffff, idx2, b));
200
+ }
201
+
202
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FLIP ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
203
+
204
+ template<>
205
+ inline Vectorized<float> flip(const Vectorized<float> & v) {
206
+ const __m512i mask = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7,
207
+ 8, 9, 10, 11, 12, 13, 14, 15);
208
+ return _mm512_permutexvar_ps(mask, v);
209
+ }
210
+
211
+ template<>
212
+ inline Vectorized<double> flip(const Vectorized<double> & v) {
213
+ const __m512i mask = _mm512_set_epi64(0, 1, 2, 3, 4, 5, 6, 7);
214
+ return _mm512_permutexvar_pd(mask, v);
215
+ }
216
+
217
+ template<>
218
+ inline Vectorized<int64_t> flip(const Vectorized<int64_t> & v) {
219
+ const __m512i mask = _mm512_set_epi64(0, 1, 2, 3, 4, 5, 6, 7);
220
+ return _mm512_permutexvar_epi64(mask, v);
221
+ }
222
+
223
+ template<>
224
+ inline Vectorized<int32_t> flip(const Vectorized<int32_t> & v) {
225
+ const __m512i mask = _mm512_set_epi32(0, 1, 2, 3, 4, 5, 6, 7,
226
+ 8, 9, 10, 11, 12, 13, 14, 15);
227
+ return _mm512_permutexvar_epi32(mask, v);
228
+ }
229
+
230
+ template<>
231
+ inline Vectorized<int16_t> flip(const Vectorized<int16_t> & v) {
232
+ const __m512i mask = _mm512_set_epi16(
233
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
234
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
235
+ );
236
+ return _mm512_permutexvar_epi16(mask, v);
237
+ }
238
+
239
+ inline __m512i flip8(const __m512i & v) {
240
+ const __m512i mask1 = _mm512_set_epi8(
241
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
242
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
243
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
244
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
245
+ );
246
+ const __m512i mask2 = _mm512_set_epi64(1, 0, 3, 2, 5, 4, 7, 6);
247
+ auto reversed_vec = _mm512_shuffle_epi8(v, mask1);
248
+ return _mm512_permutexvar_epi64(mask2, reversed_vec);
249
+ }
250
+
251
+ template<>
252
+ inline Vectorized<int8_t> flip(const Vectorized<int8_t> & v) {
253
+ return flip8(v);
254
+ }
255
+
256
+ template<>
257
+ inline Vectorized<uint8_t> flip(const Vectorized<uint8_t> & v) {
258
+ return flip8(v);
259
+ }
260
+
261
+ #endif // defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
262
+
263
+ }}}
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_bfloat16.h ADDED
@@ -0,0 +1,1232 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+ #include <ATen/cpu/vec/vec_base.h>
8
+ #include <c10/util/irange.h>
9
+
10
+ #if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
11
+ #include <sleef.h>
12
+ #endif
13
+
14
+ namespace at {
15
+ namespace vec {
16
+ // See Note [CPU_CAPABILITY namespace]
17
+ inline namespace CPU_CAPABILITY {
18
+
19
+ #if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
20
+
21
+ // bfloat16 conversion
22
+ static inline void cvtbf16_fp32(const __m256i& a, __m512& o) {
23
+ o = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_cvtepu16_epi32(a), 16));
24
+ }
25
+
26
+ static inline void cvtbf16_fp32(const __m512i& a, __m512& o1, __m512& o2) {
27
+ __m256i lo = _mm512_extracti32x8_epi32(a, 0);
28
+ __m256i hi = _mm512_extracti32x8_epi32(a, 1);
29
+ cvtbf16_fp32(lo, o1);
30
+ cvtbf16_fp32(hi, o2);
31
+ }
32
+
33
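+ // Packs two fp32 vectors into one bf16 vector using round-to-nearest-even;
+ // NaN inputs are mapped to the bf16 NaN pattern 0xffff.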
+ static inline __m512i cvtfp32_bf16(const __m512& a, const __m512& b) {
34
+ __m512i lo = _mm512_castps_si512(a);
35
+ __m512i hi = _mm512_castps_si512(b);
36
+ __m512i nan = _mm512_set1_epi32(0xffff);
37
+ auto mask_lo = _mm512_cmp_ps_mask(a, a, _CMP_ORD_Q);
38
+ auto mask_hi = _mm512_cmp_ps_mask(b, b, _CMP_ORD_Q);
39
+ __m512i ones = _mm512_set1_epi32(0x1);
40
+ __m512i vec_bias = _mm512_set1_epi32(0x7fff);
41
+ // uint32_t lsb = (input >> 16) & 1;
42
+ auto t_lo = _mm512_and_si512(_mm512_srli_epi32(lo, 16), ones);
43
+ auto t_hi = _mm512_and_si512(_mm512_srli_epi32(hi, 16), ones);
44
+ // uint32_t rounding_bias = 0x7fff + lsb;
45
+ t_lo = _mm512_add_epi32(t_lo, vec_bias);
46
+ t_hi = _mm512_add_epi32(t_hi, vec_bias);
47
+ // input += rounding_bias;
48
+ t_lo = _mm512_add_epi32(t_lo, lo);
49
+ t_hi = _mm512_add_epi32(t_hi, hi);
50
+ // input = input >> 16;
51
+ t_lo = _mm512_srli_epi32(t_lo, 16);
52
+ t_hi = _mm512_srli_epi32(t_hi, 16);
53
+ // Check NaN before converting back to bf16
54
+ t_lo = _mm512_mask_blend_epi32(mask_lo, nan, t_lo);
55
+ t_hi = _mm512_mask_blend_epi32(mask_hi, nan, t_hi);
56
+
57
+ t_lo = _mm512_packus_epi32(t_lo, t_hi); // t_hi[4-7] t_lo[4-7] t_hi[0-4] t_lo[0-4]
58
+ __m512i idx = _mm512_set_epi64(7, 5, 3, 1, 6, 4, 2, 0);
59
+ return _mm512_permutexvar_epi64(idx, t_lo);
60
+ }
61
+
62
+ static inline __m512i merge_compare_result(const __m512& a, const __m512& b) {
63
+ __m512i lo = _mm512_castps_si512(a);
64
+ __m512i hi = _mm512_castps_si512(b);
65
+ lo = _mm512_srli_epi32(lo, 16);
66
+ hi = _mm512_srli_epi32(hi, 16);
67
+ auto out = _mm512_packus_epi32(lo, hi);
68
+ __m512i idx = _mm512_set_epi64(7, 5, 3, 1, 6, 4, 2, 0);
69
+ return _mm512_permutexvar_epi64(idx, out);
70
+ }
71
+
72
+ // float16 conversion
73
+ static inline void cvtfp16_fp32(const __m256i& a, __m512& o) {
74
+ o = _mm512_cvtph_ps(a);
75
+ }
76
+
77
+ static inline void cvtfp16_fp32(const __m512i& a, __m512& o1, __m512& o2) {
78
+ __m256i lo = _mm512_extracti32x8_epi32(a, 0);
79
+ __m256i hi = _mm512_extracti32x8_epi32(a, 1);
80
+ cvtfp16_fp32(lo, o1);
81
+ cvtfp16_fp32(hi, o2);
82
+ }
83
+
84
+ static inline __m512i cvtfp32_fp16(const __m512& a, const __m512& b) {
85
+ __m256i lo = _mm512_cvtps_ph(
86
+ a, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
87
+ __m256i hi = _mm512_cvtps_ph(
88
+ b, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
89
+ __m512 t_lo = _mm512_castsi512_ps(_mm512_castsi256_si512(lo));
90
+ __m256 t_hi = _mm256_castsi256_ps(hi);
91
+ return _mm512_castps_si512(_mm512_insertf32x8(t_lo, t_hi, 1));
92
+ }
93
+
94
+ // dtype conversion between float16/bfloat16 and float32
95
+ template <typename T, typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
96
+ inline void cvt_to_fp32(const __m256i& a, __m512& o);
97
+ template <> inline void cvt_to_fp32<BFloat16>(const __m256i& a, __m512& o) {
98
+ cvtbf16_fp32(a, o);
99
+ }
100
+ template <> inline void cvt_to_fp32<Half>(const __m256i& a, __m512& o) {
101
+ cvtfp16_fp32(a, o);
102
+ }
103
+
104
+ template <typename T, typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
105
+ inline void cvt_to_fp32(const __m512i& a, __m512& o1, __m512& o2);
106
+ template <> inline void cvt_to_fp32<BFloat16>(const __m512i& a, __m512& o1, __m512& o2) {
107
+ cvtbf16_fp32(a, o1, o2);
108
+ }
109
+ template <> inline void cvt_to_fp32<Half>(const __m512i& a, __m512& o1, __m512& o2) {
110
+ cvtfp16_fp32(a, o1, o2);
111
+ }
112
+
113
+ template <typename T, bool is_compare_op = false,
114
+ typename std::enable_if_t<is_reduced_floating_point_v<T>, int> = 0>
115
+ inline __m512i cvt_from_fp32(const __m512& a, const __m512& b);
116
+ template <> inline __m512i cvt_from_fp32<BFloat16, false>(const __m512& a, const __m512& b) {
117
+ return cvtfp32_bf16(a, b);
118
+ }
119
+ template <> inline __m512i cvt_from_fp32<BFloat16, true>(const __m512& a, const __m512& b) {
120
+ return merge_compare_result(a, b);
121
+ }
122
+ template <> inline __m512i cvt_from_fp32<Half, false>(const __m512& a, const __m512& b) {
123
+ return cvtfp32_fp16(a, b);
124
+ }
125
+ template <> inline __m512i cvt_from_fp32<Half, true>(const __m512& a, const __m512& b) {
126
+ return cvtfp32_fp16(a, b);
127
+ }
128
+
129
+ template <typename T>
130
+ class Vectorized16 {
131
+ static_assert(
132
+ is_reduced_floating_point_v<T>,
133
+ "Support only float16 and bfloat16.");
134
+ private:
135
+ __m512i values;
136
+ public:
137
+ using value_type = uint16_t;
138
+ using size_type = int;
139
+ static constexpr size_type size() {
140
+ return 32;
141
+ }
142
+ Vectorized16() {}
143
+ Vectorized16(__m512i v) : values(v) {}
144
+ Vectorized16(T val) {
145
+ value_type uw = val.x;
146
+ values = _mm512_set1_epi16(uw);
147
+ }
148
+ Vectorized16(T val1, T val2, T val3, T val4,
149
+ T val5, T val6, T val7, T val8,
150
+ T val9, T val10, T val11, T val12,
151
+ T val13, T val14, T val15, T val16,
152
+ T val17, T val18, T val19, T val20,
153
+ T val21, T val22, T val23, T val24,
154
+ T val25, T val26, T val27, T val28,
155
+ T val29, T val30, T val31, T val32) {
156
+ values = _mm512_set_epi16(
157
+ val32.x, val31.x, val30.x, val29.x, val28.x, val27.x, val26.x, val25.x,
158
+ val24.x, val23.x, val22.x, val21.x, val20.x, val19.x, val18.x, val17.x,
159
+ val16.x, val15.x, val14.x, val13.x, val12.x, val11.x, val10.x, val9.x,
160
+ val8.x, val7.x, val6.x, val5.x, val4.x, val3.x, val2.x, val1.x);
161
+ }
162
+ operator __m512i() const {
163
+ return values;
164
+ }
165
+ T& operator[](int idx) = delete;
166
+ const T& operator[](int idx) const = delete;
167
+ int zero_mask() const {
168
+ // returns an integer mask in which each zero element maps to a 1 bit and every other element to a 0 bit
169
+ return _mm512_cmpeq_epi16_mask(values, _mm512_set1_epi16(0));
170
+ }
171
+ static Vectorized<T> loadu(const void* ptr, int16_t count = size()) {
172
+ if (count == size())
173
+ return _mm512_loadu_si512(reinterpret_cast<const __m512i*>(ptr));
174
+
175
+ __at_align__ int16_t tmp_values[size()];
176
+ std::memcpy(tmp_values, ptr, count * sizeof(int16_t));
177
+ return _mm512_loadu_si512(reinterpret_cast<const __m512i*>(tmp_values));
178
+ }
179
+ void store(void* ptr, int count = size()) const {
180
+ if (count == size()) {
181
+ _mm512_storeu_si512(reinterpret_cast<__m512i*>(ptr), values);
182
+ } else if (count > 0) {
183
+ __at_align__ int16_t tmp_values[size()];
184
+ _mm512_storeu_si512(reinterpret_cast<__m512i*>(tmp_values), values);
185
+ std::memcpy(ptr, tmp_values, count * sizeof(int16_t));
186
+ }
187
+ }
188
+ template <int64_t mask>
189
+ static Vectorized<T> blend(const Vectorized<T>& a, const Vectorized<T>& b) {
190
+ __at_align__ int16_t tmp_values[size()];
191
+ a.store(tmp_values);
192
+ if (mask & 0x01)
193
+ tmp_values[0] = b.values[31];
194
+ if (mask & 0x02)
195
+ tmp_values[1] = b.values[30];
196
+ if (mask & 0x04)
197
+ tmp_values[2] = b.values[29];
198
+ if (mask & 0x08)
199
+ tmp_values[3] = b.values[28];
200
+ if (mask & 0x10)
201
+ tmp_values[4] = b.values[27];
202
+ if (mask & 0x20)
203
+ tmp_values[5] = b.values[26];
204
+ if (mask & 0x40)
205
+ tmp_values[6] = b.values[25];
206
+ if (mask & 0x80)
207
+ tmp_values[7] = b.values[24];
208
+ if (mask & 0x100)
209
+ tmp_values[8] = b.values[23];
210
+ if (mask & 0x200)
211
+ tmp_values[9] = b.values[22];
212
+ if (mask & 0x400)
213
+ tmp_values[10] = b.values[21];
214
+ if (mask & 0x800)
215
+ tmp_values[11] = b.values[20];
216
+ if (mask & 0x1000)
217
+ tmp_values[12] = b.values[19];
218
+ if (mask & 0x2000)
219
+ tmp_values[13] = b.values[18];
220
+ if (mask & 0x4000)
221
+ tmp_values[14] = b.values[17];
222
+ if (mask & 0x8000)
223
+ tmp_values[15] = b.values[16];
224
+ if (mask & 0x10000)
225
+ tmp_values[16] = b.values[15];
226
+ if (mask & 0x20000)
227
+ tmp_values[17] = b.values[14];
228
+ if (mask & 0x40000)
229
+ tmp_values[18] = b.values[13];
230
+ if (mask & 0x80000)
231
+ tmp_values[19] = b.values[12];
232
+ if (mask & 0x100000)
233
+ tmp_values[20] = b.values[11];
234
+ if (mask & 0x200000)
235
+ tmp_values[21] = b.values[10];
236
+ if (mask & 0x400000)
237
+ tmp_values[22] = b.values[9];
238
+ if (mask & 0x800000)
239
+ tmp_values[23] = b.values[8];
240
+ if (mask & 0x1000000)
241
+ tmp_values[24] = b.values[7];
242
+ if (mask & 0x2000000)
243
+ tmp_values[25] = b.values[6];
244
+ if (mask & 0x4000000)
245
+ tmp_values[26] = b.values[5];
246
+ if (mask & 0x8000000)
247
+ tmp_values[27] = b.values[4];
248
+ if (mask & 0x10000000)
249
+ tmp_values[28] = b.values[3];
250
+ if (mask & 0x20000000)
251
+ tmp_values[29] = b.values[2];
252
+ if (mask & 0x40000000)
253
+ tmp_values[30] = b.values[1];
254
+ if (mask & 0x80000000)
255
+ tmp_values[31] = b.values[0];
256
+ return loadu(tmp_values);
257
+ }
258
+ static Vectorized<T> blendv(const Vectorized<T>& a,
259
+ const Vectorized<T>& b, const Vectorized<T>& mask) {
260
+ auto all_ones = _mm512_set1_epi16(0xFFFF);
261
+ auto mask_ = _mm512_cmp_epi16_mask(mask, all_ones, _MM_CMPINT_EQ);
262
+ return _mm512_mask_blend_epi16(mask_, a.values, b.values);
263
+ }
264
+ template<typename step_t>
265
+ static Vectorized<T> arange(T base = 0.f, step_t step = static_cast<step_t>(1)) {
266
+ return Vectorized<T>(
267
+ base, base + step, base + 2 * step, base + 3 * step,
268
+ base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step,
269
+ base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step,
270
+ base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step,
271
+ base + 16 * step, base + 17 * step, base + 18 * step, base + 19 * step,
272
+ base + 20 * step, base + 21 * step, base + 22 * step, base + 23 * step,
273
+ base + 24 * step, base + 25 * step, base + 26 * step, base + 27 * step,
274
+ base + 28 * step, base + 29 * step, base + 30 * step, base + 31 * step);
275
+ }
276
+ static Vectorized<T> set(const Vectorized<T>& a,
277
+ const Vectorized<T>& b, int64_t count = size()) {
278
+ switch (count) {
279
+ case 0:
280
+ return a;
281
+ case 1:
282
+ return blend<1>(a, b);
283
+ case 2:
284
+ return blend<3>(a, b);
285
+ case 3:
286
+ return blend<7>(a, b);
287
+ case 4:
288
+ return blend<15>(a, b);
289
+ case 5:
290
+ return blend<31>(a, b);
291
+ case 6:
292
+ return blend<63>(a, b);
293
+ case 7:
294
+ return blend<127>(a, b);
295
+ case 8:
296
+ return blend<255>(a, b);
297
+ case 9:
298
+ return blend<511>(a, b);
299
+ case 10:
300
+ return blend<1023>(a, b);
301
+ case 11:
302
+ return blend<2047>(a, b);
303
+ case 12:
304
+ return blend<4095>(a, b);
305
+ case 13:
306
+ return blend<8191>(a, b);
307
+ case 14:
308
+ return blend<16383>(a, b);
309
+ case 15:
310
+ return blend<32767>(a, b);
311
+ case 16:
312
+ return blend<65535>(a, b);
313
+ case 17:
314
+ return blend<131071>(a, b);
315
+ case 18:
316
+ return blend<262143>(a, b);
317
+ case 19:
318
+ return blend<524287>(a, b);
319
+ case 20:
320
+ return blend<1048575>(a, b);
321
+ case 21:
322
+ return blend<2097151>(a, b);
323
+ case 22:
324
+ return blend<4194303>(a, b);
325
+ case 23:
326
+ return blend<8388607>(a, b);
327
+ case 24:
328
+ return blend<16777215>(a, b);
329
+ case 25:
330
+ return blend<33554431>(a, b);
331
+ case 26:
332
+ return blend<67108863>(a, b);
333
+ case 27:
334
+ return blend<134217727>(a, b);
335
+ case 28:
336
+ return blend<268435455>(a, b);
337
+ case 29:
338
+ return blend<536870911>(a, b);
339
+ case 30:
340
+ return blend<1073741823>(a, b);
341
+ case 31:
342
+ return blend<2147483647>(a, b);
343
+ }
344
+ return b;
345
+ }
346
+ #pragma clang diagnostic push
347
+ #pragma clang diagnostic ignored "-Wignored-qualifiers"
348
+ Vectorized<T> map(const __m512 (*const vop)(__m512)) const {
349
+ __m512 lo, hi;
350
+ cvt_to_fp32<T>(values, lo, hi);
351
+ const auto o1 = vop(lo);
352
+ const auto o2 = vop(hi);
353
+ return cvt_from_fp32<T>(o1, o2);
354
+ }
355
+ Vectorized<T> isnan() const {
356
+ __m512 lo, hi;
357
+ cvt_to_fp32<T>(values, lo, hi);
358
+ __mmask16 lo_mask, hi_mask;
359
+ __m512 zero = _mm512_set1_ps(0.0);
360
+ __m512i zeroi = _mm512_castps_si512(zero);
361
+ lo_mask = _mm512_cmp_ps_mask(lo, zero, _CMP_UNORD_Q);
362
+ lo = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zeroi, lo_mask, 0xFFFF'FFFF));
363
+ hi_mask = _mm512_cmp_ps_mask(hi, zero, _CMP_UNORD_Q);
364
+ hi = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zeroi, hi_mask, 0xFFFF'FFFF));
365
+ return merge_compare_result(lo, hi);
366
+ }
367
+ #pragma clang diagnostic pop
368
+ Vectorized<T> abs() const {
369
+ return _mm512_andnot_si512(_mm512_set1_epi16(0x8000), values);
370
+ }
371
+ Vectorized<T> angle() const {
372
+ __m512 lo, hi;
373
+ cvt_to_fp32<T>(values, lo, hi);
374
+ auto angle_lambda = [](__m512 values) {
375
+ const auto zero_vec = _mm512_set1_ps(0.f);
376
+ const auto nan_vec = _mm512_set1_ps(NAN);
377
+ const auto not_nan_mask = _mm512_cmp_ps_mask(values, values, _CMP_EQ_OQ);
378
+ const auto non_nan_mask_vec = _mm512_mask_set1_epi32(_mm512_castps_si512(zero_vec),
379
+ not_nan_mask, 0xFFFFFFFF);
380
+ const auto nan_mask = _mm512_cmp_ps_mask(_mm512_castsi512_ps(non_nan_mask_vec),
381
+ zero_vec, _CMP_EQ_OQ);
382
+ const auto pi = _mm512_set1_ps(c10::pi<float>);
383
+
384
+ const auto neg_mask = _mm512_cmp_ps_mask(values, zero_vec, _CMP_LT_OQ);
385
+ auto angle = _mm512_mask_blend_ps(neg_mask, zero_vec, pi);
386
+ angle = _mm512_mask_blend_ps(nan_mask, angle, nan_vec);
387
+ return angle;
388
+ };
389
+ auto o1 = angle_lambda(lo);
390
+ auto o2 = angle_lambda(hi);
391
+ return cvt_from_fp32<T>(o1, o2);
392
+ }
393
+ Vectorized<T> real() const {
394
+ return *this;
395
+ }
396
+ Vectorized<T> imag() const {
397
+ return _mm512_set1_epi16(0);
398
+ }
399
+ Vectorized<T> conj() const {
400
+ return *this;
401
+ }
402
+ Vectorized<T> acos() const {
403
+ return map(Sleef_acosf16_u10);
404
+ }
405
+ Vectorized<T> asin() const {
406
+ return map(Sleef_asinf16_u10);
407
+ }
408
+ Vectorized<T> atan() const {
409
+ return map(Sleef_atanf16_u10);
410
+ }
411
+ Vectorized<T> atanh() const {
412
+ return map(Sleef_atanhf16_u10);
413
+ }
414
+ Vectorized<T> atan2(const Vectorized<T> &b) const {
415
+ __m512 lo, hi;
416
+ __m512 b1, b2;
417
+ cvt_to_fp32<T>(values, lo, hi);
418
+ cvt_to_fp32<T>(b.values, b1, b2);
419
+ auto o1 = Sleef_atan2f16_u10(lo, b1);
420
+ auto o2 = Sleef_atan2f16_u10(hi, b2);
421
+ return cvt_from_fp32<T>(o1, o2);
422
+ }
423
+ Vectorized<T> copysign(const Vectorized<T> &sign) const {
424
+ // copy sign bit (0x8000) from sign and remaining bits from values
425
+ __m512i mask_value = _mm512_set1_epi32(~0x80008000);
426
+ __m512i mask_signbit = _mm512_set1_epi32(0x80008000);
427
+ return Vectorized<T>(
428
+ _mm512_or_si512(
429
+ _mm512_and_si512(values, mask_value),
430
+ _mm512_and_si512(sign, mask_signbit)));
431
+ }
432
+ Vectorized<T> erf() const {
433
+ return map(Sleef_erff16_u10);
434
+ }
435
+ Vectorized<T> erfc() const {
436
+ return map(Sleef_erfcf16_u15);
437
+ }
438
+ Vectorized<T> erfinv() const {
439
+ __m512 lo, hi;
440
+ cvt_to_fp32<T>(values, lo, hi);
441
+ __at_align__ float tmp1[size() / 2], tmp2[size() / 2];
442
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
443
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
444
+ for (int64_t i = 0; i < size() / 2; i++) {
445
+ tmp1[i] = calc_erfinv(tmp1[i]);
446
+ tmp2[i] = calc_erfinv(tmp2[i]);
447
+ }
448
+ auto o1 = _mm512_loadu_ps(tmp1);
449
+ auto o2 = _mm512_loadu_ps(tmp2);
450
+ return cvt_from_fp32<T>(o1, o2);
451
+ }
452
+ Vectorized<T> exp() const {
453
+ return map(Sleef_expf16_u10);
454
+ }
455
+ Vectorized<T> exp2() const {
456
+ return map(Sleef_exp2f16_u10);
457
+ }
458
+ Vectorized<T> expm1() const {
459
+ return map(Sleef_expm1f16_u10);
460
+ }
461
+ Vectorized<T> fmod(const Vectorized<T> & q) const {
462
+ __m512 x_lo, x_hi;
463
+ cvt_to_fp32<T>(values, x_lo, x_hi);
464
+ __m512 q_lo, q_hi;
465
+ cvtbf16_fp32(q.values, q_lo, q_hi);
466
+ auto o1 = Sleef_fmodf16(x_lo, q_lo);
467
+ auto o2 = Sleef_fmodf16(x_hi, q_hi);
468
+ return cvt_from_fp32<T>(o1, o2);
469
+ }
470
+ Vectorized<T> hypot(const Vectorized<T> &b) const {
471
+ __m512 lo, hi;
472
+ __m512 b1, b2;
473
+ cvt_to_fp32<T>(values, lo, hi);
474
+ cvt_to_fp32<T>(b.values, b1, b2);
475
+ auto o1 = Sleef_hypotf16_u05(lo, b1);
476
+ auto o2 = Sleef_hypotf16_u05(hi, b2);
477
+ return cvt_from_fp32<T>(o1, o2);
478
+ }
479
+ Vectorized<T> i0() const {
480
+ __m512 lo, hi;
481
+ cvt_to_fp32<T>(values, lo, hi);
482
+ __at_align__ float tmp1[size() / 2], tmp2[size() / 2];
483
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
484
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
485
+ for (int64_t i = 0; i < size() / 2; i++) {
486
+ tmp1[i] = calc_i0(tmp1[i]);
487
+ tmp2[i] = calc_i0(tmp2[i]);
488
+ }
489
+ auto o1 = _mm512_loadu_ps(tmp1);
490
+ auto o2 = _mm512_loadu_ps(tmp2);
491
+ return cvt_from_fp32<T>(o1, o2);
492
+ }
493
+ Vectorized<T> i0e() const {
494
+ __m512 lo, hi;
495
+ cvt_to_fp32<T>(values, lo, hi);
496
+ constexpr auto sz = size();
497
+ __at_align__ float tmp1[sz / 2], tmp2[sz / 2];
498
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
499
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
500
+
501
+ for (auto i = decltype(sz){0}; i < sz / 2; i++) {
502
+ tmp1[i] = calc_i0e(tmp1[i]);
503
+ tmp2[i] = calc_i0e(tmp2[i]);
504
+ }
505
+ const auto o1 = _mm512_loadu_ps(tmp1);
506
+ const auto o2 = _mm512_loadu_ps(tmp2);
507
+ return cvt_from_fp32<T>(o1, o2);
508
+ }
509
+ Vectorized<T> digamma() const {
510
+ __m512 lo, hi;
511
+ cvt_to_fp32<T>(values, lo, hi);
512
+ constexpr auto sz = size();
513
+ __at_align__ float tmp1[sz / 2], tmp2[sz / 2];
514
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
515
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
516
+
517
+ for (auto i = decltype(sz){0}; i < sz / 2; i++) {
518
+ tmp1[i] = calc_digamma(tmp1[i]);
519
+ tmp2[i] = calc_digamma(tmp2[i]);
520
+ }
521
+ const auto o1 = _mm512_loadu_ps(tmp1);
522
+ const auto o2 = _mm512_loadu_ps(tmp2);
523
+ return cvt_from_fp32<T>(o1, o2);
524
+ }
525
+ Vectorized<T> igamma(const Vectorized<T> &x) const {
526
+ __m512 lo, hi;
527
+ __m512 xlo, xhi;
528
+ cvt_to_fp32<T>(values, lo, hi);
529
+ cvt_to_fp32<T>(x.values, xlo, xhi);
530
+ __at_align__ float tmp1[size() / 2], tmp2[size() / 2];
531
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
532
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
533
+ __at_align__ float tmpx1[size() / 2], tmpx2[size() / 2];
534
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmpx1), xlo);
535
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmpx2), xhi);
536
+ for (int64_t i = 0; i < size() / 2; ++i) {
537
+ tmp1[i] = calc_igamma(tmp1[i], tmpx1[i]);
538
+ tmp2[i] = calc_igamma(tmp2[i], tmpx2[i]);
539
+ }
540
+ auto o1 = _mm512_loadu_ps(tmp1);
541
+ auto o2 = _mm512_loadu_ps(tmp2);
542
+ return cvt_from_fp32<T>(o1, o2);
543
+ }
544
+
545
+ Vectorized<T> igammac(const Vectorized<T> &x) const {
546
+ __m512 lo, hi;
547
+ __m512 xlo, xhi;
548
+ cvt_to_fp32<T>(values, lo, hi);
549
+ cvt_to_fp32<T>(x.values, xlo, xhi);
550
+ __at_align__ float tmp1[size() / 2], tmp2[size() / 2];
551
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmp1), lo);
552
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmp2), hi);
553
+ __at_align__ float tmpx1[size() / 2], tmpx2[size() / 2];
554
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmpx1), xlo);
555
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmpx2), xhi);
556
+ for (int64_t i = 0; i < size() / 2; ++i) {
557
+ tmp1[i] = calc_igammac(tmp1[i], tmpx1[i]);
558
+ tmp2[i] = calc_igammac(tmp2[i], tmpx2[i]);
559
+ }
560
+ auto o1 = _mm512_loadu_ps(tmp1);
561
+ auto o2 = _mm512_loadu_ps(tmp2);
562
+ return cvt_from_fp32<T>(o1, o2);
563
+ }
564
+ Vectorized<T> log() const {
565
+ return map(Sleef_logf16_u10);
566
+ }
567
+ Vectorized<T> log2() const {
568
+ return map(Sleef_log2f16_u10);
569
+ }
570
+ Vectorized<T> log10() const {
571
+ return map(Sleef_log10f16_u10);
572
+ }
573
+ Vectorized<T> log1p() const {
574
+ return map(Sleef_log1pf16_u10);
575
+ }
576
+ Vectorized<T> sin() const {
577
+ return map(Sleef_sinf16_u10);
578
+ }
579
+ Vectorized<T> sinh() const {
580
+ return map(Sleef_sinhf16_u10);
581
+ }
582
+ Vectorized<T> cos() const {
583
+ return map(Sleef_cosf16_u10);
584
+ }
585
+ Vectorized<T> cosh() const {
586
+ return map(Sleef_coshf16_u10);
587
+ }
588
+ Vectorized<T> ceil() const {
589
+ __m512 lo, hi;
590
+ cvt_to_fp32<T>(values, lo, hi);
591
+ auto o1 = _mm512_ceil_ps(lo);
592
+ auto o2 = _mm512_ceil_ps(hi);
593
+ return cvt_from_fp32<T>(o1, o2);
594
+ }
595
+ Vectorized<T> floor() const {
596
+ __m512 lo, hi;
597
+ cvt_to_fp32<T>(values, lo, hi);
598
+ auto o1 = _mm512_floor_ps(lo);
599
+ auto o2 = _mm512_floor_ps(hi);
600
+ return cvt_from_fp32<T>(o1, o2);
601
+ }
602
+ Vectorized<T> neg() const {
603
+ return _mm512_xor_si512(values, _mm512_set1_epi16(0x8000));
604
+ }
605
+ Vectorized<T> round() const {
606
+ __m512 lo, hi;
607
+ cvt_to_fp32<T>(values, lo, hi);
608
+ auto o1 = _mm512_roundscale_ps(lo, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
609
+ auto o2 = _mm512_roundscale_ps(hi, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
610
+ return cvt_from_fp32<T>(o1, o2);
611
+ }
612
+ Vectorized<T> tan() const {
613
+ return map(Sleef_tanf16_u10);
614
+ }
615
+ Vectorized<T> tanh() const {
616
+ return map(Sleef_tanhf16_u10);
617
+ }
618
+ Vectorized<T> trunc() const {
619
+ __m512 lo, hi;
620
+ cvt_to_fp32<T>(values, lo, hi);
621
+ auto o1 = _mm512_roundscale_ps(lo, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
622
+ auto o2 = _mm512_roundscale_ps(hi, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
623
+ return cvt_from_fp32<T>(o1, o2);
624
+ }
625
+ Vectorized<T> lgamma() const {
626
+ return map(Sleef_lgammaf16_u10);
627
+ }
628
+ Vectorized<T> sqrt() const {
629
+ __m512 lo, hi;
630
+ cvt_to_fp32<T>(values, lo, hi);
631
+ auto o1 = _mm512_sqrt_ps(lo);
632
+ auto o2 = _mm512_sqrt_ps(hi);
633
+ return cvt_from_fp32<T>(o1, o2);
634
+ }
635
+ Vectorized<T> reciprocal() const {
636
+ __m512 lo, hi;
637
+ cvt_to_fp32<T>(values, lo, hi);
638
+ auto ones = _mm512_set1_ps(1);
639
+ auto o1 = _mm512_div_ps(ones, lo);
640
+ auto o2 = _mm512_div_ps(ones, hi);
641
+ return cvt_from_fp32<T>(o1, o2);
642
+ }
643
+ Vectorized<T> rsqrt() const {
644
+ __m512 lo, hi;
645
+ cvt_to_fp32<T>(values, lo, hi);
646
+ auto ones = _mm512_set1_ps(1);
647
+ auto o1 = _mm512_div_ps(ones, _mm512_sqrt_ps(lo));
648
+ auto o2 = _mm512_div_ps(ones, _mm512_sqrt_ps(hi));
649
+ return cvt_from_fp32<T>(o1, o2);
650
+ }
651
+ Vectorized<T> pow(const Vectorized<T> &b) const {
652
+ __m512 lo, hi;
653
+ __m512 b1, b2;
654
+ cvt_to_fp32<T>(values, lo, hi);
655
+ cvt_to_fp32<T>(b.values, b1, b2);
656
+ auto o1 = Sleef_powf16_u10(lo, b1);
657
+ auto o2 = Sleef_powf16_u10(hi, b2);
658
+ return cvt_from_fp32<T>(o1, o2);
659
+ }
660
+ private:
661
+ template<typename Op>
662
+ Vectorized<T> inline binary_compare(const Vectorized<T>& b, Op op) const {
663
+ __m512 a_lo, a_hi;
664
+ __m512 b_lo, b_hi;
665
+ cvt_to_fp32<T>(values, a_lo, a_hi);
666
+ cvt_to_fp32<T>(b.values, b_lo, b_hi);
667
+ auto o1 = op(a_lo, b_lo);
668
+ auto o2 = op(a_hi, b_hi);
669
+ return cvt_from_fp32<T, /*is_compare_op*/true>(o1, o2);
670
+ }
671
+
672
+ public:
673
+ Vectorized<T> inline operator>(const Vectorized<T>& other) const {
674
+ return binary_compare(other, [](__m512 x, __m512 y) {
675
+ auto zero_vec = _mm512_set1_epi32(0);
676
+ auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_GT_OQ);
677
+ return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF));
678
+ });
679
+ }
680
+ Vectorized<T> inline operator<(const Vectorized<T>& other) const {
681
+ return binary_compare(other, [](__m512 x, __m512 y) {
682
+ auto zero_vec = _mm512_set1_epi32(0);
683
+ auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_LT_OQ);
684
+ return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF));
685
+ });
686
+ }
687
+ Vectorized<T> inline operator>=(const Vectorized<T>& other) const {
688
+ return binary_compare(other, [](__m512 x, __m512 y) {
689
+ auto zero_vec = _mm512_set1_epi32(0);
690
+ auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_GE_OQ);
691
+ return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF));
692
+ });
693
+ }
694
+ Vectorized<T> inline operator<=(const Vectorized<T>& other) const {
695
+ return binary_compare(other, [](__m512 x, __m512 y) {
696
+ auto zero_vec = _mm512_set1_epi32(0);
697
+ auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_LE_OQ);
698
+ return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF));
699
+ });
700
+ }
701
+ Vectorized<T> inline operator==(const Vectorized<T>& other) const {
702
+ return binary_compare(other, [](__m512 x, __m512 y) {
703
+ auto zero_vec = _mm512_set1_epi32(0);
704
+ auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_EQ_OQ);
705
+ return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF));
706
+ });
707
+ }
708
+ Vectorized<T> inline operator!=(const Vectorized<T>& other) const {
709
+ return binary_compare(other, [](__m512 x, __m512 y) {
710
+ auto zero_vec = _mm512_set1_epi32(0);
711
+ auto cmp = _mm512_cmp_ps_mask(x, y, _CMP_NEQ_UQ);
712
+ return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, cmp, 0xFFFFFFFF));
713
+ });
714
+ }
715
+ };
716
+
717
+ template<typename T, typename Op>
718
+ static inline Vectorized<T> binary_op_as_fp32(const Vectorized<T>& a, const Vectorized<T>& b, Op op) {
719
+ __m512 a_lo, a_hi;
720
+ __m512 b_lo, b_hi;
721
+ cvt_to_fp32<T>(__m512i(a), a_lo, a_hi);
722
+ cvt_to_fp32<T>(__m512i(b), b_lo, b_hi);
723
+ auto o1 = op(a_lo, b_lo);
724
+ auto o2 = op(a_hi, b_hi);
725
+ return cvt_from_fp32<T>(o1, o2);
726
+ }
727
+
728
+ template <>
729
+ class Vectorized<BFloat16>: public Vectorized16<BFloat16> {
730
+ public:
731
+ using Vectorized16::Vectorized16;
732
+
733
+ Vectorized<BFloat16> frac() const;
734
+
735
+ Vectorized<BFloat16> eq(const Vectorized<BFloat16>& other) const;
736
+ Vectorized<BFloat16> ne(const Vectorized<BFloat16>& other) const;
737
+ Vectorized<BFloat16> gt(const Vectorized<BFloat16>& other) const;
738
+ Vectorized<BFloat16> ge(const Vectorized<BFloat16>& other) const;
739
+ Vectorized<BFloat16> lt(const Vectorized<BFloat16>& other) const;
740
+ Vectorized<BFloat16> le(const Vectorized<BFloat16>& other) const;
741
+ };
742
+
743
+ Vectorized<BFloat16> inline operator+(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
744
+ return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_add_ps(x, y); });
745
+ }
746
+ Vectorized<BFloat16> inline operator-(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
747
+ return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_sub_ps(x, y); });
748
+ }
749
+ Vectorized<BFloat16> inline operator*(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
750
+ return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_mul_ps(x, y); });
751
+ }
752
+ Vectorized<BFloat16> inline operator/(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
753
+ return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_div_ps(x, y); });
754
+ }
755
+ Vectorized<BFloat16> inline operator&(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
756
+ return _mm512_and_si512(a, b);
757
+ }
758
+ Vectorized<BFloat16> inline operator|(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
759
+ return _mm512_or_si512(a, b);
760
+ }
761
+ Vectorized<BFloat16> inline operator^(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
762
+ return _mm512_xor_si512(a, b);
763
+ }
764
+
765
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::eq(const Vectorized<BFloat16>& other) const {
766
+ return (*this == other) & Vectorized<BFloat16>(1.0f);
767
+ }
768
+
769
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::ne(const Vectorized<BFloat16>& other) const {
770
+ return (*this != other) & Vectorized<BFloat16>(1.0f);
771
+ }
772
+
773
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::gt(const Vectorized<BFloat16>& other) const {
774
+ return (*this > other) & Vectorized<BFloat16>(1.0f);
775
+ }
776
+
777
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::ge(const Vectorized<BFloat16>& other) const {
778
+ return (*this >= other) & Vectorized<BFloat16>(1.0f);
779
+ }
780
+
781
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::lt(const Vectorized<BFloat16>& other) const {
782
+ return (*this < other) & Vectorized<BFloat16>(1.0f);
783
+ }
784
+
785
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::le(const Vectorized<BFloat16>& other) const {
786
+ return (*this <= other) & Vectorized<BFloat16>(1.0f);
787
+ }
788
+
789
+ // frac. Implement this here so we can use subtraction
790
+ inline Vectorized<BFloat16> Vectorized<BFloat16>::frac() const {
791
+ return *this - this->trunc();
792
+ }
793
+
794
+ // Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
795
+ // either input is a NaN.
796
+ template <>
797
+ Vectorized<BFloat16> inline maximum(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
798
+ __m512 a_lo, a_hi;
799
+ __m512 b_lo, b_hi;
800
+ cvtbf16_fp32(__m512i(a), a_lo, a_hi);
801
+ cvtbf16_fp32(__m512i(b), b_lo, b_hi);
802
+ auto max_lo = _mm512_max_ps(a_lo, b_lo);
803
+ auto max_hi = _mm512_max_ps(a_hi, b_hi);
804
+ auto nan_lo_mask = _mm512_cmp_ps_mask(a_lo, b_lo, _CMP_UNORD_Q);
805
+ auto nan_hi_mask = _mm512_cmp_ps_mask(a_hi, b_hi, _CMP_UNORD_Q);
806
+ auto nan_lo = _mm512_castsi512_ps(_mm512_set1_epi32(nan_lo_mask));
807
+ auto nan_hi = _mm512_castsi512_ps(_mm512_set1_epi32(nan_hi_mask));
808
+ // Exploit the fact that all-ones is a NaN.
809
+ auto o1 = _mm512_or_ps(max_lo, nan_lo);
810
+ auto o2 = _mm512_or_ps(max_hi, nan_hi);
811
+ return cvtfp32_bf16(o1, o2);
812
+ }
813
+
814
+ // Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
815
+ // either input is a NaN.
816
+ template <>
817
+ Vectorized<BFloat16> inline minimum(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& b) {
818
+ __m512 a_lo, a_hi;
819
+ __m512 b_lo, b_hi;
820
+ __m512i zero_vec = _mm512_set1_epi32(0);
821
+ cvtbf16_fp32(__m512i(a), a_lo, a_hi);
822
+ cvtbf16_fp32(__m512i(b), b_lo, b_hi);
823
+ auto min_lo = _mm512_min_ps(a_lo, b_lo);
824
+ auto min_hi = _mm512_min_ps(a_hi, b_hi);
825
+ auto nan_lo_mask = _mm512_cmp_ps_mask(a_lo, b_lo, _CMP_UNORD_Q);
826
+ auto nan_hi_mask = _mm512_cmp_ps_mask(a_hi, b_hi, _CMP_UNORD_Q);
827
+ auto nan_lo = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, nan_lo_mask,
828
+ 0xFFFFFFFF));
829
+ auto nan_hi = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, nan_hi_mask,
830
+ 0xFFFFFFFF));
831
+ // Exploit the fact that all-ones is a NaN.
832
+ auto o1 = _mm512_or_ps(min_lo, nan_lo);
833
+ auto o2 = _mm512_or_ps(min_hi, nan_hi);
834
+ return cvtfp32_bf16(o1, o2);
835
+ }
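// (Illustrative aside, not part of this header.) Scalar equivalent of the
// NaN-propagating maximum/minimum defined above, assuming <cmath> is
// included; the helper name ieee_maximum_scalar is hypothetical.
inline float ieee_maximum_scalar(float a, float b) {
  // If either operand is NaN, a + b is NaN (mirroring the all-ones-is-NaN
  // trick in the vectorized code); otherwise return the larger operand.
  return (std::isnan(a) || std::isnan(b)) ? (a + b) : (a > b ? a : b);
}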
836
+
837
+ template <>
838
+ Vectorized<BFloat16> inline clamp(const Vectorized<BFloat16>& a,
839
+ const Vectorized<BFloat16>& min, const Vectorized<BFloat16>& max) {
840
+ __m512 a_lo, a_hi;
841
+ __m512 min_lo, min_hi;
842
+ __m512 max_lo, max_hi;
843
+ cvtbf16_fp32(__m512i(a), a_lo, a_hi);
844
+ cvtbf16_fp32(__m512i(min), min_lo, min_hi);
845
+ cvtbf16_fp32(__m512i(max), max_lo, max_hi);
846
+ auto o1 = _mm512_min_ps(max_lo, _mm512_max_ps(min_lo, a_lo));
847
+ auto o2 = _mm512_min_ps(max_hi, _mm512_max_ps(min_hi, a_hi));
848
+ return cvtfp32_bf16(o1, o2);
849
+ }
850
+
851
+ template <>
852
+ Vectorized<BFloat16> inline clamp_max(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& max) {
853
+ __m512 a_lo, a_hi;
854
+ __m512 max_lo, max_hi;
855
+ cvtbf16_fp32(__m512i(a), a_lo, a_hi);
856
+ cvtbf16_fp32(__m512i(max), max_lo, max_hi);
857
+ auto o1 = _mm512_min_ps(max_lo, a_lo);
858
+ auto o2 = _mm512_min_ps(max_hi, a_hi);
859
+ return cvtfp32_bf16(o1, o2);
860
+ }
861
+
862
+ template <>
863
+ Vectorized<BFloat16> inline clamp_min(const Vectorized<BFloat16>& a, const Vectorized<BFloat16>& min) {
864
+ __m512 a_lo, a_hi;
865
+ __m512 min_lo, min_hi;
866
+ cvtbf16_fp32(__m512i(a), a_lo, a_hi);
867
+ cvtbf16_fp32(__m512i(min), min_lo, min_hi);
868
+ auto o1 = _mm512_max_ps(min_lo, a_lo);
869
+ auto o2 = _mm512_max_ps(min_hi, a_hi);
870
+ return cvtfp32_bf16(o1, o2);
871
+ }
872
+
873
+ template <>
874
+ inline void convert(const BFloat16* src, BFloat16* dst, int64_t n) {
875
+ int64_t i;
876
+ #pragma unroll
877
+ for (i = 0; i <= (n - Vectorized<BFloat16>::size()); i += Vectorized<BFloat16>::size()) {
878
+ auto vsrc = _mm512_loadu_si512(reinterpret_cast<__m512i*>((void*)(src + i)));
879
+ _mm512_storeu_si512(reinterpret_cast<__m512i*>((void*)(dst + i)), vsrc);
880
+ }
881
+ #pragma unroll
882
+ for (; i < n; i++) {
883
+ dst[i] = src[i];
884
+ }
885
+ }
886
+
887
+ template <>
888
+ inline void convert(const float* src, BFloat16* dst, int64_t n) {
889
+ int64_t i;
890
+ for (i = 0; i + Vectorized<BFloat16>::size() <= n; i += Vectorized<BFloat16>::size()) {
891
+ __m512 a = _mm512_loadu_ps(&src[i]);
892
+ __m512 b = _mm512_loadu_ps(&src[i + 16]);
893
+
894
+ __m512i bf = cvtfp32_bf16(a, b);
895
+ _mm512_storeu_si512(reinterpret_cast<__m512i*>(&dst[i]), bf);
896
+ }
897
+ for (; i < n; i++) {
898
+ dst[i] = c10::convert<BFloat16>(src[i]);
899
+ }
900
+ }
901
+
902
+ template <>
903
+ inline void convert(const double* src, BFloat16* dst, int64_t n) {
904
+ auto load_float = [](const double *src) -> __m512 {
905
+ // Load one float vector from an array of doubles
906
+ __m256 a = _mm512_cvtpd_ps(_mm512_loadu_pd(src));
907
+ __m256 b = _mm512_cvtpd_ps(_mm512_loadu_pd(src + 8));
908
+ return _mm512_insertf32x8(_mm512_castps256_ps512(a), b, 1);
909
+ };
910
+
911
+ int64_t i;
912
+ for (i = 0; i + Vectorized<BFloat16>::size() <= n; i += Vectorized<BFloat16>::size()) {
913
+ __m512 a = load_float(&src[i]);
914
+ __m512 b = load_float(&src[i + 16]);
915
+
916
+ __m512i bf = cvtfp32_bf16(a, b);
917
+ _mm512_storeu_si512(reinterpret_cast<__m512i*>(&dst[i]), bf);
918
+ }
919
+ for (; i < n; i++) {
920
+ dst[i] = c10::convert<BFloat16>(src[i]);
921
+ }
922
+ }
923
+
924
+ template <>
925
+ Vectorized<BFloat16> inline fmadd(const Vectorized<BFloat16>& a,
926
+ const Vectorized<BFloat16>& b, const Vectorized<BFloat16>& c) {
927
+ __m512 a_lo, a_hi;
928
+ __m512 b_lo, b_hi;
929
+ __m512 c_lo, c_hi;
930
+ cvtbf16_fp32(__m512i(a), a_lo, a_hi);
931
+ cvtbf16_fp32(__m512i(b), b_lo, b_hi);
932
+ cvtbf16_fp32(__m512i(c), c_lo, c_hi);
933
+ auto o1 = _mm512_fmadd_ps(a_lo, b_lo, c_lo);
934
+ auto o2 = _mm512_fmadd_ps(a_hi, b_hi, c_hi);
935
+ return cvtfp32_bf16(o1, o2);
936
+ }
937
+
938
+ template <>
939
+ class Vectorized<Half>: public Vectorized16<Half> {
940
+ public:
941
+ using Vectorized16::Vectorized16;
942
+
943
+ Vectorized<Half> frac() const;
944
+
945
+ Vectorized<Half> eq(const Vectorized<Half>& other) const;
946
+ Vectorized<Half> ne(const Vectorized<Half>& other) const;
947
+ Vectorized<Half> gt(const Vectorized<Half>& other) const;
948
+ Vectorized<Half> ge(const Vectorized<Half>& other) const;
949
+ Vectorized<Half> lt(const Vectorized<Half>& other) const;
950
+ Vectorized<Half> le(const Vectorized<Half>& other) const;
951
+ };
952
+
953
+ Vectorized<Half> inline operator+(const Vectorized<Half>& a, const Vectorized<Half>& b) {
954
+ return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_add_ps(x, y); });
955
+ }
956
+ Vectorized<Half> inline operator-(const Vectorized<Half>& a, const Vectorized<Half>& b) {
957
+ return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_sub_ps(x, y); });
958
+ }
959
+ Vectorized<Half> inline operator*(const Vectorized<Half>& a, const Vectorized<Half>& b) {
960
+ return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_mul_ps(x, y); });
961
+ }
962
+ Vectorized<Half> inline operator/(const Vectorized<Half>& a, const Vectorized<Half>& b) {
963
+ return binary_op_as_fp32(a, b, [](const __m512& x, const __m512& y) { return _mm512_div_ps(x, y); });
964
+ }
965
+
966
+ Vectorized<Half> inline operator&(const Vectorized<Half>& a, const Vectorized<Half>& b) {
967
+ return _mm512_and_si512(a, b);
968
+ }
969
+ Vectorized<Half> inline operator|(const Vectorized<Half>& a, const Vectorized<Half>& b) {
970
+ return _mm512_or_si512(a, b);
971
+ }
972
+ Vectorized<Half> inline operator^(const Vectorized<Half>& a, const Vectorized<Half>& b) {
973
+ return _mm512_xor_si512(a, b);
974
+ }
975
+
976
+ inline Vectorized<Half> Vectorized<Half>::eq(const Vectorized<Half>& other) const {
977
+ return (*this == other) & Vectorized<Half>(1.0f);
978
+ }
979
+
980
+ inline Vectorized<Half> Vectorized<Half>::ne(const Vectorized<Half>& other) const {
981
+ return (*this != other) & Vectorized<Half>(1.0f);
982
+ }
983
+
984
+ inline Vectorized<Half> Vectorized<Half>::gt(const Vectorized<Half>& other) const {
985
+ return (*this > other) & Vectorized<Half>(1.0f);
986
+ }
987
+
988
+ inline Vectorized<Half> Vectorized<Half>::ge(const Vectorized<Half>& other) const {
989
+ return (*this >= other) & Vectorized<Half>(1.0f);
990
+ }
991
+
992
+ inline Vectorized<Half> Vectorized<Half>::lt(const Vectorized<Half>& other) const {
993
+ return (*this < other) & Vectorized<Half>(1.0f);
994
+ }
995
+
996
+ inline Vectorized<Half> Vectorized<Half>::le(const Vectorized<Half>& other) const {
997
+ return (*this <= other) & Vectorized<Half>(1.0f);
998
+ }
999
+
1000
+ // frac. Implement this here so we can use subtraction
1001
+ inline Vectorized<Half> Vectorized<Half>::frac() const {
1002
+ return *this - this->trunc();
1003
+ }
1004
+
1005
+ // Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
1006
+ // either input is a NaN.
1007
+ template <>
1008
+ Vectorized<Half> inline maximum(const Vectorized<Half>& a, const Vectorized<Half>& b) {
1009
+ __m512 a_lo, a_hi;
1010
+ __m512 b_lo, b_hi;
1011
+ cvtfp16_fp32(__m512i(a), a_lo, a_hi);
1012
+ cvtfp16_fp32(__m512i(b), b_lo, b_hi);
1013
+ auto max_lo = _mm512_max_ps(a_lo, b_lo);
1014
+ auto max_hi = _mm512_max_ps(a_hi, b_hi);
1015
+ auto nan_lo_mask = _mm512_cmp_ps_mask(a_lo, b_lo, _CMP_UNORD_Q);
1016
+ auto nan_hi_mask = _mm512_cmp_ps_mask(a_hi, b_hi, _CMP_UNORD_Q);
1017
+ auto nan_lo = _mm512_castsi512_ps(_mm512_set1_epi32(nan_lo_mask));
1018
+ auto nan_hi = _mm512_castsi512_ps(_mm512_set1_epi32(nan_hi_mask));
1019
+ // Exploit the fact that all-ones is a NaN.
1020
+ auto o1 = _mm512_or_ps(max_lo, nan_lo);
1021
+ auto o2 = _mm512_or_ps(max_hi, nan_hi);
1022
+ return cvtfp32_fp16(o1, o2);
1023
+ }
1024
+
1025
+ // Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
1026
+ // either input is a NaN.
1027
+ template <>
1028
+ Vectorized<Half> inline minimum(const Vectorized<Half>& a, const Vectorized<Half>& b) {
1029
+ __m512 a_lo, a_hi;
1030
+ __m512 b_lo, b_hi;
1031
+ __m512i zero_vec = _mm512_set1_epi32(0);
1032
+ cvtfp16_fp32(__m512i(a), a_lo, a_hi);
1033
+ cvtfp16_fp32(__m512i(b), b_lo, b_hi);
1034
+ auto min_lo = _mm512_min_ps(a_lo, b_lo);
1035
+ auto min_hi = _mm512_min_ps(a_hi, b_hi);
1036
+ auto nan_lo_mask = _mm512_cmp_ps_mask(a_lo, b_lo, _CMP_UNORD_Q);
1037
+ auto nan_hi_mask = _mm512_cmp_ps_mask(a_hi, b_hi, _CMP_UNORD_Q);
1038
+ auto nan_lo = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, nan_lo_mask,
1039
+ 0xFFFFFFFF));
1040
+ auto nan_hi = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, nan_hi_mask,
1041
+ 0xFFFFFFFF));
1042
+ // Exploit the fact that all-ones is a NaN.
1043
+ auto o1 = _mm512_or_ps(min_lo, nan_lo);
1044
+ auto o2 = _mm512_or_ps(min_hi, nan_hi);
1045
+ return cvtfp32_fp16(o1, o2);
1046
+ }
1047
+
1048
+ template <>
1049
+ Vectorized<Half> inline clamp(const Vectorized<Half>& a,
1050
+ const Vectorized<Half>& min, const Vectorized<Half>& max) {
1051
+ __m512 a_lo, a_hi;
1052
+ __m512 min_lo, min_hi;
1053
+ __m512 max_lo, max_hi;
1054
+ cvtfp16_fp32(__m512i(a), a_lo, a_hi);
1055
+ cvtfp16_fp32(__m512i(min), min_lo, min_hi);
1056
+ cvtfp16_fp32(__m512i(max), max_lo, max_hi);
1057
+ auto o1 = _mm512_min_ps(max_lo, _mm512_max_ps(min_lo, a_lo));
1058
+ auto o2 = _mm512_min_ps(max_hi, _mm512_max_ps(min_hi, a_hi));
1059
+ return cvtfp32_fp16(o1, o2);
1060
+ }
1061
+
1062
+ template <>
1063
+ Vectorized<Half> inline clamp_max(const Vectorized<Half>& a, const Vectorized<Half>& max) {
1064
+ __m512 a_lo, a_hi;
1065
+ __m512 max_lo, max_hi;
1066
+ cvtfp16_fp32(__m512i(a), a_lo, a_hi);
1067
+ cvtfp16_fp32(__m512i(max), max_lo, max_hi);
1068
+ auto o1 = _mm512_min_ps(max_lo, a_lo);
1069
+ auto o2 = _mm512_min_ps(max_hi, a_hi);
1070
+ return cvtfp32_fp16(o1, o2);
1071
+ }
1072
+
1073
+ template <>
1074
+ Vectorized<Half> inline clamp_min(const Vectorized<Half>& a, const Vectorized<Half>& min) {
1075
+ __m512 a_lo, a_hi;
1076
+ __m512 min_lo, min_hi;
1077
+ cvtfp16_fp32(__m512i(a), a_lo, a_hi);
1078
+ cvtfp16_fp32(__m512i(min), min_lo, min_hi);
1079
+ auto o1 = _mm512_max_ps(min_lo, a_lo);
1080
+ auto o2 = _mm512_max_ps(min_hi, a_hi);
1081
+ return cvtfp32_fp16(o1, o2);
1082
+ }
1083
+
1084
+ template <>
1085
+ inline void convert(const Half* src, Half* dst, int64_t n) {
1086
+ int64_t i;
1087
+ #pragma unroll
1088
+ for (i = 0; i <= (n - Vectorized<Half>::size()); i += Vectorized<Half>::size()) {
1089
+ auto vsrc = _mm512_loadu_si512(reinterpret_cast<__m512i*>((void*)(src + i)));
1090
+ _mm512_storeu_si512(reinterpret_cast<__m512i*>((void*)(dst + i)), vsrc);
1091
+ }
1092
+ #pragma unroll
1093
+ for (; i < n; i++) {
1094
+ dst[i] = src[i];
1095
+ }
1096
+ }
1097
+
1098
+ template <>
1099
+ inline void convert(const float* src, Half* dst, int64_t n) {
1100
+ int64_t i;
1101
+ for (i = 0; i + Vectorized<Half>::size() <= n; i += Vectorized<Half>::size()) {
1102
+ __m512 a = _mm512_loadu_ps(&src[i]);
1103
+ __m512 b = _mm512_loadu_ps(&src[i + 16]);
1104
+
1105
+ __m512i bf = cvtfp32_fp16(a, b);
1106
+ _mm512_storeu_si512(reinterpret_cast<__m512i*>(&dst[i]), bf);
1107
+ }
1108
+ for (; i < n; i++) {
1109
+ dst[i] = c10::convert<Half>(src[i]);
1110
+ }
1111
+ }
1112
+
1113
+ template <>
1114
+ inline void convert(const double* src, Half* dst, int64_t n) {
1115
+ auto load_float = [](const double *src) -> __m512 {
1116
+ // Load one float vector from an array of doubles
1117
+ __m256 a = _mm512_cvtpd_ps(_mm512_loadu_pd(src));
1118
+ __m256 b = _mm512_cvtpd_ps(_mm512_loadu_pd(src + 8));
1119
+ return _mm512_insertf32x8(_mm512_castps256_ps512(a), b, 1);
1120
+ };
1121
+
1122
+ int64_t i;
1123
+ for (i = 0; i + Vectorized<Half>::size() <= n; i += Vectorized<Half>::size()) {
1124
+ __m512 a = load_float(&src[i]);
1125
+ __m512 b = load_float(&src[i + 16]);
1126
+
1127
+ __m512i bf = cvtfp32_fp16(a, b);
1128
+ _mm512_storeu_si512(reinterpret_cast<__m512i*>(&dst[i]), bf);
1129
+ }
1130
+ for (; i < n; i++) {
1131
+ dst[i] = c10::convert<Half>(src[i]);
1132
+ }
1133
+ }
1134
+
1135
+ template <>
1136
+ Vectorized<Half> inline fmadd(const Vectorized<Half>& a,
1137
+ const Vectorized<Half>& b, const Vectorized<Half>& c) {
1138
+ __m512 a_lo, a_hi;
1139
+ __m512 b_lo, b_hi;
1140
+ __m512 c_lo, c_hi;
1141
+ cvtfp16_fp32(__m512i(a), a_lo, a_hi);
1142
+ cvtfp16_fp32(__m512i(b), b_lo, b_hi);
1143
+ cvtfp16_fp32(__m512i(c), c_lo, c_hi);
1144
+ auto o1 = _mm512_fmadd_ps(a_lo, b_lo, c_lo);
1145
+ auto o2 = _mm512_fmadd_ps(a_hi, b_hi, c_hi);
1146
+ return cvtfp32_fp16(o1, o2);
1147
+ }
1148
+
1149
+ #define CONVERT_VECTORIZED_INIT(type, name) \
1150
+ inline std::tuple<Vectorized<float>, Vectorized<float>> convert_##name##_float(const Vectorized<type>& a) { \
1151
+ __m512 o1, o2; \
1152
+ cvt_to_fp32<type>(__m512i(a), o1, o2); \
1153
+ return std::make_tuple(o1, o2); \
1154
+ } \
1155
+ \
1156
+ inline Vectorized<type> convert_float_##name(const Vectorized<float>& a, const Vectorized<float>& b) { \
1157
+ return cvt_from_fp32<type>(__m512(a), __m512(b)); \
1158
+ }
1159
+ CONVERT_VECTORIZED_INIT(BFloat16, bfloat16);
1160
+ CONVERT_VECTORIZED_INIT(Half, half);
1161
+
1162
+ #else //defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
1163
+
1164
+ #define CONVERT_NON_VECTORIZED_INIT(type, name) \
1165
+ inline std::tuple<Vectorized<float>, Vectorized<float>> convert_##name##_float(const Vectorized<type>& a) { \
1166
+ constexpr int64_t K = Vectorized<type>::size(); \
1167
+ __at_align__ float arr[K]; \
1168
+ __at_align__ type arr2[K]; \
1169
+ a.store(arr2); \
1170
+ for (const auto k : c10::irange(K)) { \
1171
+ arr[k] = c10::convert<float>(arr2[k]); \
1172
+ } \
1173
+ return std::make_tuple( \
1174
+ Vectorized<float>::loadu(arr), \
1175
+ Vectorized<float>::loadu(arr + Vectorized<float>::size())); \
1176
+ } \
1177
+ \
1178
+ inline Vectorized<type> convert_float_##name(const Vectorized<float>& a, const Vectorized<float>& b) { \
1179
+ constexpr int64_t K = Vectorized<type>::size(); \
1180
+ __at_align__ float arr[K]; \
1181
+ __at_align__ type arr2[K]; \
1182
+ a.store(arr); \
1183
+ b.store(arr + Vectorized<float>::size()); \
1184
+ for (const auto k : c10::irange(K)) { \
1185
+ arr2[k] = c10::convert<type>(arr[k]); \
1186
+ } \
1187
+ return Vectorized<type>::loadu(arr2); \
1188
+ }
1189
+ CONVERT_NON_VECTORIZED_INIT(BFloat16, bfloat16);
1190
+ CONVERT_NON_VECTORIZED_INIT(Half, half);
1191
+
1192
+ #endif // defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
1193
+
1194
+ #if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
1195
+ #define LOAD_FP32_VECTORIZED_INIT(type, name) \
1196
+ inline void load_fp32_from_##name(const type *data, Vectorized<float>& out) { \
1197
+ auto values = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(data)); \
1198
+ __m512 out_values; \
1199
+ cvt_to_fp32<type>(values, out_values); \
1200
+ out = out_values; \
1201
+ } \
1202
+ \
1203
+ inline void load_fp32_from_##name(const type *data, Vectorized<float>& out1, Vectorized<float>& out2) { \
1204
+ auto vec = Vectorized<type>::loadu(data); \
1205
+ __m512 out1_values, out2_values; \
1206
+ cvt_to_fp32<type>(vec, out1_values, out2_values); \
1207
+ out1 = out1_values; \
1208
+ out2 = out2_values; \
1209
+ }
1210
+ LOAD_FP32_VECTORIZED_INIT(BFloat16, bf16);
1211
+ LOAD_FP32_VECTORIZED_INIT(Half, fp16);
1212
+
1213
+ #else // defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
1214
+ #define LOAD_FP32_NON_VECTORIZED_INIT(type, name) \
1215
+ inline void load_fp32_from_##name(const type *data, Vectorized<float>& out) { \
1216
+ __at_align__ float values[Vectorized<float>::size()]; \
1217
+ for (const auto k : c10::irange(Vectorized<float>::size())) { \
1218
+ values[k] = data[k]; \
1219
+ } \
1220
+ out = Vectorized<float>::loadu(values); \
1221
+ } \
1222
+ \
1223
+ inline void load_fp32_from_##name(const type *data, Vectorized<float>& out1, Vectorized<float>& out2) { \
1224
+ load_fp32_from_##name(data, out1); \
1225
+ data += Vectorized<float>::size(); \
1226
+ load_fp32_from_##name(data, out2); \
1227
+ }
1228
+ LOAD_FP32_NON_VECTORIZED_INIT(BFloat16, bf16);
1229
+ LOAD_FP32_NON_VECTORIZED_INIT(Half, fp16);
1230
+
1231
+ #endif
1232
+ }}}
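The BFloat16 and Half operators above emulate reduced-precision arithmetic by widening each vector into two __m512 fp32 halves, computing in fp32, and narrowing back. A minimal usage sketch follows, assuming an AVX512 build and the loadu()/store() members declared earlier in this header; the function name scale_bf16 and the include of <ATen/cpu/vec/vec.h> are illustrative choices, not part of the header.

#include <ATen/cpu/vec/vec.h>
#include <c10/util/BFloat16.h>
#include <cstdint>

// Multiplies two bf16 arrays elementwise, 32 lanes at a time, using the
// fp32-emulated operator* defined in vec512_bfloat16.h.
void scale_bf16(const c10::BFloat16* x, const c10::BFloat16* y,
                c10::BFloat16* out, int64_t n) {
  using Vec = at::vec::Vectorized<c10::BFloat16>;
  int64_t i = 0;
  for (; i + Vec::size() <= n; i += Vec::size()) {
    Vec a = Vec::loadu(x + i);
    Vec b = Vec::loadu(y + i);
    (a * b).store(out + i);
  }
  for (; i < n; ++i) {  // scalar tail
    out[i] = c10::BFloat16(static_cast<float>(x[i]) * static_cast<float>(y[i]));
  }
}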
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_complex_double.h ADDED
@@ -0,0 +1,512 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <c10/util/complex.h>
7
+ #include <c10/util/irange.h>
8
+ #include <ATen/cpu/vec/intrinsics.h>
9
+ #include <ATen/cpu/vec/vec_base.h>
10
+ #if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
11
+ #include <sleef.h>
12
+ #endif
13
+
14
+ namespace at {
15
+ namespace vec {
16
+ // See Note [CPU_CAPABILITY namespace]
17
+ inline namespace CPU_CAPABILITY {
18
+
19
+ #if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
20
+
21
+ template <> class Vectorized<c10::complex<double>> {
22
+ private:
23
+ __m512d values;
24
+ static constexpr __m512i zero_vector {0, 0, 0, 0, 0, 0, 0, 0};
25
+ public:
26
+ using value_type = c10::complex<double>;
27
+ using size_type = int;
28
+ static constexpr size_type size() {
29
+ return 4;
30
+ }
31
+ Vectorized() {}
32
+ Vectorized(__m512d v) : values(v) {}
33
+ Vectorized(c10::complex<double> val) {
34
+ double real_value = val.real();
35
+ double imag_value = val.imag();
36
+ values = _mm512_setr_pd(real_value, imag_value, real_value, imag_value,
37
+ real_value, imag_value, real_value, imag_value);
38
+ }
39
+ Vectorized(c10::complex<double> val1, c10::complex<double> val2,
40
+ c10::complex<double> val3, c10::complex<double> val4) {
41
+ values = _mm512_setr_pd(val1.real(), val1.imag(),
42
+ val2.real(), val2.imag(),
43
+ val3.real(), val3.imag(),
44
+ val4.real(), val4.imag());
45
+ }
46
+ operator __m512d() const {
47
+ return values;
48
+ }
49
+ template <int64_t mask>
50
+ static Vectorized<c10::complex<double>> blend(const Vectorized<c10::complex<double>>& a,
51
+ const Vectorized<c10::complex<double>>& b) {
52
+ // convert c10::complex<V> index mask to V index mask: xy -> xxyy
53
+ // NOLINTNEXTLINE(clang-diagnostic-warning)
54
+ switch (mask) {
55
+ case 0:
56
+ return a;
57
+ case 1:
58
+ return _mm512_mask_blend_pd(0x03, a.values, b.values); //b0000 0001 = b0000 0011
59
+ case 2:
60
+ return _mm512_mask_blend_pd(0x0C, a.values, b.values); //b0000 0010 = b0000 1100
61
+ case 3:
62
+ return _mm512_mask_blend_pd(0x0F, a.values, b.values); //b0000 0011 = b0000 1111
63
+ case 4:
64
+ return _mm512_mask_blend_pd(0x30, a.values, b.values); //b0000 0100 = b0011 0000
65
+ case 5:
66
+ return _mm512_mask_blend_pd(0x33, a.values, b.values); //b0000 0101 = b0011 0011
67
+ case 6:
68
+ return _mm512_mask_blend_pd(0x3C, a.values, b.values); //b0000 0110 = b0011 1100
69
+ case 7:
70
+ return _mm512_mask_blend_pd(0x3F, a.values, b.values); //b0000 0111 = b0011 1111
71
+ case 8:
72
+ return _mm512_mask_blend_pd(0xC0, a.values, b.values); //b0000 1000 = b1100 0000
73
+ case 9:
74
+ return _mm512_mask_blend_pd(0xC3, a.values, b.values); //b0000 1001 = b1100 0011
75
+ case 10:
76
+ return _mm512_mask_blend_pd(0xCC, a.values, b.values); //b0000 1010 = b1100 1100
77
+ case 11:
78
+ return _mm512_mask_blend_pd(0xCF, a.values, b.values); //b0000 1011 = b1100 1111
79
+ case 12:
80
+ return _mm512_mask_blend_pd(0xF0, a.values, b.values); //b0000 1100 = b1111 0000
81
+ case 13:
82
+ return _mm512_mask_blend_pd(0xF3, a.values, b.values); //b0000 1101 = b1111 0011
83
+ case 14:
84
+ return _mm512_mask_blend_pd(0xFC, a.values, b.values); //b0000 1110 = b1111 1100
85
+ case 15:
86
+ return _mm512_mask_blend_pd(0xFF, a.values, b.values); //b0000 1111 = b1111 1111
87
+ }
88
+ return b;
89
+ }
90
+ static Vectorized<c10::complex<double>> blendv(const Vectorized<c10::complex<double>>& a,
91
+ const Vectorized<c10::complex<double>>& b,
92
+ const Vectorized<c10::complex<double>>& mask) {
93
+ // convert c10::complex<V> index mask to V index mask: xy -> xxyy
94
+ auto mask_ = _mm512_unpacklo_pd(mask.values, mask.values);
95
+ auto all_ones = _mm512_set1_epi64(0xFFFFFFFFFFFFFFFF);
96
+ auto mmask = _mm512_cmp_epi64_mask(_mm512_castpd_si512(mask_), all_ones, _MM_CMPINT_EQ);
97
+ return _mm512_mask_blend_pd(mmask, a.values, b.values);
98
+ }
99
+ template<typename step_t>
100
+ static Vectorized<c10::complex<double>> arange(c10::complex<double> base = 0.,
101
+ step_t step = static_cast<step_t>(1)) {
102
+ return Vectorized<c10::complex<double>>(base,
103
+ base + c10::complex<double>(1)*step,
104
+ base + c10::complex<double>(2)*step,
105
+ base + c10::complex<double>(3)*step);
106
+ }
107
+ static Vectorized<c10::complex<double>> set(const Vectorized<c10::complex<double>>& a,
108
+ const Vectorized<c10::complex<double>>& b,
109
+ int64_t count = size()) {
110
+ switch (count) {
111
+ case 0:
112
+ return a;
113
+ case 1:
114
+ return blend<1>(a, b);
115
+ case 2:
116
+ return blend<3>(a, b);
117
+ case 3:
118
+ return blend<7>(a, b);
119
+ }
120
+ return b;
121
+ }
122
+ static Vectorized<c10::complex<double>> loadu(const void* ptr, int64_t count = size()) {
123
+ if (count == size())
124
+ return _mm512_loadu_pd(reinterpret_cast<const double*>(ptr));
125
+
126
+ __at_align__ double tmp_values[2*size()];
127
+ // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
128
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
129
+ // instructions while a loop would be compiled to one instruction.
130
+ for (const auto i : c10::irange(2*size())) {
131
+ tmp_values[i] = 0.0;
132
+ }
133
+ std::memcpy(
134
+ tmp_values,
135
+ reinterpret_cast<const double*>(ptr),
136
+ count * sizeof(c10::complex<double>));
137
+ return _mm512_load_pd(tmp_values);
138
+ }
139
+ void store(void* ptr, int count = size()) const {
140
+ if (count == size()) {
141
+ _mm512_storeu_pd(reinterpret_cast<double*>(ptr), values);
142
+ } else if (count > 0) {
143
+ double tmp_values[2*size()];
144
+ _mm512_storeu_pd(reinterpret_cast<double*>(tmp_values), values);
145
+ std::memcpy(ptr, tmp_values, count * sizeof(c10::complex<double>));
146
+ }
147
+ }
148
+ const c10::complex<double>& operator[](int idx) const = delete;
149
+ c10::complex<double>& operator[](int idx) = delete;
150
+ Vectorized<c10::complex<double>> map(c10::complex<double> (*const f)(const c10::complex<double> &)) const {
151
+ __at_align__ c10::complex<double> tmp[size()];
152
+ store(tmp);
153
+ for (const auto i : c10::irange(size())) {
154
+ tmp[i] = f(tmp[i]);
155
+ }
156
+ return loadu(tmp);
157
+ }
158
+ // AVX512 doesn't have horizontal add & horizontal sub instructions.
159
+ // TODO: hadd_pd() & hsub_pd() may have scope for improvement.
160
+ static inline __m512d hadd_pd(__m512d a, __m512d b) {
161
+ __m512i idx1 = _mm512_set_epi64(14, 6, 12, 4, 10, 2, 8, 0);
162
+ __m512i idx2 = _mm512_set_epi64(15, 7, 13, 5, 11, 3, 9, 1);
163
+ return _mm512_add_pd(_mm512_mask_permutex2var_pd(a, 0xff, idx1, b),
164
+ _mm512_mask_permutex2var_pd(a, 0xff, idx2, b));
165
+ }
166
+ static inline __m512d hsub_pd(__m512d a, __m512d b) {
167
+ __m512i idx1 = _mm512_set_epi64(14, 6, 12, 4, 10, 2, 8, 0);
168
+ __m512i idx2 = _mm512_set_epi64(15, 7, 13, 5, 11, 3, 9, 1);
169
+ return _mm512_sub_pd(_mm512_mask_permutex2var_pd(a, 0xff, idx1, b),
170
+ _mm512_mask_permutex2var_pd(a, 0xff, idx2, b));
171
+ }
172
+ __m512d abs_2_() const {
173
+ auto val_2 = _mm512_mul_pd(values, values); // a*a b*b
174
+ return hadd_pd(val_2, val_2); // a*a+b*b a*a+b*b
175
+ }
176
+ __m512d abs_() const {
177
+ auto real = _mm512_movedup_pd(values); // real real
178
+ // movehdup_pd does not exist...
179
+ auto imag = _mm512_permute_pd(values, 0xff); // imag imag
180
+ return Sleef_hypotd8_u05(real, imag); // abs abs
181
+ }
182
+ Vectorized<c10::complex<double>> abs() const {
183
+ const __m512d real_mask = _mm512_castsi512_pd(_mm512_setr_epi64(0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
184
+ 0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
185
+ 0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
186
+ 0xFFFFFFFFFFFFFFFF, 0x0000000000000000));
187
+ return _mm512_and_pd(abs_(), real_mask); // abs 0
188
+ }
189
+ __m512d angle_() const {
190
+ // angle = atan2(b, a)
191
+ auto b_a = _mm512_permute_pd(values, 0x55); // b a
192
+ return Sleef_atan2d8_u10(values, b_a); // 90-angle angle
193
+ }
194
+ Vectorized<c10::complex<double>> angle() const {
195
+ const __m512d real_mask = _mm512_castsi512_pd(_mm512_setr_epi64(0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
196
+ 0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
197
+ 0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
198
+ 0xFFFFFFFFFFFFFFFF, 0x0000000000000000));
199
+ auto angle = _mm512_permute_pd(angle_(), 0x55); // angle 90-angle
200
+ return _mm512_and_pd(angle, real_mask); // angle 0
201
+ }
202
+ Vectorized<c10::complex<double>> sgn() const {
203
+ auto abs = abs_();
204
+ auto zero = _mm512_setzero_pd();
205
+ auto mask = _mm512_cmp_pd_mask(abs, zero, _CMP_EQ_OQ);
206
+ auto div = values / abs;
207
+ return _mm512_mask_blend_pd(mask, div, zero);
208
+ }
209
+ __m512d real_() const {
210
+ const __m512d real_mask = _mm512_castsi512_pd(_mm512_setr_epi64(0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
211
+ 0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
212
+ 0xFFFFFFFFFFFFFFFF, 0x0000000000000000,
213
+ 0xFFFFFFFFFFFFFFFF, 0x0000000000000000));
214
+ return _mm512_and_pd(values, real_mask);
215
+ }
216
+ Vectorized<c10::complex<double>> real() const {
217
+ return real_();
218
+ }
219
+ __m512d imag_() const {
220
+ const __m512d imag_mask = _mm512_castsi512_pd(_mm512_setr_epi64(0x0000000000000000, 0xFFFFFFFFFFFFFFFF,
221
+ 0x0000000000000000, 0xFFFFFFFFFFFFFFFF,
222
+ 0x0000000000000000, 0xFFFFFFFFFFFFFFFF,
223
+ 0x0000000000000000, 0xFFFFFFFFFFFFFFFF));
224
+ return _mm512_and_pd(values, imag_mask);
225
+ }
226
+ Vectorized<c10::complex<double>> imag() const {
227
+ return _mm512_permute_pd(imag_(), 0x55); //b a
228
+ }
229
+ __m512d conj_() const {
230
+ const __m512d sign_mask = _mm512_setr_pd(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
231
+ return _mm512_xor_pd(values, sign_mask); // a -b
232
+ }
233
+ Vectorized<c10::complex<double>> conj() const {
234
+ return conj_();
235
+ }
236
+ Vectorized<c10::complex<double>> log() const {
237
+ // Most trigonometric ops use the log() op to improve complex number performance.
238
+ return map(std::log);
239
+ }
240
+ Vectorized<c10::complex<double>> log2() const {
241
+ const __m512d log2_ = _mm512_set1_pd(std::log(2));
242
+ return _mm512_div_pd(log(), log2_);
243
+ }
244
+ Vectorized<c10::complex<double>> log10() const {
245
+ const __m512d log10_ = _mm512_set1_pd(std::log(10));
246
+ return _mm512_div_pd(log(), log10_);
247
+ }
248
+ Vectorized<c10::complex<double>> log1p() const {
249
+ return map(std::log1p);
250
+ }
251
+ Vectorized<c10::complex<double>> asin() const {
252
+ // asin(x)
253
+ // = -i*ln(iz + sqrt(1 -z^2))
254
+ // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi)))
255
+ // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi))
256
+ const __m512d one = _mm512_set1_pd(1);
257
+
258
+ auto conj = conj_();
259
+ auto b_a = _mm512_permute_pd(conj, 0x55); //-b a
260
+ auto ab = _mm512_mul_pd(conj, b_a); //-ab -ab
261
+ auto im = _mm512_add_pd(ab, ab); //-2ab -2ab
262
+
263
+ auto val_2 = _mm512_mul_pd(values, values); // a*a b*b
264
+ auto re = hsub_pd(val_2, _mm512_permute_pd(val_2, 0x55)); // a*a-b*b b*b-a*a
265
+ re = _mm512_sub_pd(one, re);
266
+
267
+ auto root = Vectorized(_mm512_mask_blend_pd(0xAA, re, im)).sqrt(); //sqrt(re + i*im)
268
+ auto ln = Vectorized(_mm512_add_pd(b_a, root)).log(); //ln(iz + sqrt())
269
+ return Vectorized(_mm512_permute_pd(ln.values, 0x55)).conj(); //-i*ln()
270
+ }
271
+ Vectorized<c10::complex<double>> acos() const {
272
+ // acos(x) = pi/2 - asin(x)
273
+ constexpr auto pi_2d = c10::pi<double> / 2;
274
+ const __m512d pi_2 = _mm512_setr_pd(pi_2d, 0.0, pi_2d, 0.0, pi_2d, 0.0, pi_2d, 0.0);
275
+ return _mm512_sub_pd(pi_2, asin());
276
+ }
277
+ Vectorized<c10::complex<double>> atan() const;
278
+ Vectorized<c10::complex<double>> atanh() const {
279
+ return map(std::atanh);
280
+ }
281
+ Vectorized<c10::complex<double>> exp() const {
282
+ //exp(a + bi)
283
+ // = exp(a)*(cos(b) + sin(b)i)
284
+ auto exp = Sleef_expd8_u10(values); //exp(a) exp(b)
285
+ exp = _mm512_mask_blend_pd(0xAA, exp, _mm512_permute_pd(exp, 0x55)); //exp(a) exp(a)
286
+
287
+ auto sin_cos = Sleef_sincosd8_u10(values); //[sin(a), cos(a)] [sin(b), cos(b)]
288
+ auto cos_sin = _mm512_mask_blend_pd(0xAA, _mm512_permute_pd(sin_cos.y, 0x55),
289
+ sin_cos.x); //cos(b) sin(b)
290
+ return _mm512_mul_pd(exp, cos_sin);
291
+ }
292
+ Vectorized<c10::complex<double>> exp2() const {
293
+ // Use identity 2**x = exp(log(2) * x)
294
+ const __m512d ln_2 = _mm512_set1_pd(c10::ln_2<double>);
295
+ Vectorized<c10::complex<double>> scaled_values = _mm512_mul_pd(values, ln_2);
296
+ return scaled_values.exp();
297
+ }
298
+ Vectorized<c10::complex<double>> expm1() const {
299
+ return map(std::expm1);
300
+ }
301
+ Vectorized<c10::complex<double>> sin() const {
302
+ return map(std::sin);
303
+ }
304
+ Vectorized<c10::complex<double>> sinh() const {
305
+ return map(std::sinh);
306
+ }
307
+ Vectorized<c10::complex<double>> cos() const {
308
+ return map(std::cos);
309
+ }
310
+ Vectorized<c10::complex<double>> cosh() const {
311
+ return map(std::cosh);
312
+ }
313
+ Vectorized<c10::complex<double>> ceil() const {
314
+ return _mm512_ceil_pd(values);
315
+ }
316
+ Vectorized<c10::complex<double>> floor() const {
317
+ return _mm512_floor_pd(values);
318
+ }
319
+ Vectorized<c10::complex<double>> neg() const {
320
+ auto zero = _mm512_setzero_pd();
321
+ return _mm512_sub_pd(zero, values);
322
+ }
323
+ Vectorized<c10::complex<double>> round() const {
324
+ return _mm512_roundscale_pd(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
325
+ }
326
+ Vectorized<c10::complex<double>> tan() const {
327
+ return map(std::tan);
328
+ }
329
+ Vectorized<c10::complex<double>> tanh() const {
330
+ return map(std::tanh);
331
+ }
332
+ Vectorized<c10::complex<double>> trunc() const {
333
+ return _mm512_roundscale_pd(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
334
+ }
335
+ Vectorized<c10::complex<double>> sqrt() const {
336
+ return map(std::sqrt);
337
+ }
338
+ Vectorized<c10::complex<double>> reciprocal() const;
339
+ Vectorized<c10::complex<double>> rsqrt() const {
340
+ return sqrt().reciprocal();
341
+ }
342
+ Vectorized<c10::complex<double>> pow(const Vectorized<c10::complex<double>> &exp) const {
343
+ __at_align__ c10::complex<double> x_tmp[size()];
344
+ __at_align__ c10::complex<double> y_tmp[size()];
345
+ store(x_tmp);
346
+ exp.store(y_tmp);
347
+ for (const auto i : c10::irange(size())) {
348
+ x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]);
349
+ }
350
+ return loadu(x_tmp);
351
+ }
352
+ // Comparison using the _CMP_**_OQ predicate.
353
+ // `O`: get false if an operand is NaN
354
+ // `Q`: do not raise if an operand is NaN
355
+ Vectorized<c10::complex<double>> operator==(const Vectorized<c10::complex<double>>& other) const {
356
+ auto mask = _mm512_cmp_pd_mask(values, other.values, _CMP_EQ_OQ);
357
+ return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, mask,
358
+ 0xFFFFFFFFFFFFFFFF));
359
+ }
360
+ Vectorized<c10::complex<double>> operator!=(const Vectorized<c10::complex<double>>& other) const {
361
+ auto mask = _mm512_cmp_pd_mask(values, other.values, _CMP_NEQ_UQ);
362
+ return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, mask,
363
+ 0xFFFFFFFFFFFFFFFF));
364
+ }
365
+ Vectorized<c10::complex<double>> operator<(const Vectorized<c10::complex<double>>& other) const {
366
+ TORCH_CHECK(false, "not supported for complex numbers");
367
+ }
368
+ Vectorized<c10::complex<double>> operator<=(const Vectorized<c10::complex<double>>& other) const {
369
+ TORCH_CHECK(false, "not supported for complex numbers");
370
+ }
371
+ Vectorized<c10::complex<double>> operator>(const Vectorized<c10::complex<double>>& other) const {
372
+ TORCH_CHECK(false, "not supported for complex numbers");
373
+ }
374
+ Vectorized<c10::complex<double>> operator>=(const Vectorized<c10::complex<double>>& other) const {
375
+ TORCH_CHECK(false, "not supported for complex numbers");
376
+ }
377
+
378
+ Vectorized<c10::complex<double>> eq(const Vectorized<c10::complex<double>>& other) const;
379
+ Vectorized<c10::complex<double>> ne(const Vectorized<c10::complex<double>>& other) const;
380
+ };
381
+
382
+ template <> Vectorized<c10::complex<double>> inline operator+(const Vectorized<c10::complex<double>> &a,
383
+ const Vectorized<c10::complex<double>> &b) {
384
+ return _mm512_add_pd(a, b);
385
+ }
386
+
387
+ template <> Vectorized<c10::complex<double>> inline operator-(const Vectorized<c10::complex<double>> &a,
388
+ const Vectorized<c10::complex<double>> &b) {
389
+ return _mm512_sub_pd(a, b);
390
+ }
391
+
392
+ template <> Vectorized<c10::complex<double>> inline operator*(const Vectorized<c10::complex<double>> &a,
393
+ const Vectorized<c10::complex<double>> &b) {
394
+ //(a + bi) * (c + di) = (ac - bd) + (ad + bc)i
395
+ const __m512d sign_mask = _mm512_setr_pd(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
396
+ auto ac_bd = _mm512_mul_pd(a, b); //ac bd
397
+
398
+ auto d_c = _mm512_permute_pd(b, 0x55); //d c
399
+ d_c = _mm512_xor_pd(sign_mask, d_c); //d -c
400
+ auto ad_bc = _mm512_mul_pd(a, d_c); //ad -bc
401
+
402
+ auto ret = Vectorized<c10::complex<double>>::hsub_pd(ac_bd, ad_bc); //ac - bd ad + bc
403
+ return ret;
404
+ }
405
+
406
+ template <> Vectorized<c10::complex<double>> inline operator/(const Vectorized<c10::complex<double>> &a,
407
+ const Vectorized<c10::complex<double>> &b) {
408
+ //re + im*i = (a + bi) / (c + di)
409
+ auto mask = _mm512_set1_pd(-0.f);
410
+ auto fabs_cd = _mm512_andnot_pd(mask, b); // |c| |d|
411
+ auto fabs_dc = _mm512_permute_pd(fabs_cd, 0x55); // |d| |c|
412
+ auto scale = _mm512_rcp14_pd(_mm512_max_pd(fabs_cd, fabs_dc)); // 1/sc 1/sc
413
+ auto a2 = _mm512_mul_pd(a, scale); // a/sc b/sc
414
+ auto b2 = _mm512_mul_pd(b, scale); // c/sc d/sc
415
+ auto acbd2 = _mm512_mul_pd(a2, b2);
416
+
417
+ const __m512d sign_mask = _mm512_setr_pd(-0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0);
418
+ auto dc2 = _mm512_permute_pd(b2, 0x55); // d/sc c/sc
419
+ dc2 = _mm512_xor_pd(sign_mask, dc2); // -d/|c,d| c/sc
420
+ auto adbc2 = _mm512_mul_pd(a2, dc2); //-ad/sc^2 bc/sc^2
421
+ auto res2 = Vectorized<c10::complex<double>>::hadd_pd(acbd2, adbc2); //(ac+bd)/sc^2 (bc-ad)/sc^2
422
+
423
+ // get the denominator
424
+ auto denom2 = Vectorized<c10::complex<double>>(b2).abs_2_(); // (c^2+d^2)/sc^2 (c^2+d^2)/sc^2
425
+ res2 = _mm512_div_pd(res2, denom2);
426
+ return res2;
427
+ }
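// (Illustrative aside, not part of this header.) Scalar equivalent of the
// scaled division above, assuming <cmath> and <algorithm> are included; the
// function name complex_div_scalar is hypothetical. Scaling both operands by
// 1/max(|c|, |d|) keeps c*c + d*d from overflowing or underflowing, and the
// scale factor cancels between numerator and denominator.
inline c10::complex<double> complex_div_scalar(c10::complex<double> x,
                                               c10::complex<double> y) {
  double a = x.real(), b = x.imag(), c = y.real(), d = y.imag();
  double sc = 1.0 / std::max(std::fabs(c), std::fabs(d));
  double as = a * sc, bs = b * sc, cs = c * sc, ds = d * sc;
  double denom = cs * cs + ds * ds;      // (c^2 + d^2) * sc^2; sc^2 cancels below
  return { (as * cs + bs * ds) / denom,  // re = (ac + bd) / (c^2 + d^2)
           (bs * cs - as * ds) / denom };// im = (bc - ad) / (c^2 + d^2)
}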
428
+
429
+ // reciprocal. Implement this here so we can use multiplication.
430
+ inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::reciprocal() const{
431
+ //re + im*i = (a + bi) / (c + di)
432
+ //re = (ac + bd)/abs_2() = c/abs_2()
433
+ //im = (bc - ad)/abs_2() = -d/abs_2()
434
+ const __m512d sign_mask = _mm512_setr_pd(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
435
+ auto c_d = _mm512_xor_pd(sign_mask, values); //c -d
436
+ return _mm512_div_pd(c_d, abs_2_());
437
+ }
438
+
439
+ inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::atan() const {
440
+ // atan(x) = i/2 * ln((i + z)/(i - z))
441
+ const __m512d i = _mm512_setr_pd(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0);
442
+ const Vectorized i_half = _mm512_setr_pd(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5);
443
+
444
+ auto sum = Vectorized(_mm512_add_pd(i, values)); // a 1+b
445
+ auto sub = Vectorized(_mm512_sub_pd(i, values)); // -a 1-b
446
+ auto ln = (sum/sub).log(); // ln((i + z)/(i - z))
447
+ return i_half*ln; // i/2*ln()
448
+ }
449
+
450
+ template <>
451
+ Vectorized<c10::complex<double>> inline maximum(const Vectorized<c10::complex<double>>& a,
452
+ const Vectorized<c10::complex<double>>& b) {
453
+ auto zero_vec = _mm512_set1_epi64(0);
454
+ auto abs_a = a.abs_2_();
455
+ auto abs_b = b.abs_2_();
456
+ auto mask = _mm512_cmp_pd_mask(abs_a, abs_b, _CMP_LT_OQ);
457
+ auto max = _mm512_mask_blend_pd(mask, a, b);
458
+ // Exploit the fact that all-ones is a NaN.
459
+ auto isnan_mask = _mm512_cmp_pd_mask(abs_a, abs_b, _CMP_UNORD_Q);
460
+ auto isnan = _mm512_mask_set1_epi64(zero_vec, isnan_mask,
461
+ 0xFFFFFFFFFFFFFFFF);
462
+ return _mm512_or_pd(max, _mm512_castsi512_pd(isnan));
463
+ }
464
+
465
+ template <>
466
+ Vectorized<c10::complex<double>> inline minimum(const Vectorized<c10::complex<double>>& a,
467
+ const Vectorized<c10::complex<double>>& b) {
468
+ auto zero_vec = _mm512_set1_epi64(0);
469
+ auto abs_a = a.abs_2_();
470
+ auto abs_b = b.abs_2_();
471
+ auto mask = _mm512_cmp_pd_mask(abs_a, abs_b, _CMP_GT_OQ);
472
+ auto min = _mm512_mask_blend_pd(mask, a, b);
473
+ // Exploit the fact that all-ones is a NaN.
474
+ auto isnan_mask = _mm512_cmp_pd_mask(abs_a, abs_b, _CMP_UNORD_Q);
475
+ auto isnan = _mm512_mask_set1_epi64(zero_vec, isnan_mask,
476
+ 0xFFFFFFFFFFFFFFFF);
477
+ return _mm512_or_pd(min, _mm512_castsi512_pd(isnan));
478
+ }
479
+
480
+ template <>
481
+ Vectorized<c10::complex<double>> inline operator&(const Vectorized<c10::complex<double>>& a,
482
+ const Vectorized<c10::complex<double>>& b) {
483
+ return _mm512_and_pd(a, b);
484
+ }
485
+
486
+ template <>
487
+ Vectorized<c10::complex<double>> inline operator|(const Vectorized<c10::complex<double>>& a,
488
+ const Vectorized<c10::complex<double>>& b) {
489
+ return _mm512_or_pd(a, b);
490
+ }
491
+
492
+ template <>
493
+ Vectorized<c10::complex<double>> inline operator^(const Vectorized<c10::complex<double>>& a,
494
+ const Vectorized<c10::complex<double>>& b) {
495
+ return _mm512_xor_pd(a, b);
496
+ }
497
+
498
+ inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::eq(const Vectorized<c10::complex<double>>& other) const {
499
+ auto eq = (*this == other); // compares real and imag individually
500
+ // If both real numbers and imag numbers are equal, then the complex numbers are equal
501
+ return (eq.real() & eq.imag()) & Vectorized<c10::complex<double>>(_mm512_set1_pd(1.0));
502
+ }
503
+
504
+ inline Vectorized<c10::complex<double>> Vectorized<c10::complex<double>>::ne(const Vectorized<c10::complex<double>>& other) const {
505
+ auto ne = (*this != other); // compares real and imag individually
506
+ // If either real numbers or imag numbers are not equal, then the complex numbers are not equal
507
+ return (ne.real() | ne.imag()) & Vectorized<c10::complex<double>>(_mm512_set1_pd(1.0));
508
+ }
509
+
510
+ #endif
511
+
512
+ }}}
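A small sanity-check sketch for the vectorized complex<double> arithmetic above, assuming an AVX512 build; the function name check_complex_mul, the include of <ATen/cpu/vec/vec.h>, and the tolerance are illustrative, not part of the header.

#include <ATen/cpu/vec/vec.h>
#include <c10/util/complex.h>
#include <cassert>
#include <cmath>

// Multiplies four complex<double> pairs through the vectorized operator*
// ((ac - bd) + (ad + bc)i per lane) and compares against scalar results.
void check_complex_mul() {
  using Vec = at::vec::Vectorized<c10::complex<double>>;
  c10::complex<double> a[Vec::size()] = {{1.0, 2.0}, {-3.0, 0.5}, {0.0, 1.0}, {2.0, -2.0}};
  c10::complex<double> b[Vec::size()] = {{4.0, -1.0}, {0.25, 7.0}, {1.0, 1.0}, {-5.0, 3.0}};
  c10::complex<double> out[Vec::size()];
  (Vec::loadu(a) * Vec::loadu(b)).store(out);
  for (int i = 0; i < Vec::size(); ++i) {
    c10::complex<double> ref = a[i] * b[i];
    assert(std::abs(out[i].real() - ref.real()) < 1e-12);
    assert(std::abs(out[i].imag() - ref.imag()) < 1e-12);
  }
}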
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_complex_float.h ADDED
@@ -0,0 +1,1018 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <c10/util/complex.h>
7
+ #include <c10/util/irange.h>
8
+ #include <ATen/cpu/vec/intrinsics.h>
9
+ #include <ATen/cpu/vec/vec_base.h>
10
+ #if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
11
+ #include <sleef.h>
12
+ #endif
13
+
14
+ namespace at {
15
+ namespace vec {
16
+ // See Note [CPU_CAPABILITY namespace]
17
+ inline namespace CPU_CAPABILITY {
18
+
19
+ #if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
20
+
21
+ template <> class Vectorized<c10::complex<float>> {
22
+ private:
23
+ __m512 values;
24
+ static constexpr __m512i zero_vector {0, 0, 0, 0, 0, 0, 0, 0};
25
+ public:
26
+ using value_type = c10::complex<float>;
27
+ using size_type = int;
28
+ static constexpr size_type size() {
29
+ return 8;
30
+ }
31
+ Vectorized() {}
32
+ Vectorized(__m512 v) : values(v) {}
33
+ Vectorized(c10::complex<float> val) {
34
+ float real_value = val.real();
35
+ float imag_value = val.imag();
36
+ values = _mm512_setr_ps(real_value, imag_value,
37
+ real_value, imag_value,
38
+ real_value, imag_value,
39
+ real_value, imag_value,
40
+ real_value, imag_value,
41
+ real_value, imag_value,
42
+ real_value, imag_value,
43
+ real_value, imag_value);
44
+ }
45
+ Vectorized(c10::complex<float> val1, c10::complex<float> val2,
46
+ c10::complex<float> val3, c10::complex<float> val4,
47
+ c10::complex<float> val5, c10::complex<float> val6,
48
+ c10::complex<float> val7, c10::complex<float> val8) {
49
+ values = _mm512_setr_ps(val1.real(), val1.imag(),
50
+ val2.real(), val2.imag(),
51
+ val3.real(), val3.imag(),
52
+ val4.real(), val4.imag(),
53
+ val5.real(), val5.imag(),
54
+ val6.real(), val6.imag(),
55
+ val7.real(), val7.imag(),
56
+ val8.real(), val8.imag());
57
+ }
58
+ operator __m512() const {
59
+ return values;
60
+ }
61
+ template <int64_t mask>
62
+ static Vectorized<c10::complex<float>> blend(const Vectorized<c10::complex<float>>& a,
63
+ const Vectorized<c10::complex<float>>& b) {
64
+ // convert c10::complex<V> index mask to V index mask: xy -> xxyy
65
+ static_assert(mask > -1 && mask < 256, "Unexpected mask value");
66
+ // The compiler would hopefully convert this switch condition
67
+ // into a jump table
68
+ switch (mask) {
69
+ case 0:
70
+ return a;
71
+ case 1:
72
+ return _mm512_mask_blend_ps(0x03, a.values, b.values);
73
+ case 2:
74
+ return _mm512_mask_blend_ps(0x0C, a.values, b.values);
75
+ case 3:
76
+ return _mm512_mask_blend_ps(0x0F, a.values, b.values);
77
+ case 4:
78
+ return _mm512_mask_blend_ps(0x30, a.values, b.values);
79
+ case 5:
80
+ return _mm512_mask_blend_ps(0x33, a.values, b.values);
81
+ case 6:
82
+ return _mm512_mask_blend_ps(0x3C, a.values, b.values);
83
+ case 7:
84
+ return _mm512_mask_blend_ps(0x3F, a.values, b.values);
85
+ case 8:
86
+ return _mm512_mask_blend_ps(0xC0, a.values, b.values);
87
+ case 9:
88
+ return _mm512_mask_blend_ps(0xC3, a.values, b.values);
89
+ case 10:
90
+ return _mm512_mask_blend_ps(0xCC, a.values, b.values);
91
+ case 11:
92
+ return _mm512_mask_blend_ps(0xCF, a.values, b.values);
93
+ case 12:
94
+ return _mm512_mask_blend_ps(0xF0, a.values, b.values);
95
+ case 13:
96
+ return _mm512_mask_blend_ps(0xF3, a.values, b.values);
97
+ case 14:
98
+ return _mm512_mask_blend_ps(0xFC, a.values, b.values);
99
+ case 15:
100
+ return _mm512_mask_blend_ps(0xFF, a.values, b.values);
101
+ case 16:
102
+ return _mm512_mask_blend_ps(0x300, a.values, b.values);
103
+ case 17:
104
+ return _mm512_mask_blend_ps(0x303, a.values, b.values);
105
+ case 18:
106
+ return _mm512_mask_blend_ps(0x30C, a.values, b.values);
107
+ case 19:
108
+ return _mm512_mask_blend_ps(0x30F, a.values, b.values);
109
+ case 20:
110
+ return _mm512_mask_blend_ps(0x330, a.values, b.values);
111
+ case 21:
112
+ return _mm512_mask_blend_ps(0x333, a.values, b.values);
113
+ case 22:
114
+ return _mm512_mask_blend_ps(0x33C, a.values, b.values);
115
+ case 23:
116
+ return _mm512_mask_blend_ps(0x33F, a.values, b.values);
117
+ case 24:
118
+ return _mm512_mask_blend_ps(0x3C0, a.values, b.values);
119
+ case 25:
120
+ return _mm512_mask_blend_ps(0x3C3, a.values, b.values);
121
+ case 26:
122
+ return _mm512_mask_blend_ps(0x3CC, a.values, b.values);
123
+ case 27:
124
+ return _mm512_mask_blend_ps(0x3CF, a.values, b.values);
125
+ case 28:
126
+ return _mm512_mask_blend_ps(0x3F0, a.values, b.values);
127
+ case 29:
128
+ return _mm512_mask_blend_ps(0x3F3, a.values, b.values);
129
+ case 30:
130
+ return _mm512_mask_blend_ps(0x3FC, a.values, b.values);
131
+ case 31:
132
+ return _mm512_mask_blend_ps(0x3FF, a.values, b.values);
133
+ case 32:
134
+ return _mm512_mask_blend_ps(0xC00, a.values, b.values);
135
+ case 33:
136
+ return _mm512_mask_blend_ps(0xC03, a.values, b.values);
137
+ case 34:
138
+ return _mm512_mask_blend_ps(0xC0C, a.values, b.values);
139
+ case 35:
140
+ return _mm512_mask_blend_ps(0xC0F, a.values, b.values);
141
+ case 36:
142
+ return _mm512_mask_blend_ps(0xC30, a.values, b.values);
143
+ case 37:
144
+ return _mm512_mask_blend_ps(0xC33, a.values, b.values);
145
+ case 38:
146
+ return _mm512_mask_blend_ps(0xC3C, a.values, b.values);
147
+ case 39:
148
+ return _mm512_mask_blend_ps(0xC3F, a.values, b.values);
149
+ case 40:
150
+ return _mm512_mask_blend_ps(0xCC0, a.values, b.values);
151
+ case 41:
152
+ return _mm512_mask_blend_ps(0xCC3, a.values, b.values);
153
+ case 42:
154
+ return _mm512_mask_blend_ps(0xCCC, a.values, b.values);
155
+ case 43:
156
+ return _mm512_mask_blend_ps(0xCCF, a.values, b.values);
157
+ case 44:
158
+ return _mm512_mask_blend_ps(0xCF0, a.values, b.values);
159
+ case 45:
160
+ return _mm512_mask_blend_ps(0xCF3, a.values, b.values);
161
+ case 46:
162
+ return _mm512_mask_blend_ps(0xCFC, a.values, b.values);
163
+ case 47:
164
+ return _mm512_mask_blend_ps(0xCFF, a.values, b.values);
165
+ case 48:
166
+ return _mm512_mask_blend_ps(0xF00, a.values, b.values);
167
+ case 49:
168
+ return _mm512_mask_blend_ps(0xF03, a.values, b.values);
169
+ case 50:
170
+ return _mm512_mask_blend_ps(0xF0C, a.values, b.values);
171
+ case 51:
172
+ return _mm512_mask_blend_ps(0xF0F, a.values, b.values);
173
+ case 52:
174
+ return _mm512_mask_blend_ps(0xF30, a.values, b.values);
175
+ case 53:
176
+ return _mm512_mask_blend_ps(0xF33, a.values, b.values);
177
+ case 54:
178
+ return _mm512_mask_blend_ps(0xF3C, a.values, b.values);
179
+ case 55:
180
+ return _mm512_mask_blend_ps(0xF3F, a.values, b.values);
181
+ case 56:
182
+ return _mm512_mask_blend_ps(0xFC0, a.values, b.values);
183
+ case 57:
184
+ return _mm512_mask_blend_ps(0xFC3, a.values, b.values);
185
+ case 58:
186
+ return _mm512_mask_blend_ps(0xFCC, a.values, b.values);
187
+ case 59:
188
+ return _mm512_mask_blend_ps(0xFCF, a.values, b.values);
189
+ case 60:
190
+ return _mm512_mask_blend_ps(0xFF0, a.values, b.values);
191
+ case 61:
192
+ return _mm512_mask_blend_ps(0xFF3, a.values, b.values);
193
+ case 62:
194
+ return _mm512_mask_blend_ps(0xFFC, a.values, b.values);
195
+ case 63:
196
+ return _mm512_mask_blend_ps(0xFFF, a.values, b.values);
197
+ case 64:
198
+ return _mm512_mask_blend_ps(0x3000, a.values, b.values);
199
+ case 65:
200
+ return _mm512_mask_blend_ps(0x3003, a.values, b.values);
201
+ case 66:
202
+ return _mm512_mask_blend_ps(0x300C, a.values, b.values);
203
+ case 67:
204
+ return _mm512_mask_blend_ps(0x300F, a.values, b.values);
205
+ case 68:
206
+ return _mm512_mask_blend_ps(0x3030, a.values, b.values);
207
+ case 69:
208
+ return _mm512_mask_blend_ps(0x3033, a.values, b.values);
209
+ case 70:
210
+ return _mm512_mask_blend_ps(0x303C, a.values, b.values);
211
+ case 71:
212
+ return _mm512_mask_blend_ps(0x303F, a.values, b.values);
213
+ case 72:
214
+ return _mm512_mask_blend_ps(0x30C0, a.values, b.values);
215
+ case 73:
216
+ return _mm512_mask_blend_ps(0X30C3, a.values, b.values);
217
+ case 74:
218
+ return _mm512_mask_blend_ps(0x30CC, a.values, b.values);
219
+ case 75:
220
+ return _mm512_mask_blend_ps(0x30CF, a.values, b.values);
221
+ case 76:
222
+ return _mm512_mask_blend_ps(0x30F0, a.values, b.values);
223
+ case 77:
224
+ return _mm512_mask_blend_ps(0x30F3, a.values, b.values);
225
+ case 78:
226
+ return _mm512_mask_blend_ps(0x30FC, a.values, b.values);
227
+ case 79:
228
+ return _mm512_mask_blend_ps(0x30FF, a.values, b.values);
229
+ case 80:
230
+ return _mm512_mask_blend_ps(0x3300, a.values, b.values);
231
+ case 81:
232
+ return _mm512_mask_blend_ps(0X3303, a.values, b.values);
233
+ case 82:
234
+ return _mm512_mask_blend_ps(0x330C, a.values, b.values);
235
+ case 83:
236
+ return _mm512_mask_blend_ps(0x330F, a.values, b.values);
237
+ case 84:
238
+ return _mm512_mask_blend_ps(0x3330, a.values, b.values);
239
+ case 85:
240
+ return _mm512_mask_blend_ps(0x3333, a.values, b.values);
241
+ case 86:
242
+ return _mm512_mask_blend_ps(0x333C, a.values, b.values);
243
+ case 87:
244
+ return _mm512_mask_blend_ps(0X333F, a.values, b.values);
245
+ case 88:
246
+ return _mm512_mask_blend_ps(0x33C0, a.values, b.values);
247
+ case 89:
248
+ return _mm512_mask_blend_ps(0x33C3, a.values, b.values);
249
+ case 90:
250
+ return _mm512_mask_blend_ps(0x33CC, a.values, b.values);
251
+ case 91:
252
+ return _mm512_mask_blend_ps(0x33CF, a.values, b.values);
253
+ case 92:
254
+ return _mm512_mask_blend_ps(0x33F0, a.values, b.values);
255
+ case 93:
256
+ return _mm512_mask_blend_ps(0x33F3, a.values, b.values);
257
+ case 94:
258
+ return _mm512_mask_blend_ps(0x33FC, a.values, b.values);
259
+ case 95:
260
+ return _mm512_mask_blend_ps(0x33FF, a.values, b.values);
261
+ case 96:
262
+ return _mm512_mask_blend_ps(0X3C00, a.values, b.values);
263
+ case 97:
264
+ return _mm512_mask_blend_ps(0x3C03, a.values, b.values);
265
+ case 98:
266
+ return _mm512_mask_blend_ps(0x3C0C, a.values, b.values);
267
+ case 99:
268
+ return _mm512_mask_blend_ps(0x3C0F, a.values, b.values);
269
+ case 100:
270
+ return _mm512_mask_blend_ps(0x3C30, a.values, b.values);
271
+ case 101:
272
+ return _mm512_mask_blend_ps(0x3C33, a.values, b.values);
273
+ case 102:
274
+ return _mm512_mask_blend_ps(0x3C3C, a.values, b.values);
275
+ case 103:
276
+ return _mm512_mask_blend_ps(0x3C3F, a.values, b.values);
277
+ case 104:
278
+ return _mm512_mask_blend_ps(0x3CC0, a.values, b.values);
279
+ case 105:
280
+ return _mm512_mask_blend_ps(0x3CC3, a.values, b.values);
281
+ case 106:
282
+ return _mm512_mask_blend_ps(0x3CCC, a.values, b.values);
283
+ case 107:
284
+ return _mm512_mask_blend_ps(0x3CCF, a.values, b.values);
285
+ case 108:
286
+ return _mm512_mask_blend_ps(0x3CF0, a.values, b.values);
287
+ case 109:
288
+ return _mm512_mask_blend_ps(0x3CF3, a.values, b.values);
289
+ case 110:
290
+ return _mm512_mask_blend_ps(0x3CFC, a.values, b.values);
291
+ case 111:
292
+ return _mm512_mask_blend_ps(0x3CFF, a.values, b.values);
293
+ case 112:
294
+ return _mm512_mask_blend_ps(0x3F00, a.values, b.values);
295
+ case 113:
296
+ return _mm512_mask_blend_ps(0x3F03, a.values, b.values);
297
+ case 114:
298
+ return _mm512_mask_blend_ps(0x3F0C, a.values, b.values);
299
+ case 115:
300
+ return _mm512_mask_blend_ps(0x3F0F, a.values, b.values);
301
+ case 116:
302
+ return _mm512_mask_blend_ps(0x3F30, a.values, b.values);
303
+ case 117:
304
+ return _mm512_mask_blend_ps(0x3F33, a.values, b.values);
305
+ case 118:
306
+ return _mm512_mask_blend_ps(0x3F3C, a.values, b.values);
307
+ case 119:
308
+ return _mm512_mask_blend_ps(0x3F3F, a.values, b.values);
309
+ case 120:
310
+ return _mm512_mask_blend_ps(0x3FC0, a.values, b.values);
311
+ case 121:
312
+ return _mm512_mask_blend_ps(0x3FC3, a.values, b.values);
313
+ case 122:
314
+ return _mm512_mask_blend_ps(0x3FCC, a.values, b.values);
315
+ case 123:
316
+ return _mm512_mask_blend_ps(0x3FCF, a.values, b.values);
317
+ case 124:
318
+ return _mm512_mask_blend_ps(0x3FF0, a.values, b.values);
319
+ case 125:
320
+ return _mm512_mask_blend_ps(0x3FF3, a.values, b.values);
321
+ case 126:
322
+ return _mm512_mask_blend_ps(0x3FFC, a.values, b.values);
323
+ case 127:
324
+ return _mm512_mask_blend_ps(0x3FFF, a.values, b.values);
325
+ case 128:
326
+ return _mm512_mask_blend_ps(0xC000, a.values, b.values);
327
+ case 129:
328
+ return _mm512_mask_blend_ps(0xC003, a.values, b.values);
329
+ case 130:
330
+ return _mm512_mask_blend_ps(0xC00C, a.values, b.values);
331
+ case 131:
332
+ return _mm512_mask_blend_ps(0xC00F, a.values, b.values);
333
+ case 132:
334
+ return _mm512_mask_blend_ps(0xC030, a.values, b.values);
335
+ case 133:
336
+ return _mm512_mask_blend_ps(0xC033, a.values, b.values);
337
+ case 134:
338
+ return _mm512_mask_blend_ps(0xC03C, a.values, b.values);
339
+ case 135:
340
+ return _mm512_mask_blend_ps(0xC03F, a.values, b.values);
341
+ case 136:
342
+ return _mm512_mask_blend_ps(0xC0C0, a.values, b.values);
343
+ case 137:
344
+ return _mm512_mask_blend_ps(0xC0C3, a.values, b.values);
345
+ case 138:
346
+ return _mm512_mask_blend_ps(0xC0CC, a.values, b.values);
347
+ case 139:
348
+ return _mm512_mask_blend_ps(0xC0CF, a.values, b.values);
349
+ case 140:
350
+ return _mm512_mask_blend_ps(0xC0F0, a.values, b.values);
351
+ case 141:
352
+ return _mm512_mask_blend_ps(0xC0F3, a.values, b.values);
353
+ case 142:
354
+ return _mm512_mask_blend_ps(0xC0FC, a.values, b.values);
355
+ case 143:
356
+ return _mm512_mask_blend_ps(0xC0FF, a.values, b.values);
357
+ case 144:
358
+ return _mm512_mask_blend_ps(0xC300, a.values, b.values);
359
+ case 145:
360
+ return _mm512_mask_blend_ps(0xC303, a.values, b.values);
361
+ case 146:
362
+ return _mm512_mask_blend_ps(0xC30C, a.values, b.values);
363
+ case 147:
364
+ return _mm512_mask_blend_ps(0xC30F, a.values, b.values);
365
+ case 148:
366
+ return _mm512_mask_blend_ps(0xC330, a.values, b.values);
367
+ case 149:
368
+ return _mm512_mask_blend_ps(0xC333, a.values, b.values);
369
+ case 150:
370
+ return _mm512_mask_blend_ps(0xC33C, a.values, b.values);
371
+ case 151:
372
+ return _mm512_mask_blend_ps(0xC33F, a.values, b.values);
373
+ case 152:
374
+ return _mm512_mask_blend_ps(0xC3C0, a.values, b.values);
375
+ case 153:
376
+ return _mm512_mask_blend_ps(0xC3C3, a.values, b.values);
377
+ case 154:
378
+ return _mm512_mask_blend_ps(0xC3CC, a.values, b.values);
379
+ case 155:
380
+ return _mm512_mask_blend_ps(0xC3CF, a.values, b.values);
381
+ case 156:
382
+ return _mm512_mask_blend_ps(0xC3F0, a.values, b.values);
383
+ case 157:
384
+ return _mm512_mask_blend_ps(0xC3F3, a.values, b.values);
385
+ case 158:
386
+ return _mm512_mask_blend_ps(0xC3FC, a.values, b.values);
387
+ case 159:
388
+ return _mm512_mask_blend_ps(0xC3FF, a.values, b.values);
389
+ case 160:
390
+ return _mm512_mask_blend_ps(0xCC00, a.values, b.values);
391
+ case 161:
392
+ return _mm512_mask_blend_ps(0xCC03, a.values, b.values);
393
+ case 162:
394
+ return _mm512_mask_blend_ps(0xCC0C, a.values, b.values);
395
+ case 163:
396
+ return _mm512_mask_blend_ps(0xCC0F, a.values, b.values);
397
+ case 164:
398
+ return _mm512_mask_blend_ps(0xCC30, a.values, b.values);
399
+ case 165:
400
+ return _mm512_mask_blend_ps(0xCC33, a.values, b.values);
401
+ case 166:
402
+ return _mm512_mask_blend_ps(0xCC3C, a.values, b.values);
403
+ case 167:
404
+ return _mm512_mask_blend_ps(0xCC3F, a.values, b.values);
405
+ case 168:
406
+ return _mm512_mask_blend_ps(0xCCC0, a.values, b.values);
407
+ case 169:
408
+ return _mm512_mask_blend_ps(0xCCC3, a.values, b.values);
409
+ case 170:
410
+ return _mm512_mask_blend_ps(0xCCCC, a.values, b.values);
411
+ case 171:
412
+ return _mm512_mask_blend_ps(0xCCCF, a.values, b.values);
413
+ case 172:
414
+ return _mm512_mask_blend_ps(0xCCF0, a.values, b.values);
415
+ case 173:
416
+ return _mm512_mask_blend_ps(0xCCF3, a.values, b.values);
417
+ case 174:
418
+ return _mm512_mask_blend_ps(0xCCFC, a.values, b.values);
419
+ case 175:
420
+ return _mm512_mask_blend_ps(0xCCFF, a.values, b.values);
421
+ case 176:
422
+ return _mm512_mask_blend_ps(0xCF00, a.values, b.values);
423
+ case 177:
424
+ return _mm512_mask_blend_ps(0xCF03, a.values, b.values);
425
+ case 178:
426
+ return _mm512_mask_blend_ps(0xCF0C, a.values, b.values);
427
+ case 179:
428
+ return _mm512_mask_blend_ps(0xCF0F, a.values, b.values);
429
+ case 180:
430
+ return _mm512_mask_blend_ps(0xCF30, a.values, b.values);
431
+ case 181:
432
+ return _mm512_mask_blend_ps(0xCF33, a.values, b.values);
433
+ case 182:
434
+ return _mm512_mask_blend_ps(0xCF3C, a.values, b.values);
435
+ case 183:
436
+ return _mm512_mask_blend_ps(0xCF3F, a.values, b.values);
437
+ case 184:
438
+ return _mm512_mask_blend_ps(0xCFC0, a.values, b.values);
439
+ case 185:
440
+ return _mm512_mask_blend_ps(0xCFC3, a.values, b.values);
441
+ case 186:
442
+ return _mm512_mask_blend_ps(0xCFCC, a.values, b.values);
443
+ case 187:
444
+ return _mm512_mask_blend_ps(0xCFCF, a.values, b.values);
445
+ case 188:
446
+ return _mm512_mask_blend_ps(0xCFF0, a.values, b.values);
447
+ case 189:
448
+ return _mm512_mask_blend_ps(0xCFF3, a.values, b.values);
449
+ case 190:
450
+ return _mm512_mask_blend_ps(0xCFFC, a.values, b.values);
451
+ case 191:
452
+ return _mm512_mask_blend_ps(0xCFFF, a.values, b.values);
453
+ case 192:
454
+ return _mm512_mask_blend_ps(0xF000, a.values, b.values);
455
+ case 193:
456
+ return _mm512_mask_blend_ps(0xF003, a.values, b.values);
457
+ case 194:
458
+ return _mm512_mask_blend_ps(0xF00C, a.values, b.values);
459
+ case 195:
460
+ return _mm512_mask_blend_ps(0xF00F, a.values, b.values);
461
+ case 196:
462
+ return _mm512_mask_blend_ps(0xF030, a.values, b.values);
463
+ case 197:
464
+ return _mm512_mask_blend_ps(0xF033, a.values, b.values);
465
+ case 198:
466
+ return _mm512_mask_blend_ps(0xF03C, a.values, b.values);
467
+ case 199:
468
+ return _mm512_mask_blend_ps(0xF03F, a.values, b.values);
469
+ case 200:
470
+ return _mm512_mask_blend_ps(0xF0C0, a.values, b.values);
471
+ case 201:
472
+ return _mm512_mask_blend_ps(0xF0C3, a.values, b.values);
473
+ case 202:
474
+ return _mm512_mask_blend_ps(0xF0CC, a.values, b.values);
475
+ case 203:
476
+ return _mm512_mask_blend_ps(0xF0CF, a.values, b.values);
477
+ case 204:
478
+ return _mm512_mask_blend_ps(0xF0F0, a.values, b.values);
479
+ case 205:
480
+ return _mm512_mask_blend_ps(0xF0F3, a.values, b.values);
481
+ case 206:
482
+ return _mm512_mask_blend_ps(0xF0FC, a.values, b.values);
483
+ case 207:
484
+ return _mm512_mask_blend_ps(0xF0FF, a.values, b.values);
485
+ case 208:
486
+ return _mm512_mask_blend_ps(0xF300, a.values, b.values);
487
+ case 209:
488
+ return _mm512_mask_blend_ps(0xF303, a.values, b.values);
489
+ case 210:
490
+ return _mm512_mask_blend_ps(0xF30C, a.values, b.values);
491
+ case 211:
492
+ return _mm512_mask_blend_ps(0xF30F, a.values, b.values);
493
+ case 212:
494
+ return _mm512_mask_blend_ps(0xF330, a.values, b.values);
495
+ case 213:
496
+ return _mm512_mask_blend_ps(0xF333, a.values, b.values);
497
+ case 214:
498
+ return _mm512_mask_blend_ps(0xF33C, a.values, b.values);
499
+ case 215:
500
+ return _mm512_mask_blend_ps(0xF33F, a.values, b.values);
501
+ case 216:
502
+ return _mm512_mask_blend_ps(0xF3C0, a.values, b.values);
503
+ case 217:
504
+ return _mm512_mask_blend_ps(0xF3C3, a.values, b.values);
505
+ case 218:
506
+ return _mm512_mask_blend_ps(0xF3CC, a.values, b.values);
507
+ case 219:
508
+ return _mm512_mask_blend_ps(0xF3CF, a.values, b.values);
509
+ case 220:
510
+ return _mm512_mask_blend_ps(0xF3F0, a.values, b.values);
511
+ case 221:
512
+ return _mm512_mask_blend_ps(0xF3F3, a.values, b.values);
513
+ case 222:
514
+ return _mm512_mask_blend_ps(0xF3FC, a.values, b.values);
515
+ case 223:
516
+ return _mm512_mask_blend_ps(0xF3FF, a.values, b.values);
517
+ case 224:
518
+ return _mm512_mask_blend_ps(0xFC00, a.values, b.values);
519
+ case 225:
520
+ return _mm512_mask_blend_ps(0xFC03, a.values, b.values);
521
+ case 226:
522
+ return _mm512_mask_blend_ps(0xFC0C, a.values, b.values);
523
+ case 227:
524
+ return _mm512_mask_blend_ps(0xFC0F, a.values, b.values);
525
+ case 228:
526
+ return _mm512_mask_blend_ps(0xFC30, a.values, b.values);
527
+ case 229:
528
+ return _mm512_mask_blend_ps(0xFC33, a.values, b.values);
529
+ case 230:
530
+ return _mm512_mask_blend_ps(0xFC3C, a.values, b.values);
531
+ case 231:
532
+ return _mm512_mask_blend_ps(0xFC3F, a.values, b.values);
533
+ case 232:
534
+ return _mm512_mask_blend_ps(0xFCC0, a.values, b.values);
535
+ case 233:
536
+ return _mm512_mask_blend_ps(0xFCC3, a.values, b.values);
537
+ case 234:
538
+ return _mm512_mask_blend_ps(0xFCCC, a.values, b.values);
539
+ case 235:
540
+ return _mm512_mask_blend_ps(0xFCCF, a.values, b.values);
541
+ case 236:
542
+ return _mm512_mask_blend_ps(0xFCF0, a.values, b.values);
543
+ case 237:
544
+ return _mm512_mask_blend_ps(0xFCF3, a.values, b.values);
545
+ case 238:
546
+ return _mm512_mask_blend_ps(0xFCFC, a.values, b.values);
547
+ case 239:
548
+ return _mm512_mask_blend_ps(0xFCFF, a.values, b.values);
549
+ case 240:
550
+ return _mm512_mask_blend_ps(0xFF00, a.values, b.values);
551
+ case 241:
552
+ return _mm512_mask_blend_ps(0xFF03, a.values, b.values);
553
+ case 242:
554
+ return _mm512_mask_blend_ps(0xFF0C, a.values, b.values);
555
+ case 243:
556
+ return _mm512_mask_blend_ps(0xFF0F, a.values, b.values);
557
+ case 244:
558
+ return _mm512_mask_blend_ps(0xFF30, a.values, b.values);
559
+ case 245:
560
+ return _mm512_mask_blend_ps(0xFF33, a.values, b.values);
561
+ case 246:
562
+ return _mm512_mask_blend_ps(0xFF3C, a.values, b.values);
563
+ case 247:
564
+ return _mm512_mask_blend_ps(0xFF3F, a.values, b.values);
565
+ case 248:
566
+ return _mm512_mask_blend_ps(0xFFC0, a.values, b.values);
567
+ case 249:
568
+ return _mm512_mask_blend_ps(0xFFC3, a.values, b.values);
569
+ case 250:
570
+ return _mm512_mask_blend_ps(0xFFCC, a.values, b.values);
571
+ case 251:
572
+ return _mm512_mask_blend_ps(0xFFCF, a.values, b.values);
573
+ case 252:
574
+ return _mm512_mask_blend_ps(0xFFF0, a.values, b.values);
575
+ case 253:
576
+ return _mm512_mask_blend_ps(0xFFF3, a.values, b.values);
577
+ case 254:
578
+ return _mm512_mask_blend_ps(0xFFFC, a.values, b.values);
579
+ default: break;
580
+ }
581
+ return b;
582
+ }
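+ // Each bit of the 8-bit `mask` template argument selects one complex lane,
+ // i.e. two consecutive float lanes, so bit i of `mask` maps to bits 2*i and
+ // 2*i+1 of the 16-bit __mmask16 handed to _mm512_mask_blend_ps above
+ // (e.g. mask = 114 = 0b01110010 expands to 0x3F0C). A minimal sketch of that
+ // expansion, using a hypothetical helper name:
+ //
+ //   constexpr __mmask16 expand_complex_blend_mask(int64_t mask) {
+ //     __mmask16 out = 0;
+ //     for (int i = 0; i < 8; ++i) {
+ //       if (mask & (1LL << i)) out |= static_cast<__mmask16>(0x3) << (2 * i);
+ //     }
+ //     return out;
+ //   }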
583
+ static Vectorized<c10::complex<float>> blendv(const Vectorized<c10::complex<float>>& a,
584
+ const Vectorized<c10::complex<float>>& b,
585
+ const Vectorized<c10::complex<float>>& mask) {
586
+ // convert c10::complex<V> index mask to V index mask: xy -> xxyy
587
+ auto mask_ = _mm512_unpacklo_ps(mask.values, mask.values);
588
+ auto all_ones = _mm512_set1_epi32(0xFFFFFFFF);
589
+ auto mmask = _mm512_cmp_epi32_mask(_mm512_castps_si512(mask_), all_ones, _MM_CMPINT_EQ);
590
+ return _mm512_mask_blend_ps(mmask, a.values, b.values);
591
+ }
592
+ template<typename step_t>
593
+ static Vectorized<c10::complex<float>> arange(c10::complex<float> base = 0.,
594
+ step_t step = static_cast<step_t>(1)) {
595
+ return Vectorized<c10::complex<float>>(base,
596
+ base + step,
597
+ base + c10::complex<float>(2)*step,
598
+ base + c10::complex<float>(3)*step,
599
+ base + c10::complex<float>(4)*step,
600
+ base + c10::complex<float>(5)*step,
601
+ base + c10::complex<float>(6)*step,
602
+ base + c10::complex<float>(7)*step);
603
+ }
604
+ static Vectorized<c10::complex<float>> set(const Vectorized<c10::complex<float>>& a,
605
+ const Vectorized<c10::complex<float>>& b,
606
+ int64_t count = size()) {
607
+ switch (count) {
608
+ case 0:
609
+ return a;
610
+ case 1:
611
+ return blend<1>(a, b);
612
+ case 2:
613
+ return blend<3>(a, b);
614
+ case 3:
615
+ return blend<7>(a, b);
616
+ case 4:
617
+ return blend<15>(a, b);
618
+ case 5:
619
+ return blend<31>(a, b);
620
+ case 6:
621
+ return blend<63>(a, b);
622
+ case 7:
623
+ return blend<127>(a, b);
624
+ }
625
+ return b;
626
+ }
627
+ static Vectorized<c10::complex<float>> loadu(const void* ptr, int64_t count = size()) {
628
+ if (count == size())
629
+ return _mm512_loadu_ps(reinterpret_cast<const float*>(ptr));
630
+
631
+ __at_align__ float tmp_values[2*size()];
632
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
633
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
634
+ // instructions while a loop would be compiled to one instruction.
635
+ for (const auto i : c10::irange(2*size())) {
636
+ tmp_values[i] = 0.0;
637
+ }
638
+ std::memcpy(
639
+ tmp_values,
640
+ reinterpret_cast<const float*>(ptr),
641
+ count * sizeof(c10::complex<float>));
642
+ return _mm512_load_ps(tmp_values);
643
+ }
644
+ void store(void* ptr, int count = size()) const {
645
+ if (count == size()) {
646
+ _mm512_storeu_ps(reinterpret_cast<float*>(ptr), values);
647
+ } else if (count > 0) {
648
+ float tmp_values[2*size()];
649
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmp_values), values);
650
+ std::memcpy(ptr, tmp_values, count * sizeof(c10::complex<float>));
651
+ }
652
+ }
653
+ // AVX512 doesn't have horizontal add & horizontal sub instructions.
654
+ // TODO: hadd_ps() & hsub_ps() may have scope for improvement.
655
+ static inline __m512 hadd_ps(__m512 a, __m512 b) {
656
+ __m512i idx1 = _mm512_set_epi32(30, 14, 28, 12, 26, 10, 24, 8, 22, 6, 20, 4, 18, 2, 16, 0);
657
+ __m512i idx2 = _mm512_set_epi32(31, 15, 29, 13, 27, 11, 25, 9, 23, 7, 21, 5, 19, 3, 17, 1);
658
+ return _mm512_add_ps(_mm512_mask_permutex2var_ps(a, 0xffff, idx1, b),
659
+ _mm512_mask_permutex2var_ps(a, 0xffff, idx2, b));
660
+ }
661
+ static inline __m512 hsub_ps(__m512 a, __m512 b) {
662
+ __m512i idx1 = _mm512_set_epi32(30, 14, 28, 12, 26, 10, 24, 8, 22, 6, 20, 4, 18, 2, 16, 0);
663
+ __m512i idx2 = _mm512_set_epi32(31, 15, 29, 13, 27, 11, 25, 9, 23, 7, 21, 5, 19, 3, 17, 1);
664
+ return _mm512_sub_ps(_mm512_mask_permutex2var_ps(a, 0xffff, idx1, b),
665
+ _mm512_mask_permutex2var_ps(a, 0xffff, idx2, b));
666
+ }
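+ // idx1 gathers the even (real-slot) lanes and idx2 the odd (imag-slot) lanes,
+ // interleaving pairs taken from `a` and `b`, so per complex pair k:
+ //   hadd_ps: out[2k] = a[2k] + a[2k+1],  out[2k+1] = b[2k] + b[2k+1]
+ //   hsub_ps: out[2k] = a[2k] - a[2k+1],  out[2k+1] = b[2k] - b[2k+1]
+ // e.g. hadd_ps(v, v), as used in abs_2_() below, leaves a*a + b*b duplicated
+ // into both float lanes of every complex element.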
667
+ const c10::complex<float>& operator[](int idx) const = delete;
668
+ c10::complex<float>& operator[](int idx) = delete;
669
+ Vectorized<c10::complex<float>> map(c10::complex<float> (*const f)(const c10::complex<float> &)) const {
670
+ __at_align__ c10::complex<float> tmp[size()];
671
+ store(tmp);
672
+ for (const auto i : c10::irange(size())) {
673
+ tmp[i] = f(tmp[i]);
674
+ }
675
+ return loadu(tmp);
676
+ }
677
+ __m512 abs_2_() const {
678
+ auto val_2 = _mm512_mul_ps(values, values); // a*a b*b
679
+ auto ret = hadd_ps(val_2, val_2); // a*a+b*b a*a+b*b
680
+ return ret;
681
+ }
682
+ __m512 abs_() const {
683
+ auto real = _mm512_moveldup_ps(values); // real real
684
+ auto imag = _mm512_movehdup_ps(values); // imag imag
685
+ return Sleef_hypotf16_u05(real, imag); // abs abs
686
+ }
687
+ Vectorized<c10::complex<float>> abs() const {
688
+ const __m512 real_mask = _mm512_castsi512_ps(_mm512_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
689
+ 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
690
+ 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
691
+ 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000));
692
+ return _mm512_and_ps(abs_(), real_mask); // abs 0
693
+ }
694
+ __m512 angle_() const {
695
+ // angle = atan2(b, a)
696
+ auto b_a = _mm512_permute_ps(values, 0xB1); // b a
697
+ return Sleef_atan2f16_u10(values, b_a); // 90-angle angle
698
+ }
699
+ Vectorized<c10::complex<float>> angle() const {
700
+ const __m512 real_mask = _mm512_castsi512_ps(_mm512_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
701
+ 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
702
+ 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
703
+ 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000));
704
+ auto angle = _mm512_permute_ps(angle_(), 0xB1); // angle 90-angle
705
+ return _mm512_and_ps(angle, real_mask); // angle 0
706
+ }
707
+ Vectorized<c10::complex<float>> sgn() const {
708
+ auto abs = abs_();
709
+ auto zero = _mm512_setzero_ps();
710
+ auto mask = _mm512_cmp_ps_mask(abs, zero, _CMP_EQ_OQ);
711
+ auto div = values / abs;
712
+ return _mm512_mask_blend_ps(mask, div, zero);
713
+ }
714
+ __m512 real_() const {
715
+ const __m512 real_mask = _mm512_castsi512_ps(_mm512_setr_epi32(0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
716
+ 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
717
+ 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000,
718
+ 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, 0x00000000));
719
+ return _mm512_and_ps(values, real_mask);
720
+ }
721
+ Vectorized<c10::complex<float>> real() const {
722
+ return real_();
723
+ }
724
+ __m512 imag_() const {
725
+ const __m512 imag_mask = _mm512_castsi512_ps(_mm512_setr_epi32(0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF,
726
+ 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF,
727
+ 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF,
728
+ 0x00000000, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF));
729
+ return _mm512_and_ps(values, imag_mask);
730
+ }
731
+ Vectorized<c10::complex<float>> imag() const {
732
+ return _mm512_permute_ps(imag_(), 0xB1); //b a
733
+ }
734
+ __m512 conj_() const {
735
+ const __m512 sign_mask = _mm512_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0,
736
+ 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
737
+ return _mm512_xor_ps(values, sign_mask); // a -b
738
+ }
739
+ Vectorized<c10::complex<float>> conj() const {
740
+ return conj_();
741
+ }
742
+ Vectorized<c10::complex<float>> log() const {
743
+ // Most trigonometric ops use the log() op to improve complex number performance.
744
+ return map(std::log);
745
+ }
746
+ Vectorized<c10::complex<float>> log2() const {
747
+ const __m512 log2_ = _mm512_set1_ps(std::log(2));
748
+ return _mm512_div_ps(log(), log2_);
749
+ }
750
+ Vectorized<c10::complex<float>> log10() const {
751
+ const __m512 log10_ = _mm512_set1_ps(std::log(10));
752
+ return _mm512_div_ps(log(), log10_);
753
+ }
754
+ Vectorized<c10::complex<float>> log1p() const {
755
+ return map(std::log1p);
756
+ }
757
+ Vectorized<c10::complex<float>> asin() const {
758
+ // asin(x)
759
+ // = -i*ln(iz + sqrt(1 -z^2))
760
+ // = -i*ln((ai - b) + sqrt(1 - (a + bi)*(a + bi)))
761
+ // = -i*ln((-b + ai) + sqrt(1 - (a**2 - b**2) - 2*abi))
762
+ const __m512 one = _mm512_set1_ps(1);
763
+
764
+ auto conj = conj_();
765
+ auto b_a = _mm512_permute_ps(conj, 0xB1); //-b a
766
+ auto ab = _mm512_mul_ps(conj, b_a); //-ab -ab
767
+ auto im = _mm512_add_ps(ab, ab); //-2ab -2ab
768
+
769
+ auto val_2 = _mm512_mul_ps(values, values); // a*a b*b
770
+ auto re = hsub_ps(val_2, _mm512_permute_ps(val_2, 0xB1)); // a*a-b*b b*b-a*a
771
+ re = _mm512_sub_ps(one, re);
772
+
773
+ auto root = Vectorized(_mm512_mask_blend_ps(0xAAAA, re, im)).sqrt(); //sqrt(re + i*im)
774
+ auto ln = Vectorized(_mm512_add_ps(b_a, root)).log(); //ln(iz + sqrt())
775
+ return Vectorized(_mm512_permute_ps(ln.values, 0xB1)).conj(); //-i*ln()
776
+ }
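+ // The blend/permute sequence above follows the scalar identity step by step;
+ // an illustrative scalar sketch of the same computation:
+ //
+ //   std::complex<float> asin_ref(std::complex<float> z) {
+ //     const std::complex<float> i(0.f, 1.f);
+ //     return -i * std::log(i * z + std::sqrt(1.f - z * z));
+ //   }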
777
+ Vectorized<c10::complex<float>> acos() const {
778
+ return map(std::acos);
779
+ }
780
+ Vectorized<c10::complex<float>> atan() const;
781
+ Vectorized<c10::complex<float>> atanh() const {
782
+ return map(std::atanh);
783
+ }
784
+ Vectorized<c10::complex<float>> exp() const {
785
+ //exp(a + bi)
786
+ // = exp(a)*(cos(b) + sin(b)i)
787
+ auto exp = Sleef_expf16_u10(values); //exp(a) exp(b)
788
+ exp = _mm512_mask_blend_ps(0xAAAA, exp, _mm512_permute_ps(exp, 0xB1)); //exp(a) exp(a)
789
+
790
+ auto sin_cos = Sleef_sincosf16_u10(values); //[sin(a), cos(a)] [sin(b), cos(b)]
791
+ auto cos_sin = _mm512_mask_blend_ps(0xAAAA, _mm512_permute_ps(sin_cos.y, 0xB1),
792
+ sin_cos.x); //cos(b) sin(b)
793
+ return _mm512_mul_ps(exp, cos_sin);
794
+ }
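+ // Illustrative scalar sketch of the identity used above:
+ //
+ //   std::complex<float> exp_ref(std::complex<float> z) {
+ //     float e = std::exp(z.real());
+ //     return {e * std::cos(z.imag()), e * std::sin(z.imag())};
+ //   }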
795
+ Vectorized<c10::complex<float>> exp2() const {
796
+ // Use identity 2**x = exp(log(2) * x)
797
+ const __m512 ln_2 = _mm512_set1_ps(c10::ln_2<float>);
798
+ Vectorized<c10::complex<float>> scaled_values = _mm512_mul_ps(values, ln_2);
799
+ return scaled_values.exp();
800
+ }
801
+ Vectorized<c10::complex<float>> expm1() const {
802
+ return map(std::expm1);
803
+ }
804
+ Vectorized<c10::complex<float>> sin() const {
805
+ return map(std::sin);
806
+ }
807
+ Vectorized<c10::complex<float>> sinh() const {
808
+ return map(std::sinh);
809
+ }
810
+ Vectorized<c10::complex<float>> cos() const {
811
+ return map(std::cos);
812
+ }
813
+ Vectorized<c10::complex<float>> cosh() const {
814
+ return map(std::cosh);
815
+ }
816
+ Vectorized<c10::complex<float>> ceil() const {
817
+ return _mm512_ceil_ps(values);
818
+ }
819
+ Vectorized<c10::complex<float>> floor() const {
820
+ return _mm512_floor_ps(values);
821
+ }
822
+ Vectorized<c10::complex<float>> neg() const {
823
+ auto zero = _mm512_setzero_ps();
824
+ return _mm512_sub_ps(zero, values);
825
+ }
826
+ Vectorized<c10::complex<float>> round() const {
827
+ return _mm512_roundscale_ps(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
828
+ }
829
+ Vectorized<c10::complex<float>> tan() const {
830
+ return map(std::tan);
831
+ }
832
+ Vectorized<c10::complex<float>> tanh() const {
833
+ return map(std::tanh);
834
+ }
835
+ Vectorized<c10::complex<float>> trunc() const {
836
+ return _mm512_roundscale_ps(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
837
+ }
838
+ Vectorized<c10::complex<float>> sqrt() const {
839
+ return map(std::sqrt);
840
+ }
841
+ Vectorized<c10::complex<float>> reciprocal() const;
842
+ Vectorized<c10::complex<float>> rsqrt() const {
843
+ return sqrt().reciprocal();
844
+ }
845
+ Vectorized<c10::complex<float>> pow(const Vectorized<c10::complex<float>> &exp) const {
846
+ __at_align__ c10::complex<float> x_tmp[size()];
847
+ __at_align__ c10::complex<float> y_tmp[size()];
848
+ store(x_tmp);
849
+ exp.store(y_tmp);
850
+ for (const auto i : c10::irange(size())) {
851
+ x_tmp[i] = std::pow(x_tmp[i], y_tmp[i]);
852
+ }
853
+ return loadu(x_tmp);
854
+ }
855
+ // Comparison using the _CMP_**_OQ predicate.
856
+ // `O`: get false if an operand is NaN
857
+ // `Q`: do not raise if an operand is NaN
858
+ Vectorized<c10::complex<float>> operator==(const Vectorized<c10::complex<float>>& other) const {
859
+ auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_EQ_OQ);
860
+ return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF));
861
+ }
862
+ Vectorized<c10::complex<float>> operator!=(const Vectorized<c10::complex<float>>& other) const {
863
+ auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_NEQ_UQ);
864
+ return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF));
865
+ }
866
+ Vectorized<c10::complex<float>> operator<(const Vectorized<c10::complex<float>>& other) const {
867
+ TORCH_CHECK(false, "not supported for complex numbers");
868
+ }
869
+ Vectorized<c10::complex<float>> operator<=(const Vectorized<c10::complex<float>>& other) const {
870
+ TORCH_CHECK(false, "not supported for complex numbers");
871
+ }
872
+ Vectorized<c10::complex<float>> operator>(const Vectorized<c10::complex<float>>& other) const {
873
+ TORCH_CHECK(false, "not supported for complex numbers");
874
+ }
875
+ Vectorized<c10::complex<float>> operator>=(const Vectorized<c10::complex<float>>& other) const {
876
+ TORCH_CHECK(false, "not supported for complex numbers");
877
+ }
878
+
879
+ Vectorized<c10::complex<float>> eq(const Vectorized<c10::complex<float>>& other) const;
880
+ Vectorized<c10::complex<float>> ne(const Vectorized<c10::complex<float>>& other) const;
881
+ };
882
+
883
+ template <> Vectorized<c10::complex<float>> inline operator+(const Vectorized<c10::complex<float>> &a,
884
+ const Vectorized<c10::complex<float>> &b) {
885
+ return _mm512_add_ps(a, b);
886
+ }
887
+
888
+ template <> Vectorized<c10::complex<float>> inline operator-(const Vectorized<c10::complex<float>> &a,
889
+ const Vectorized<c10::complex<float>> &b) {
890
+ return _mm512_sub_ps(a, b);
891
+ }
892
+
893
+ template <> Vectorized<c10::complex<float>> inline operator*(const Vectorized<c10::complex<float>> &a,
894
+ const Vectorized<c10::complex<float>> &b) {
895
+ //(a + bi) * (c + di) = (ac - bd) + (ad + bc)i
896
+ const __m512 sign_mask = _mm512_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0,
897
+ 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
898
+ auto ac_bd = _mm512_mul_ps(a, b); //ac bd
899
+
900
+ auto d_c = _mm512_permute_ps(b, 0xB1); //d c
901
+ d_c = _mm512_xor_ps(sign_mask, d_c); //d -c
902
+ auto ad_bc = _mm512_mul_ps(a, d_c); //ad -bc
903
+
904
+ auto ret = Vectorized<c10::complex<float>>::hsub_ps(ac_bd, ad_bc); //ac - bd ad + bc
905
+ return ret;
906
+ }
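+ // Per complex element the kernel above computes, in scalar terms:
+ //
+ //   std::complex<float> mul_ref(std::complex<float> x, std::complex<float> y) {
+ //     float a = x.real(), b = x.imag(), c = y.real(), d = y.imag();
+ //     return {a * c - b * d, a * d + b * c};
+ //   }
+ //
+ // hsub_ps turns the pair (ac, bd) into ac - bd and (ad, -bc) into ad + bc.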
907
+
908
+ template <> Vectorized<c10::complex<float>> inline operator/(const Vectorized<c10::complex<float>> &a,
909
+ const Vectorized<c10::complex<float>> &b) {
910
+ //re + im*i = (a + bi) / (c + di)
911
+ auto mask = _mm512_set1_ps(-0.f);
912
+ auto fabs_cd = _mm512_andnot_ps(mask, b); // |c| |d|
913
+ auto fabs_dc = _mm512_permute_ps(fabs_cd, 0xB1); // |d| |c|
914
+ auto scale = _mm512_rcp14_ps(_mm512_max_ps(fabs_cd, fabs_dc)); // 1/sc 1/sc
915
+ auto a2 = _mm512_mul_ps(a, scale); // a/sc b/sc
916
+ auto b2 = _mm512_mul_ps(b, scale); // c/sc d/sc
917
+ auto acbd2 = _mm512_mul_ps(a2, b2);
918
+
919
+ const __m512 sign_mask = _mm512_setr_ps(-0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0,
920
+ -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0);
921
+ auto dc2 = _mm512_permute_ps(b2, 0xB1); // d/sc c/sc
922
+ dc2 = _mm512_xor_ps(sign_mask, dc2); // -d/|c,d| c/sc
923
+ auto adbc2 = _mm512_mul_ps(a2, dc2); //-ad/sc^2 bc/sc^2
924
+ auto res2 = Vectorized<c10::complex<float>>::hadd_ps(acbd2, adbc2); //(ac+bd)/sc^2 (bc-ad)/sc^2
925
+
926
+ // get the denominator
927
+ auto denom2 = Vectorized<c10::complex<float>>(b2).abs_2_(); // (c^2+d^2)/sc^2 (c^2+d^2)/sc^2
928
+ res2 = _mm512_div_ps(res2, denom2);
929
+ return res2;
930
+ }
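+ // The division above first scales both operands by roughly 1/max(|c|, |d|)
+ // (via the _mm512_rcp14_ps reciprocal approximation) and then applies the
+ // textbook formula
+ //   (a + bi) / (c + di) = ((ac + bd) + (bc - ad)i) / (c^2 + d^2),
+ // which keeps c^2 + d^2 from overflowing or underflowing for denominators of
+ // very large or very small magnitude.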
931
+
932
+ // reciprocal. Implement this here so we can use multiplication.
933
+ inline Vectorized<c10::complex<float>> Vectorized<c10::complex<float>>::reciprocal() const {
934
+ //re + im*i = (a + bi) / (c + di)
935
+ //re = (ac + bd)/abs_2() = c/abs_2()
936
+ //im = (bc - ad)/abs_2() = d/abs_2()
937
+ const __m512 sign_mask = _mm512_setr_ps(0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0,
938
+ 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0);
939
+ auto c_d = _mm512_xor_ps(sign_mask, values); //c -d
940
+ return _mm512_div_ps(c_d, abs_2_());
941
+ }
942
+
943
+ inline Vectorized<c10::complex<float>> Vectorized<c10::complex<float>>::atan() const {
944
+ // atan(x) = i/2 * ln((i + z)/(i - z))
945
+ const __m512 i = _mm512_setr_ps(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0,
946
+ 0.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 1.0);
947
+ const Vectorized i_half = _mm512_setr_ps(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5,
948
+ 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5);
949
+
950
+ auto sum = Vectorized(_mm512_add_ps(i, values)); // a 1+b
951
+ auto sub = Vectorized(_mm512_sub_ps(i, values)); // -a 1-b
952
+ auto ln = (sum/sub).log(); // ln((i + z)/(i - z))
953
+ return i_half*ln; // i/2*ln()
954
+ }
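+ // Illustrative scalar sketch of the identity used above:
+ //
+ //   std::complex<float> atan_ref(std::complex<float> z) {
+ //     const std::complex<float> i(0.f, 1.f);
+ //     return (i / 2.f) * std::log((i + z) / (i - z));
+ //   }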
955
+
956
+ template <>
957
+ Vectorized<c10::complex<float>> inline maximum(const Vectorized<c10::complex<float>>& a,
958
+ const Vectorized<c10::complex<float>>& b) {
959
+ auto zero_vector = _mm512_set1_epi32(0);
960
+ auto abs_a = a.abs_2_();
961
+ auto abs_b = b.abs_2_();
962
+ auto mask = _mm512_cmp_ps_mask(abs_a, abs_b, _CMP_LT_OQ);
963
+ auto max = _mm512_mask_blend_ps(mask, a, b);
964
+ // Exploit the fact that all-ones is a NaN.
965
+ auto isnan_mask = _mm512_cmp_ps_mask(abs_a, abs_b, _CMP_UNORD_Q);
966
+ auto isnan = _mm512_mask_set1_epi32(zero_vector, isnan_mask, 0xFFFFFFFF);
967
+ return _mm512_or_ps(max, _mm512_castsi512_ps(isnan));
968
+ }
969
+
970
+ template <>
971
+ Vectorized<c10::complex<float>> inline minimum(const Vectorized<c10::complex<float>>& a,
972
+ const Vectorized<c10::complex<float>>& b) {
973
+ auto zero_vector = _mm512_set1_epi32(0);
974
+ auto abs_a = a.abs_2_();
975
+ auto abs_b = b.abs_2_();
976
+ auto mask = _mm512_cmp_ps_mask(abs_a, abs_b, _CMP_GT_OQ);
977
+ auto min = _mm512_mask_blend_ps(mask, a, b);
978
+ // Exploit the fact that all-ones is a NaN.
979
+ auto isnan_mask = _mm512_cmp_ps_mask(abs_a, abs_b, _CMP_UNORD_Q);
980
+ auto isnan = _mm512_mask_set1_epi32(zero_vector, isnan_mask, 0xFFFFFFFF);
981
+ return _mm512_or_ps(min, _mm512_castsi512_ps(isnan));
982
+ }
983
+
984
+ template <>
985
+ Vectorized<c10::complex<float>> inline operator&(const Vectorized<c10::complex<float>>& a,
986
+ const Vectorized<c10::complex<float>>& b) {
987
+ return _mm512_and_ps(a, b);
988
+ }
989
+
990
+ template <>
991
+ Vectorized<c10::complex<float>> inline operator|(const Vectorized<c10::complex<float>>& a,
992
+ const Vectorized<c10::complex<float>>& b) {
993
+ return _mm512_or_ps(a, b);
994
+ }
995
+
996
+ template <>
997
+ Vectorized<c10::complex<float>> inline operator^(const Vectorized<c10::complex<float>>& a,
998
+ const Vectorized<c10::complex<float>>& b) {
999
+ return _mm512_xor_ps(a, b);
1000
+ }
1001
+
1002
+ inline Vectorized<c10::complex<float>> Vectorized<c10::complex<float>>::eq(
1003
+ const Vectorized<c10::complex<float>>& other) const {
1004
+ auto eq = (*this == other); // compares real and imag individually
1005
+ // If both real numbers and imag numbers are equal, then the complex numbers are equal
1006
+ return (eq.real() & eq.imag()) & Vectorized<c10::complex<float>>(_mm512_set1_ps(1.0f));
1007
+ }
1008
+
1009
+ inline Vectorized<c10::complex<float>> Vectorized<c10::complex<float>>::ne(
1010
+ const Vectorized<c10::complex<float>>& other) const {
1011
+ auto ne = (*this != other); // compares real and imag individually
1012
+ // If either real numbers or imag numbers are not equal, then the complex numbers are not equal
1013
+ return (ne.real() | ne.imag()) & Vectorized<c10::complex<float>>(_mm512_set1_ps(1.0f));
1014
+ }
1015
+
1016
+ #endif
1017
+
1018
+ }}}
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_double.h ADDED
@@ -0,0 +1,469 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+ #include <ATen/cpu/vec/vec_base.h>
8
+ #include <c10/util/irange.h>
9
+ #if (defined(CPU_CAPABILITY_AVX512)) && !defined(_MSC_VER)
10
+ #include <sleef.h>
11
+ #endif
12
+
13
+ namespace at {
14
+ namespace vec {
15
+ // See Note [CPU_CAPABILITY namespace]
16
+ inline namespace CPU_CAPABILITY {
17
+
18
+ #if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
19
+
20
+ template <> class Vectorized<double> {
21
+ private:
22
+ static constexpr __m512i zero_vector {0, 0, 0, 0, 0, 0, 0, 0};
23
+ public:
24
+ // values needs to be public for compilation with clang
25
+ // as vec512.h uses it
26
+ __m512d values;
27
+ using value_type = double;
28
+ using size_type = int;
29
+ static constexpr size_type size() {
30
+ return 8;
31
+ }
32
+ Vectorized() {}
33
+ Vectorized(__m512d v) : values(v) {}
34
+ Vectorized(double val) {
35
+ values = _mm512_set1_pd(val);
36
+ }
37
+ Vectorized(double val1, double val2, double val3, double val4,
38
+ double val5, double val6, double val7, double val8) {
39
+ values = _mm512_setr_pd(val1, val2, val3, val4, val5, val6, val7, val8);
40
+ }
41
+ operator __m512d() const {
42
+ return values;
43
+ }
44
+ template <int64_t mask>
45
+ static Vectorized<double> blend(const Vectorized<double>& a, const Vectorized<double>& b) {
46
+ return _mm512_mask_blend_pd(mask, a.values, b.values);
47
+ }
48
+ static Vectorized<double> blendv(const Vectorized<double>& a, const Vectorized<double>& b,
49
+ const Vectorized<double>& mask) {
50
+ auto all_ones = _mm512_set1_epi64(0xFFFFFFFFFFFFFFFF);
51
+ auto mmask = _mm512_cmp_epi64_mask(_mm512_castpd_si512(mask.values), all_ones, _MM_CMPINT_EQ);
52
+ return _mm512_mask_blend_pd(mmask, a.values, b.values);
53
+ }
54
+ template<typename step_t>
55
+ static Vectorized<double> arange(double base = 0., step_t step = static_cast<step_t>(1)) {
56
+ return Vectorized<double>(base, base + step, base + 2 * step, base + 3 * step,
57
+ base + 4 * step, base + 5 * step, base + 6 * step,
58
+ base + 7 * step);
59
+ }
60
+ static Vectorized<double> set(const Vectorized<double>& a, const Vectorized<double>& b,
61
+ int64_t count = size()) {
62
+ switch (count) {
63
+ case 0:
64
+ return a;
65
+ case 1:
66
+ return blend<1>(a, b);
67
+ case 2:
68
+ return blend<3>(a, b);
69
+ case 3:
70
+ return blend<7>(a, b);
71
+ case 4:
72
+ return blend<15>(a, b);
73
+ case 5:
74
+ return blend<31>(a, b);
75
+ case 6:
76
+ return blend<63>(a, b);
77
+ case 7:
78
+ return blend<127>(a, b);
79
+ }
80
+ return b;
81
+ }
82
+ static Vectorized<double> loadu(const void* ptr, int64_t count = size()) {
83
+ if (count == size())
84
+ return _mm512_loadu_pd(reinterpret_cast<const double*>(ptr));
85
+
86
+
87
+ __at_align__ double tmp_values[size()];
88
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
89
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
90
+ // instructions while a loop would be compiled to one instruction.
91
+ for (const auto i : c10::irange(size())) {
92
+ tmp_values[i] = 0.0;
93
+ }
94
+ std::memcpy(
95
+ tmp_values,
96
+ reinterpret_cast<const double*>(ptr),
97
+ count * sizeof(double));
98
+ return _mm512_load_pd(tmp_values);
99
+ }
100
+ void store(void* ptr, int count = size()) const {
101
+ if (count == size()) {
102
+ _mm512_storeu_pd(reinterpret_cast<double*>(ptr), values);
103
+ } else if (count > 0) {
104
+ double tmp_values[size()];
105
+ _mm512_storeu_pd(reinterpret_cast<double*>(tmp_values), values);
106
+ std::memcpy(ptr, tmp_values, count * sizeof(double));
107
+ }
108
+ }
109
+ const double& operator[](int idx) const = delete;
110
+ double& operator[](int idx) = delete;
111
+ int zero_mask() const {
112
+ // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit
113
+ __mmask8 cmp = _mm512_cmp_pd_mask(values, _mm512_set1_pd(0.0), _CMP_EQ_OQ);
114
+ return static_cast<int32_t>(cmp);
115
+ }
116
+ Vectorized<double> isnan() const {
117
+ auto cmp_mask = _mm512_cmp_pd_mask(values, _mm512_set1_pd(0.0), _CMP_UNORD_Q);
118
+ return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask,
119
+ 0xFFFFFFFFFFFFFFFF));
120
+ }
121
+ Vectorized<double> map(double (*const f)(double)) const {
122
+ __at_align__ double tmp[size()];
123
+ store(tmp);
124
+ for (const auto i : c10::irange(size())) {
125
+ tmp[i] = f(tmp[i]);
126
+ }
127
+ return loadu(tmp);
128
+ }
129
+ Vectorized<double> abs() const {
130
+ auto mask = _mm512_set1_pd(-0.f);
131
+ return _mm512_andnot_pd(mask, values);
132
+ }
133
+ Vectorized<double> angle() const {
134
+ const auto zero_vec = _mm512_castsi512_pd(zero_vector);
135
+ const auto nan_vec = _mm512_set1_pd(NAN);
136
+ const auto not_nan_mask = _mm512_cmp_pd_mask(values, values, _CMP_EQ_OQ);
137
+ const auto not_nan = _mm512_mask_set1_epi64(zero_vector, not_nan_mask,
138
+ 0xFFFFFFFFFFFFFFFF);
139
+ const auto nan_mask = _mm512_cmp_pd_mask(_mm512_castsi512_pd(not_nan),
140
+ zero_vec, _CMP_EQ_OQ);
141
+ const auto pi = _mm512_set1_pd(c10::pi<double>);
142
+
143
+ const auto neg_mask = _mm512_cmp_pd_mask(values, zero_vec, _CMP_LT_OQ);
144
+ auto angle = _mm512_mask_blend_pd(neg_mask, zero_vec, pi);
145
+ angle = _mm512_mask_blend_pd(nan_mask, angle, nan_vec);
146
+ return angle;
147
+ }
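+ // For real inputs this reduces to: 0 for x >= 0, pi for x < 0, and NaN
+ // propagated for NaN inputs (the polar angle of a real number).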
148
+ Vectorized<double> real() const {
149
+ return *this;
150
+ }
151
+ Vectorized<double> imag() const {
152
+ return _mm512_set1_pd(0);
153
+ }
154
+ Vectorized<double> conj() const {
155
+ return *this;
156
+ }
157
+ Vectorized<double> acos() const {
158
+ return Vectorized<double>(Sleef_acosd8_u10(values));
159
+ }
160
+ Vectorized<double> asin() const {
161
+ return Vectorized<double>(Sleef_asind8_u10(values));
162
+ }
163
+ Vectorized<double> atan() const {
164
+ return Vectorized<double>(Sleef_atand8_u10(values));
165
+ }
166
+ Vectorized<double> atanh() const {
167
+ return Vectorized<double>(Sleef_atanhd8_u10(values));
168
+ }
169
+ Vectorized<double> atan2(const Vectorized<double> &b) const {
170
+ return Vectorized<double>(Sleef_atan2d8_u10(values, b));
171
+ }
172
+ Vectorized<double> copysign(const Vectorized<double> &sign) const {
173
+ return Vectorized<double>(Sleef_copysignd8(values, sign));
174
+ }
175
+ Vectorized<double> erf() const {
176
+ return Vectorized<double>(Sleef_erfd8_u10(values));
177
+ }
178
+ Vectorized<double> erfc() const {
179
+ return Vectorized<double>(Sleef_erfcd8_u15(values));
180
+ }
181
+ Vectorized<double> erfinv() const {
182
+ return map(calc_erfinv);
183
+ }
184
+ Vectorized<double> exp() const {
185
+ return Vectorized<double>(Sleef_expd8_u10(values));
186
+ }
187
+ Vectorized<double> exp2() const {
188
+ return Vectorized<double>(Sleef_exp2d8_u10(values));
189
+ }
190
+ Vectorized<double> expm1() const {
191
+ return Vectorized<double>(Sleef_expm1d8_u10(values));
192
+ }
193
+ Vectorized<double> fmod(const Vectorized<double>& q) const {
194
+ return Vectorized<double>(Sleef_fmodd8(values, q));
195
+ }
196
+ Vectorized<double> hypot(const Vectorized<double> &b) const {
197
+ return Vectorized<double>(Sleef_hypotd8_u05(values, b));
198
+ }
199
+ Vectorized<double> i0() const {
200
+ return map(calc_i0);
201
+ }
202
+ Vectorized<double> i0e() const {
203
+ return map(calc_i0e);
204
+ }
205
+ Vectorized<double> digamma() const {
206
+ return map(calc_digamma);
207
+ }
208
+ Vectorized<double> igamma(const Vectorized<double> &x) const {
209
+ __at_align__ double tmp[size()];
210
+ __at_align__ double tmp_x[size()];
211
+ store(tmp);
212
+ x.store(tmp_x);
213
+ for (const auto i : c10::irange(size())) {
214
+ tmp[i] = calc_igamma(tmp[i], tmp_x[i]);
215
+ }
216
+ return loadu(tmp);
217
+ }
218
+ Vectorized<double> igammac(const Vectorized<double> &x) const {
219
+ __at_align__ double tmp[size()];
220
+ __at_align__ double tmp_x[size()];
221
+ store(tmp);
222
+ x.store(tmp_x);
223
+ for (const auto i : c10::irange(size())) {
224
+ tmp[i] = calc_igammac(tmp[i], tmp_x[i]);
225
+ }
226
+ return loadu(tmp);
227
+ }
228
+ Vectorized<double> log() const {
229
+ return Vectorized<double>(Sleef_logd8_u10(values));
230
+ }
231
+ Vectorized<double> log2() const {
232
+ return Vectorized<double>(Sleef_log2d8_u10(values));
233
+ }
234
+ Vectorized<double> log10() const {
235
+ return Vectorized<double>(Sleef_log10d8_u10(values));
236
+ }
237
+ Vectorized<double> log1p() const {
238
+ return Vectorized<double>(Sleef_log1pd8_u10(values));
239
+ }
240
+ Vectorized<double> sin() const {
241
+ return Vectorized<double>(Sleef_sind8_u10(values));
242
+ }
243
+ Vectorized<double> sinh() const {
244
+ return Vectorized<double>(Sleef_sinhd8_u10(values));
245
+ }
246
+ Vectorized<double> cos() const {
247
+ return Vectorized<double>(Sleef_cosd8_u10(values));
248
+ }
249
+ Vectorized<double> cosh() const {
250
+ return Vectorized<double>(Sleef_coshd8_u10(values));
251
+ }
252
+ Vectorized<double> ceil() const {
253
+ return _mm512_ceil_pd(values);
254
+ }
255
+ Vectorized<double> floor() const {
256
+ return _mm512_floor_pd(values);
257
+ }
258
+ Vectorized<double> frac() const;
259
+ Vectorized<double> neg() const {
260
+ return _mm512_xor_pd(_mm512_set1_pd(-0.), values);
261
+ }
262
+ Vectorized<double> nextafter(const Vectorized<double> &b) const {
263
+ return Vectorized<double>(Sleef_nextafterd8(values, b));
264
+ }
265
+ Vectorized<double> round() const {
266
+ return _mm512_roundscale_pd(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
267
+ }
268
+ Vectorized<double> tan() const {
269
+ return Vectorized<double>(Sleef_tand8_u10(values));
270
+ }
271
+ Vectorized<double> tanh() const {
272
+ return Vectorized<double>(Sleef_tanhd8_u10(values));
273
+ }
274
+ Vectorized<double> trunc() const {
275
+ return _mm512_roundscale_pd(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
276
+ }
277
+ Vectorized<double> lgamma() const {
278
+ return Vectorized<double>(Sleef_lgammad8_u10(values));
279
+ }
280
+ Vectorized<double> sqrt() const {
281
+ return _mm512_sqrt_pd(values);
282
+ }
283
+ Vectorized<double> reciprocal() const {
284
+ return _mm512_div_pd(_mm512_set1_pd(1), values);
285
+ }
286
+ Vectorized<double> rsqrt() const {
287
+ return _mm512_div_pd(_mm512_set1_pd(1), _mm512_sqrt_pd(values));
288
+ }
289
+ Vectorized<double> pow(const Vectorized<double> &b) const {
290
+ return Vectorized<double>(Sleef_powd8_u10(values, b));
291
+ }
292
+ // Comparison using the _CMP_**_OQ predicate.
293
+ // `O`: get false if an operand is NaN
294
+ // `Q`: do not raise if an operand is NaN
295
+ Vectorized<double> operator==(const Vectorized<double>& other) const {
296
+ auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_EQ_OQ);
297
+ return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask,
298
+ 0xFFFFFFFFFFFFFFFF));
299
+ }
300
+
301
+ Vectorized<double> operator!=(const Vectorized<double>& other) const {
302
+ auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_NEQ_UQ);
303
+ return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask,
304
+ 0xFFFFFFFFFFFFFFFF));
305
+ }
306
+
307
+ Vectorized<double> operator<(const Vectorized<double>& other) const {
308
+ auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_LT_OQ);
309
+ return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask,
310
+ 0xFFFFFFFFFFFFFFFF));
311
+ }
312
+
313
+ Vectorized<double> operator<=(const Vectorized<double>& other) const {
314
+ auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_LE_OQ);
315
+ return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask,
316
+ 0xFFFFFFFFFFFFFFFF));
317
+ }
318
+
319
+ Vectorized<double> operator>(const Vectorized<double>& other) const {
320
+ auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_GT_OQ);
321
+ return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask,
322
+ 0xFFFFFFFFFFFFFFFF));
323
+ }
324
+
325
+ Vectorized<double> operator>=(const Vectorized<double>& other) const {
326
+ auto cmp_mask = _mm512_cmp_pd_mask(values, other.values, _CMP_GE_OQ);
327
+ return _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vector, cmp_mask,
328
+ 0xFFFFFFFFFFFFFFFF));
329
+ }
330
+
331
+ Vectorized<double> eq(const Vectorized<double>& other) const;
332
+ Vectorized<double> ne(const Vectorized<double>& other) const;
333
+ Vectorized<double> lt(const Vectorized<double>& other) const;
334
+ Vectorized<double> le(const Vectorized<double>& other) const;
335
+ Vectorized<double> gt(const Vectorized<double>& other) const;
336
+ Vectorized<double> ge(const Vectorized<double>& other) const;
337
+ };
338
+
339
+ template <>
340
+ Vectorized<double> inline operator+(const Vectorized<double>& a, const Vectorized<double>& b) {
341
+ return _mm512_add_pd(a, b);
342
+ }
343
+
344
+ template <>
345
+ Vectorized<double> inline operator-(const Vectorized<double>& a, const Vectorized<double>& b) {
346
+ return _mm512_sub_pd(a, b);
347
+ }
348
+
349
+ template <>
350
+ Vectorized<double> inline operator*(const Vectorized<double>& a, const Vectorized<double>& b) {
351
+ return _mm512_mul_pd(a, b);
352
+ }
353
+
354
+ template <>
355
+ Vectorized<double> inline operator/(const Vectorized<double>& a, const Vectorized<double>& b) {
356
+ return _mm512_div_pd(a, b);
357
+ }
358
+
359
+ // frac. Implement this here so we can use subtraction.
360
+ inline Vectorized<double> Vectorized<double>::frac() const {
361
+ return *this - this->trunc();
362
+ }
363
+
364
+ // Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
365
+ // either input is a NaN.
366
+ template <>
367
+ Vectorized<double> inline maximum(const Vectorized<double>& a, const Vectorized<double>& b) {
368
+ auto zero_vec = _mm512_set1_epi64(0);
369
+ Vectorized<double> max = _mm512_max_pd(a, b);
370
+ auto isnan_mask = _mm512_cmp_pd_mask(a, b, _CMP_UNORD_Q);
371
+ auto isnan = _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vec, isnan_mask,
372
+ 0xFFFFFFFFFFFFFFFF));
373
+ // Exploit the fact that all-ones is a NaN.
374
+ return _mm512_or_pd(max, isnan);
375
+ }
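+ // _mm512_max_pd alone would return the second operand when either input is
+ // NaN; ORing the unordered lanes with an all-ones bit pattern (exponent all
+ // ones, non-zero mantissa, hence a NaN) restores the NaN-propagating
+ // semantics required here.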
376
+
377
+ // Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
378
+ // either input is a NaN.
379
+ template <>
380
+ Vectorized<double> inline minimum(const Vectorized<double>& a, const Vectorized<double>& b) {
381
+ auto zero_vec = _mm512_set1_epi64(0);
382
+ Vectorized<double> min = _mm512_min_pd(a, b);
383
+ auto isnan_mask = _mm512_cmp_pd_mask(a, b, _CMP_UNORD_Q);
384
+ auto isnan = _mm512_castsi512_pd(_mm512_mask_set1_epi64(zero_vec, isnan_mask,
385
+ 0xFFFFFFFFFFFFFFFF));
386
+ // Exploit the fact that all-ones is a NaN.
387
+ return _mm512_or_pd(min, isnan);
388
+ }
389
+
390
+ template <>
391
+ Vectorized<double> inline clamp(const Vectorized<double>& a, const Vectorized<double>& min, const Vectorized<double>& max) {
392
+ return _mm512_min_pd(max, _mm512_max_pd(min, a));
393
+ }
394
+
395
+ template <>
396
+ Vectorized<double> inline clamp_min(const Vectorized<double>& a, const Vectorized<double>& min) {
397
+ return _mm512_max_pd(min, a);
398
+ }
399
+
400
+ template <>
401
+ Vectorized<double> inline clamp_max(const Vectorized<double>& a, const Vectorized<double>& max) {
402
+ return _mm512_min_pd(max, a);
403
+ }
404
+
405
+ template <>
406
+ Vectorized<double> inline operator&(const Vectorized<double>& a, const Vectorized<double>& b) {
407
+ return _mm512_and_pd(a, b);
408
+ }
409
+
410
+ template <>
411
+ Vectorized<double> inline operator|(const Vectorized<double>& a, const Vectorized<double>& b) {
412
+ return _mm512_or_pd(a, b);
413
+ }
414
+
415
+ template <>
416
+ Vectorized<double> inline operator^(const Vectorized<double>& a, const Vectorized<double>& b) {
417
+ return _mm512_xor_pd(a, b);
418
+ }
419
+
420
+ inline Vectorized<double> Vectorized<double>::eq(const Vectorized<double>& other) const {
421
+ return (*this == other) & Vectorized<double>(1.0);
422
+ }
423
+
424
+ inline Vectorized<double> Vectorized<double>::ne(const Vectorized<double>& other) const {
425
+ return (*this != other) & Vectorized<double>(1.0);
426
+ }
427
+
428
+ inline Vectorized<double> Vectorized<double>::gt(const Vectorized<double>& other) const {
429
+ return (*this > other) & Vectorized<double>(1.0);
430
+ }
431
+
432
+ inline Vectorized<double> Vectorized<double>::ge(const Vectorized<double>& other) const {
433
+ return (*this >= other) & Vectorized<double>(1.0);
434
+ }
435
+
436
+ inline Vectorized<double> Vectorized<double>::lt(const Vectorized<double>& other) const {
437
+ return (*this < other) & Vectorized<double>(1.0);
438
+ }
439
+
440
+ inline Vectorized<double> Vectorized<double>::le(const Vectorized<double>& other) const {
441
+ return (*this <= other) & Vectorized<double>(1.0);
442
+ }
443
+
444
+ template <>
445
+ inline void convert(const double* src, double* dst, int64_t n) {
446
+ int64_t i;
447
+ #pragma unroll
448
+ for (i = 0; i <= (n - Vectorized<double>::size()); i += Vectorized<double>::size()) {
449
+ _mm512_storeu_pd(dst + i, _mm512_loadu_pd(src + i));
450
+ }
451
+ #pragma unroll
452
+ for (; i < n; i++) {
453
+ dst[i] = src[i];
454
+ }
455
+ }
456
+
457
+ template <>
458
+ Vectorized<double> inline fmadd(const Vectorized<double>& a, const Vectorized<double>& b, const Vectorized<double>& c) {
459
+ return _mm512_fmadd_pd(a, b, c);
460
+ }
461
+
462
+ template <>
463
+ Vectorized<double> inline fmsub(const Vectorized<double>& a, const Vectorized<double>& b, const Vectorized<double>& c) {
464
+ return _mm512_fmsub_pd(a, b, c);
465
+ }
466
+
467
+ #endif
468
+
469
+ }}}
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_float.h ADDED
@@ -0,0 +1,730 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+ #include <ATen/cpu/vec/vec_base.h>
8
+ #include <c10/util/irange.h>
9
+ #if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
10
+ #include <sleef.h>
11
+ #endif
12
+
13
+ namespace at {
14
+ namespace vec {
15
+ // See Note [CPU_CAPABILITY namespace]
16
+ inline namespace CPU_CAPABILITY {
17
+
18
+ #if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
19
+
20
+ template <> class Vectorized<float> {
21
+ private:
22
+ static constexpr __m512i zero_vec {0, 0, 0, 0, 0, 0, 0, 0};
23
+ public:
24
+ __m512 values;
25
+ using value_type = float;
26
+ using size_type = int;
27
+ static constexpr size_type size() {
28
+ return 16;
29
+ }
30
+ Vectorized() {}
31
+ Vectorized(__m512 v) : values(v) {}
32
+ Vectorized(float val) {
33
+ values = _mm512_set1_ps(val);
34
+ }
35
+ Vectorized(float val1, float val2, float val3, float val4,
36
+ float val5, float val6, float val7, float val8,
37
+ float val9, float val10, float val11, float val12,
38
+ float val13, float val14, float val15, float val16) {
39
+ values = _mm512_setr_ps(val1, val2, val3, val4, val5, val6, val7, val8,
40
+ val9, val10, val11, val12, val13, val14, val15, val16);
41
+ }
42
+ operator __m512() const {
43
+ return values;
44
+ }
45
+ template <int64_t mask>
46
+ static Vectorized<float> blend(const Vectorized<float>& a, const Vectorized<float>& b) {
47
+ return _mm512_mask_blend_ps(mask, a.values, b.values);
48
+ }
49
+ static Vectorized<float> blendv(const Vectorized<float>& a, const Vectorized<float>& b,
50
+ const Vectorized<float>& mask) {
51
+ auto all_ones = _mm512_set1_epi32(0xFFFFFFFF);
52
+ auto mmask = _mm512_cmp_epi32_mask(_mm512_castps_si512(mask.values), all_ones, _MM_CMPINT_EQ);
53
+ return _mm512_mask_blend_ps(mmask, a.values, b.values);
54
+ }
55
+ template<typename step_t>
56
+ static Vectorized<float> arange(float base = 0.f, step_t step = static_cast<step_t>(1)) {
57
+ return Vectorized<float>(
58
+ base, base + step, base + 2 * step, base + 3 * step,
59
+ base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step,
60
+ base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step,
61
+ base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step);
62
+ }
63
+ static Vectorized<float> set(const Vectorized<float>& a, const Vectorized<float>& b,
64
+ int64_t count = size()) {
65
+ switch (count) {
66
+ case 0:
67
+ return a;
68
+ case 1:
69
+ return blend<1>(a, b);
70
+ case 2:
71
+ return blend<3>(a, b);
72
+ case 3:
73
+ return blend<7>(a, b);
74
+ case 4:
75
+ return blend<15>(a, b);
76
+ case 5:
77
+ return blend<31>(a, b);
78
+ case 6:
79
+ return blend<63>(a, b);
80
+ case 7:
81
+ return blend<127>(a, b);
82
+ case 8:
83
+ return blend<255>(a, b);
84
+ case 9:
85
+ return blend<511>(a, b);
86
+ case 10:
87
+ return blend<1023>(a, b);
88
+ case 11:
89
+ return blend<2047>(a, b);
90
+ case 12:
91
+ return blend<4095>(a, b);
92
+ case 13:
93
+ return blend<8191>(a, b);
94
+ case 14:
95
+ return blend<16383>(a, b);
96
+ case 15:
97
+ return blend<32767>(a, b);
98
+ }
99
+ return b;
100
+ }
101
+ static Vectorized<float> loadu(const void* ptr, int64_t count = size()) {
102
+ if (count == size())
103
+ return _mm512_loadu_ps(reinterpret_cast<const float*>(ptr));
104
+ __at_align__ float tmp_values[size()];
105
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
106
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
107
+ // instructions while a loop would be compiled to one instruction.
108
+ for (const auto i : c10::irange(size())) {
109
+ tmp_values[i] = 0.0;
110
+ }
111
+ std::memcpy(
112
+ tmp_values, reinterpret_cast<const float*>(ptr), count * sizeof(float));
113
+ return _mm512_loadu_ps(tmp_values);
114
+ }
115
+ void store(void* ptr, int64_t count = size()) const {
116
+ if (count == size()) {
117
+ _mm512_storeu_ps(reinterpret_cast<float*>(ptr), values);
118
+ } else if (count > 0) {
119
+ float tmp_values[size()];
120
+ _mm512_storeu_ps(reinterpret_cast<float*>(tmp_values), values);
121
+ std::memcpy(ptr, tmp_values, count * sizeof(float));
122
+ }
123
+ }
124
+ const float& operator[](int idx) const = delete;
125
+ float& operator[](int idx) = delete;
126
+ int zero_mask() const {
127
+ // returns an integer mask where all zero elements are translated to 1-bit and others are translated to 0-bit
128
+ __mmask16 cmp = _mm512_cmp_ps_mask(values, _mm512_set1_ps(0.0), _CMP_EQ_OQ);
129
+ return static_cast<int32_t>(cmp);
130
+ }
131
+ Vectorized<float> isnan() const {
132
+ auto mask = _mm512_cmp_ps_mask(values, _mm512_set1_ps(0.0), _CMP_UNORD_Q);
133
+ return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask,
134
+ 0xFFFFFFFF));
135
+ }
136
+ Vectorized<float> map(float (*const f)(float)) const {
137
+ __at_align__ float tmp[size()];
138
+ store(tmp);
139
+ for (const auto i : c10::irange(size())) {
140
+ tmp[i] = f(tmp[i]);
141
+ }
142
+ return loadu(tmp);
143
+ }
144
+ Vectorized<float> abs() const {
145
+ auto mask = _mm512_set1_ps(-0.f);
146
+ return _mm512_andnot_ps(mask, values);
147
+ }
148
+ Vectorized<float> angle() const {
149
+ __m512 zero_vec = _mm512_set1_ps(0.f);
150
+ const auto nan_vec = _mm512_set1_ps(NAN);
151
+ const auto not_nan_mask = _mm512_cmp_ps_mask(values, values, _CMP_EQ_OQ);
152
+ const auto not_nan_vec = _mm512_mask_set1_epi32(_mm512_castps_si512(zero_vec),
153
+ not_nan_mask, 0xFFFFFFFF);
154
+ const auto nan_mask = _mm512_cmp_ps_mask(_mm512_castsi512_ps(not_nan_vec),
155
+ zero_vec, _CMP_EQ_OQ);
156
+ const auto pi = _mm512_set1_ps(c10::pi<double>);
157
+
158
+ const auto neg_mask = _mm512_cmp_ps_mask(values, zero_vec, _CMP_LT_OQ);
159
+ auto angle = _mm512_mask_blend_ps(neg_mask, zero_vec, pi);
160
+ angle = _mm512_mask_blend_ps(nan_mask, angle, nan_vec);
161
+ return angle;
162
+ }
163
+ Vectorized<float> real() const {
164
+ return *this;
165
+ }
166
+ Vectorized<float> imag() const {
167
+ return _mm512_set1_ps(0);
168
+ }
169
+ Vectorized<float> conj() const {
170
+ return *this;
171
+ }
172
+ Vectorized<float> acos() const {
173
+ return Vectorized<float>(Sleef_acosf16_u10(values));
174
+ }
175
+ Vectorized<float> asin() const {
176
+ return Vectorized<float>(Sleef_asinf16_u10(values));
177
+ }
178
+ Vectorized<float> atan() const {
179
+ return Vectorized<float>(Sleef_atanf16_u10(values));
180
+ }
181
+ Vectorized<float> atanh() const {
182
+ return Vectorized<float>(Sleef_atanhf16_u10(values));
183
+ }
184
+ Vectorized<float> atan2(const Vectorized<float> &b) const {
185
+ return Vectorized<float>(Sleef_atan2f16_u10(values, b));
186
+ }
187
+ Vectorized<float> copysign(const Vectorized<float> &sign) const {
188
+ return Vectorized<float>(Sleef_copysignf16(values, sign));
189
+ }
190
+ Vectorized<float> erf() const {
191
+ // constants
192
+ const auto neg_zero_vec = _mm512_set1_ps(-0.f);
193
+ const auto one_vec = _mm512_set1_ps(1.0f);
194
+ const auto p = _mm512_set1_ps(0.3275911f);
195
+ const auto p1 = _mm512_set1_ps(0.254829592f);
196
+ const auto p2 = _mm512_set1_ps(-0.284496736f);
197
+ const auto p3 = _mm512_set1_ps(1.421413741f);
198
+ const auto p4 = _mm512_set1_ps(-1.453152027f);
199
+ const auto p5 = _mm512_set1_ps(1.061405429f);
200
+ // sign(x)
201
+ auto sign_mask = _mm512_and_ps(neg_zero_vec, values);
202
+ auto abs_vec = _mm512_abs_ps(values);
203
+ // t = 1 / (p * abs(x) + 1)
204
+ auto tmp0 = _mm512_fmadd_ps(p, abs_vec, one_vec);
205
+ auto t = _mm512_div_ps(one_vec, tmp0);
206
+ // r = p5 * t ^ 4 + p4 * t ^ 3 + p3 * t ^ 2 + p2 * t + p1
207
+ auto tmp1 = _mm512_fmadd_ps(p5, t, p4);
208
+ auto tmp2 = _mm512_fmadd_ps(tmp1, t, p3);
209
+ auto tmp3 = _mm512_fmadd_ps(tmp2, t, p2);
210
+ auto r = _mm512_fmadd_ps(tmp3, t, p1);
211
+ // - exp(- x * x)
212
+ auto pow_2 = _mm512_mul_ps(values, values);
213
+ auto neg_pow_2 = _mm512_xor_ps(neg_zero_vec, pow_2);
214
+ // auto tmp4 = exp(neg_pow_2);
215
+ auto tmp4 = Vectorized<float>(Sleef_expf16_u10(neg_pow_2));
216
+ auto tmp5 = _mm512_xor_ps(neg_zero_vec, tmp4);
217
+ // erf(x) = sign(x) * (1 - r * t * exp(- x * x))
218
+ auto tmp6 = _mm512_mul_ps(tmp5, t);
219
+ auto tmp7 = _mm512_fmadd_ps(tmp6, r, one_vec);
220
+ return _mm512_xor_ps(sign_mask, tmp7);
221
+ }
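+ // The constants above are the Abramowitz & Stegun 7.1.26 rational
+ // approximation of erf (absolute error below ~1.5e-7); in scalar form:
+ //
+ //   float erf_ref(float x) {
+ //     float t = 1.f / (1.f + 0.3275911f * std::fabs(x));
+ //     float poly = t * (0.254829592f + t * (-0.284496736f + t * (1.421413741f
+ //                  + t * (-1.453152027f + t * 1.061405429f))));
+ //     return std::copysign(1.f - poly * std::exp(-x * x), x);
+ //   }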
222
+ Vectorized<float> erfc() const {
223
+ return Vectorized<float>(Sleef_erfcf16_u15(values));
224
+ }
225
+ Vectorized<float> erfinv() const {
226
+ return map(calc_erfinv);
227
+ }
228
+ Vectorized<float> exp() const {
229
+ return Vectorized<float>(Sleef_expf16_u10(values));
230
+ }
231
+ Vectorized<float> exp2() const {
232
+ return Vectorized<float>(Sleef_exp2f16_u10(values));
233
+ }
234
+ Vectorized<float> expm1() const {
235
+ return Vectorized<float>(Sleef_expm1f16_u10(values));
236
+ }
237
+ Vectorized<float> fmod(const Vectorized<float>& q) const {
238
+ return Vectorized<float>(Sleef_fmodf16(values, q));
239
+ }
240
+ Vectorized<float> log() const {
241
+ return Vectorized<float>(Sleef_logf16_u10(values));
242
+ }
243
+ Vectorized<float> log2() const {
244
+ return Vectorized<float>(Sleef_log2f16_u10(values));
245
+ }
246
+ Vectorized<float> log10() const {
247
+ return Vectorized<float>(Sleef_log10f16_u10(values));
248
+ }
249
+ Vectorized<float> log1p() const {
250
+ return Vectorized<float>(Sleef_log1pf16_u10(values));
251
+ }
252
+ Vectorized<float> frac() const;
253
+ Vectorized<float> sin() const {
254
+ return Vectorized<float>(Sleef_sinf16_u35(values));
255
+ }
256
+ Vectorized<float> sinh() const {
257
+ return Vectorized<float>(Sleef_sinhf16_u10(values));
258
+ }
259
+ Vectorized<float> cos() const {
260
+ return Vectorized<float>(Sleef_cosf16_u35(values));
261
+ }
262
+ Vectorized<float> cosh() const {
263
+ return Vectorized<float>(Sleef_coshf16_u10(values));
264
+ }
265
+ Vectorized<float> ceil() const {
266
+ return _mm512_ceil_ps(values);
267
+ }
268
+ Vectorized<float> floor() const {
269
+ return _mm512_floor_ps(values);
270
+ }
271
+ Vectorized<float> hypot(const Vectorized<float> &b) const {
272
+ return Vectorized<float>(Sleef_hypotf16_u05(values, b));
273
+ }
274
+ Vectorized<float> i0() const {
275
+ return map(calc_i0);
276
+ }
277
+ Vectorized<float> i0e() const {
278
+ return map(calc_i0e);
279
+ }
280
+ Vectorized<float> digamma() const {
281
+ return map(calc_digamma);
282
+ }
283
+ Vectorized<float> igamma(const Vectorized<float> &x) const {
284
+ __at_align__ float tmp[size()];
285
+ __at_align__ float tmp_x[size()];
286
+ store(tmp);
287
+ x.store(tmp_x);
288
+ for (const auto i : c10::irange(size())) {
289
+ tmp[i] = calc_igamma(tmp[i], tmp_x[i]);
290
+ }
291
+ return loadu(tmp);
292
+ }
293
+ Vectorized<float> igammac(const Vectorized<float> &x) const {
294
+ __at_align__ float tmp[size()];
295
+ __at_align__ float tmp_x[size()];
296
+ store(tmp);
297
+ x.store(tmp_x);
298
+ for (const auto i : c10::irange(size())) {
299
+ tmp[i] = calc_igammac(tmp[i], tmp_x[i]);
300
+ }
301
+ return loadu(tmp);
302
+ }
303
+ Vectorized<float> neg() const {
304
+ return _mm512_xor_ps(_mm512_set1_ps(-0.f), values);
305
+ }
306
+ Vectorized<float> nextafter(const Vectorized<float> &b) const {
307
+ return Vectorized<float>(Sleef_nextafterf16(values, b));
308
+ }
309
+ Vectorized<float> round() const {
310
+ return _mm512_roundscale_ps(values, (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC));
311
+ }
312
+ Vectorized<float> tan() const {
313
+ return Vectorized<float>(Sleef_tanf16_u10(values));
314
+ }
315
+ Vectorized<float> tanh() const {
316
+ return Vectorized<float>(Sleef_tanhf16_u10(values));
317
+ }
318
+ Vectorized<float> trunc() const {
319
+ return _mm512_roundscale_ps(values, (_MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC));
320
+ }
321
+ Vectorized<float> lgamma() const {
322
+ return Vectorized<float>(Sleef_lgammaf16_u10(values));
323
+ }
324
+ Vectorized<float> sqrt() const {
325
+ return _mm512_sqrt_ps(values);
326
+ }
327
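+ // Note: reciprocal() and rsqrt() below use a full-precision division rather
+ // than the approximate _mm512_rcp14_ps / _mm512_rsqrt14_ps instructions.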
+ Vectorized<float> reciprocal() const {
328
+ return _mm512_div_ps(_mm512_set1_ps(1), values);
329
+ }
330
+ Vectorized<float> rsqrt() const {
331
+ return _mm512_div_ps(_mm512_set1_ps(1), _mm512_sqrt_ps(values));
332
+ }
333
+ Vectorized<float> pow(const Vectorized<float> &b) const {
334
+ return Vectorized<float>(Sleef_powf16_u10(values, b));
335
+ }
336
+ // Comparison using the _CMP_**_OQ predicate.
337
+ // `O` (ordered): the comparison yields false if either operand is NaN
338
+ // `Q` (quiet): no floating-point exception is raised if an operand is NaN
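+ // For example, with an ordered predicate a NaN lane compares false, so both
+ // (x == x) and (x < y) yield zero bits in lanes where x is NaN; only the
+ // unordered _CMP_NEQ_UQ used by operator!= reports true for NaN lanes.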
339
+ Vectorized<float> operator==(const Vectorized<float>& other) const {
340
+ auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_EQ_OQ);
341
+ return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask,
342
+ 0xFFFFFFFF));
343
+ }
344
+
345
+ Vectorized<float> operator!=(const Vectorized<float>& other) const {
346
+ auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_NEQ_UQ);
347
+ return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask,
348
+ 0xFFFFFFFF));
349
+ }
350
+
351
+ Vectorized<float> operator<(const Vectorized<float>& other) const {
352
+ auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_LT_OQ);
353
+ return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask,
354
+ 0xFFFFFFFF));
355
+ }
356
+
357
+ Vectorized<float> operator<=(const Vectorized<float>& other) const {
358
+ auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_LE_OQ);
359
+ return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask,
360
+ 0xFFFFFFFF));
361
+ }
362
+
363
+ Vectorized<float> operator>(const Vectorized<float>& other) const {
364
+ auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_GT_OQ);
365
+ return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask,
366
+ 0xFFFFFFFF));
367
+ }
368
+
369
+ Vectorized<float> operator>=(const Vectorized<float>& other) const {
370
+ auto mask = _mm512_cmp_ps_mask(values, other.values, _CMP_GE_OQ);
371
+ return _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, mask,
372
+ 0xFFFFFFFF));
373
+ }
374
+
375
+ Vectorized<float> eq(const Vectorized<float>& other) const;
376
+ Vectorized<float> ne(const Vectorized<float>& other) const;
377
+ Vectorized<float> gt(const Vectorized<float>& other) const;
378
+ Vectorized<float> ge(const Vectorized<float>& other) const;
379
+ Vectorized<float> lt(const Vectorized<float>& other) const;
380
+ Vectorized<float> le(const Vectorized<float>& other) const;
381
+ };
382
+
383
+ template <>
384
+ Vectorized<float> inline operator+(const Vectorized<float>& a, const Vectorized<float>& b) {
385
+ return _mm512_add_ps(a, b);
386
+ }
387
+
388
+ template <>
389
+ Vectorized<float> inline operator-(const Vectorized<float>& a, const Vectorized<float>& b) {
390
+ return _mm512_sub_ps(a, b);
391
+ }
392
+
393
+ template <>
394
+ Vectorized<float> inline operator*(const Vectorized<float>& a, const Vectorized<float>& b) {
395
+ return _mm512_mul_ps(a, b);
396
+ }
397
+
398
+ template <>
399
+ Vectorized<float> inline operator/(const Vectorized<float>& a, const Vectorized<float>& b) {
400
+ return _mm512_div_ps(a, b);
401
+ }
402
+
403
+ // frac. Implement this here so we can use subtraction
404
+ inline Vectorized<float> Vectorized<float>::frac() const {
405
+ return *this - this->trunc();
406
+ }
407
+
408
+ // Implements the IEEE 754 201X `maximum` operation, which propagates NaN if
409
+ // either input is a NaN.
410
+ template <>
411
+ Vectorized<float> inline maximum(const Vectorized<float>& a, const Vectorized<float>& b) {
412
+ auto zero_vec = _mm512_set1_epi32(0);
413
+ auto max = _mm512_max_ps(a, b);
414
+ auto isnan_mask = _mm512_cmp_ps_mask(a, b, _CMP_UNORD_Q);
415
+ auto isnan = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, isnan_mask,
416
+ 0xFFFFFFFF));
417
+ // Exploit the fact that all-ones is a NaN.
418
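+ // For example, if a lane of a is NaN while the matching lane of b is 0.f,
+ // _mm512_max_ps alone would yield 0.f there; OR-ing with the all-ones mask
+ // turns that lane back into a NaN, giving the propagating behaviour above.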
+ return _mm512_or_ps(max, isnan);
419
+ }
420
+
421
+ // Implements the IEEE 754 201X `minimum` operation, which propagates NaN if
422
+ // either input is a NaN.
423
+ template <>
424
+ Vectorized<float> inline minimum(const Vectorized<float>& a, const Vectorized<float>& b) {
425
+ auto zero_vec = _mm512_set1_epi32(0);
426
+ auto min = _mm512_min_ps(a, b);
427
+ auto isnan_mask = _mm512_cmp_ps_mask(a, b, _CMP_UNORD_Q);
428
+ auto isnan = _mm512_castsi512_ps(_mm512_mask_set1_epi32(zero_vec, isnan_mask,
429
+ 0xFFFFFFFF));
430
+ // Exploit the fact that all-ones is a NaN.
431
+ return _mm512_or_ps(min, isnan);
432
+ }
433
+
434
+ template <>
435
+ Vectorized<float> inline clamp(const Vectorized<float>& a, const Vectorized<float>& min, const Vectorized<float>& max) {
436
+ return _mm512_min_ps(max, _mm512_max_ps(min, a));
437
+ }
438
+
439
+ template <>
440
+ Vectorized<float> inline clamp_max(const Vectorized<float>& a, const Vectorized<float>& max) {
441
+ return _mm512_min_ps(max, a);
442
+ }
443
+
444
+ template <>
445
+ Vectorized<float> inline clamp_min(const Vectorized<float>& a, const Vectorized<float>& min) {
446
+ return _mm512_max_ps(min, a);
447
+ }
448
+
449
+ template <>
450
+ Vectorized<float> inline operator&(const Vectorized<float>& a, const Vectorized<float>& b) {
451
+ return _mm512_and_ps(a, b);
452
+ }
453
+
454
+ template <>
455
+ Vectorized<float> inline operator|(const Vectorized<float>& a, const Vectorized<float>& b) {
456
+ return _mm512_or_ps(a, b);
457
+ }
458
+
459
+ template <>
460
+ Vectorized<float> inline operator^(const Vectorized<float>& a, const Vectorized<float>& b) {
461
+ return _mm512_xor_ps(a, b);
462
+ }
463
+
464
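+ // Unlike the comparison operators above, which return all-ones bit patterns,
+ // eq/ne/gt/ge/lt/le return 1.0f in lanes where the predicate holds and 0.0f elsewhere.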
+ inline Vectorized<float> Vectorized<float>::eq(const Vectorized<float>& other) const {
465
+ return (*this == other) & Vectorized<float>(1.0f);
466
+ }
467
+
468
+ inline Vectorized<float> Vectorized<float>::ne(const Vectorized<float>& other) const {
469
+ return (*this != other) & Vectorized<float>(1.0f);
470
+ }
471
+
472
+ inline Vectorized<float> Vectorized<float>::gt(const Vectorized<float>& other) const {
473
+ return (*this > other) & Vectorized<float>(1.0f);
474
+ }
475
+
476
+ inline Vectorized<float> Vectorized<float>::ge(const Vectorized<float>& other) const {
477
+ return (*this >= other) & Vectorized<float>(1.0f);
478
+ }
479
+
480
+ inline Vectorized<float> Vectorized<float>::lt(const Vectorized<float>& other) const {
481
+ return (*this < other) & Vectorized<float>(1.0f);
482
+ }
483
+
484
+ inline Vectorized<float> Vectorized<float>::le(const Vectorized<float>& other) const {
485
+ return (*this <= other) & Vectorized<float>(1.0f);
486
+ }
487
+
488
+ template <>
489
+ inline void convert(const float* src, float* dst, int64_t n) {
490
+ int64_t i;
491
+ #pragma unroll
492
+ for (i = 0; i <= (n - Vectorized<float>::size()); i += Vectorized<float>::size()) {
493
+ _mm512_storeu_ps(dst + i, _mm512_loadu_ps(src + i));
494
+ }
495
+ #pragma unroll
496
+ for (; i < n; i++) {
497
+ dst[i] = src[i];
498
+ }
499
+ }
500
+
501
+ template <>
502
+ Vectorized<float> inline fmadd(const Vectorized<float>& a, const Vectorized<float>& b, const Vectorized<float>& c) {
503
+ return _mm512_fmadd_ps(a, b, c);
504
+ }
505
+
506
+ template <>
507
+ Vectorized<float> inline fmsub(const Vectorized<float>& a, const Vectorized<float>& b, const Vectorized<float>& c) {
508
+ return _mm512_fmsub_ps(a, b, c);
509
+ }
510
+
511
+ // TODO(jgong5): rewrite with ATEN vectorized (need to add unpack and shuffle)
512
+ // Used by Inductor CPP codegen
513
+ // Code referred to FBGEMM:
514
+ // https://github.com/pytorch/FBGEMM/blob/39a423e4ad1a04b77fea81c7d09c3e6f8984fae9/src/UtilsAvx512.cc#LL19C6-L19C6
515
+ // 16 * 6 = 96 instructions
516
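+ // The kernel runs in four 16-instruction stages (32-bit unpacks, 64-bit
+ // unpacks, and two rounds of 128-bit lane shuffles) plus 16 loads and 16 stores.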
+ template<>
517
+ inline void transpose_mxn<float, 16, 16>(
518
+ const float* src,
519
+ int64_t ld_src,
520
+ float* dst,
521
+ int64_t ld_dst) {
522
+ // load from src to registers
523
+ // a: a0 a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 a11 a12 a13 a14 a15
524
+ // b: b0 b1 b2 b3 b4 b5 b6 b7 b8 b9 b10 b11 b12 b13 b14 b15
525
+ // c: c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 c10 c11 c12 c13 c14 c15
526
+ // d: d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15
527
+ // e: e0 e1 e2 e3 e4 e5 e6 e7 e8 e9 e10 e11 e12 e13 e14 e15
528
+ // f: f0 f1 f2 f3 f4 f5 f6 f7 f8 f9 f10 f11 f12 f13 f14 f15
529
+ // g: g0 g1 g2 g3 g4 g5 g6 g7 g8 g9 g10 g11 g12 g13 g14 g15
530
+ // h: h0 h1 h2 h3 h4 h5 h6 h7 h8 h9 h10 h11 h12 h13 h14 h15
531
+ // i: i0 i1 i2 i3 i4 i5 i6 i7 i8 i9 i10 i11 i12 i13 i14 i15
532
+ // j: j0 j1 j2 j3 j4 j5 j6 j7 j8 j9 j10 j11 j12 j13 j14 j15
533
+ // k: k0 k1 k2 k3 k4 k5 k6 k7 k8 k9 k10 k11 k12 k13 k14 k15
534
+ // l: l0 l1 l2 l3 l4 l5 l6 l7 l8 l9 l10 l11 l12 l13 l14 l15
535
+ // m: m0 m1 m2 m3 m4 m5 m6 m7 m8 m9 m10 m11 m12 m13 m14 m15
536
+ // n: n0 n1 n2 n3 n4 n5 n6 n7 n8 n9 n10 n11 n12 n13 n14 n15
537
+ // o: o0 o1 o2 o3 o4 o5 o6 o7 o8 o9 o10 o11 o12 o13 o14 o15
538
+ // p: p0 p1 p2 p3 p4 p5 p6 p7 p8 p9 p10 p11 p12 p13 p14 p15
539
+ __m512 a = _mm512_loadu_ps(&src[0 * ld_src]);
540
+ __m512 b = _mm512_loadu_ps(&src[1 * ld_src]);
541
+ __m512 c = _mm512_loadu_ps(&src[2 * ld_src]);
542
+ __m512 d = _mm512_loadu_ps(&src[3 * ld_src]);
543
+ __m512 e = _mm512_loadu_ps(&src[4 * ld_src]);
544
+ __m512 f = _mm512_loadu_ps(&src[5 * ld_src]);
545
+ __m512 g = _mm512_loadu_ps(&src[6 * ld_src]);
546
+ __m512 h = _mm512_loadu_ps(&src[7 * ld_src]);
547
+ __m512 i = _mm512_loadu_ps(&src[8 * ld_src]);
548
+ __m512 j = _mm512_loadu_ps(&src[9 * ld_src]);
549
+ __m512 k = _mm512_loadu_ps(&src[10 * ld_src]);
550
+ __m512 l = _mm512_loadu_ps(&src[11 * ld_src]);
551
+ __m512 m = _mm512_loadu_ps(&src[12 * ld_src]);
552
+ __m512 n = _mm512_loadu_ps(&src[13 * ld_src]);
553
+ __m512 o = _mm512_loadu_ps(&src[14 * ld_src]);
554
+ __m512 p = _mm512_loadu_ps(&src[15 * ld_src]);
555
+
556
+ __m512 ta, tb, tc, td, te, tf, tg, th, ti, tj, tk, tl, tm, tn, to, tq;
557
+ // unpacking and interleaving 32-bit elements
558
+ // a0 b0 a1 b1 a4 b4 a5 b5 a8 b8 a9 b9 a12 b12 a13 b13
559
+ // a2 b2 a3 b3 a6 b6 a7 b7 a10 b10 a11 b11 a14 b14 a15 b15
560
+ // c0 d0 c1 d1 ...
561
+ // c2 d2 c3 d3 ...
562
+ // e0 f0 e1 f1 ...
563
+ // e2 f2 e3 f3 ...
564
+ // g0 h0 g1 h1 ...
565
+ // g2 h2 g3 h3 ...
566
+ // i0 ...
567
+ // i2 ...
568
+ // k0 ...
569
+ // k2 ...
570
+ // m0 ...
571
+ // m2 ...
572
+ // o0 ...
573
+ // o2 ...
574
+ ta = _mm512_unpacklo_ps(a, b);
575
+ tb = _mm512_unpackhi_ps(a, b);
576
+ tc = _mm512_unpacklo_ps(c, d);
577
+ td = _mm512_unpackhi_ps(c, d);
578
+ te = _mm512_unpacklo_ps(e, f);
579
+ tf = _mm512_unpackhi_ps(e, f);
580
+ tg = _mm512_unpacklo_ps(g, h);
581
+ th = _mm512_unpackhi_ps(g, h);
582
+ ti = _mm512_unpacklo_ps(i, j);
583
+ tj = _mm512_unpackhi_ps(i, j);
584
+ tk = _mm512_unpacklo_ps(k, l);
585
+ tl = _mm512_unpackhi_ps(k, l);
586
+ tm = _mm512_unpacklo_ps(m, n);
587
+ tn = _mm512_unpackhi_ps(m, n);
588
+ to = _mm512_unpacklo_ps(o, p);
589
+ tq = _mm512_unpackhi_ps(o, p);
590
+
591
+ // unpacking and interleaving 64-bit elements
592
+ // a0 b0 c0 d0 a4 b4 c4 d4 a8 b8 c8 d8 a12 b12 c12 d12
593
+ // a1 b1 c1 d1 ...
594
+ // a2 b2 c2 d2 ...
595
+ // a3 b3 c3 d3 ...
596
+ // e0 f0 g0 h0 e4 f4 g4 h4 e8 f8 g8 h8 e12 f12 g12 h12
597
+ // e1 f1 g1 h1 ...
598
+ // e2 f2 g2 h2 ...
599
+ // e3 f3 g3 h3 ...
600
+ // i0 j0 k0 l0 ...
601
+ // i1 j1 k1 l1 ...
602
+ // i2 j2 k2 l2 ...
603
+ // i3 j3 k3 l3 ...
604
+ // m0 n0 o0 p0 ...
605
+ // m1 n1 o1 p1 ...
606
+ // m2 n2 o2 p2 ...
607
+ // m3 n3 o3 p3 ...
608
+ a = _mm512_castpd_ps(
609
+ _mm512_unpacklo_pd(_mm512_castps_pd(ta), _mm512_castps_pd(tc)));
610
+ b = _mm512_castpd_ps(
611
+ _mm512_unpackhi_pd(_mm512_castps_pd(ta), _mm512_castps_pd(tc)));
612
+ c = _mm512_castpd_ps(
613
+ _mm512_unpacklo_pd(_mm512_castps_pd(tb), _mm512_castps_pd(td)));
614
+ d = _mm512_castpd_ps(
615
+ _mm512_unpackhi_pd(_mm512_castps_pd(tb), _mm512_castps_pd(td)));
616
+ e = _mm512_castpd_ps(
617
+ _mm512_unpacklo_pd(_mm512_castps_pd(te), _mm512_castps_pd(tg)));
618
+ f = _mm512_castpd_ps(
619
+ _mm512_unpackhi_pd(_mm512_castps_pd(te), _mm512_castps_pd(tg)));
620
+ g = _mm512_castpd_ps(
621
+ _mm512_unpacklo_pd(_mm512_castps_pd(tf), _mm512_castps_pd(th)));
622
+ h = _mm512_castpd_ps(
623
+ _mm512_unpackhi_pd(_mm512_castps_pd(tf), _mm512_castps_pd(th)));
624
+ i = _mm512_castpd_ps(
625
+ _mm512_unpacklo_pd(_mm512_castps_pd(ti), _mm512_castps_pd(tk)));
626
+ j = _mm512_castpd_ps(
627
+ _mm512_unpackhi_pd(_mm512_castps_pd(ti), _mm512_castps_pd(tk)));
628
+ k = _mm512_castpd_ps(
629
+ _mm512_unpacklo_pd(_mm512_castps_pd(tj), _mm512_castps_pd(tl)));
630
+ l = _mm512_castpd_ps(
631
+ _mm512_unpackhi_pd(_mm512_castps_pd(tj), _mm512_castps_pd(tl)));
632
+ m = _mm512_castpd_ps(
633
+ _mm512_unpacklo_pd(_mm512_castps_pd(tm), _mm512_castps_pd(to)));
634
+ n = _mm512_castpd_ps(
635
+ _mm512_unpackhi_pd(_mm512_castps_pd(tm), _mm512_castps_pd(to)));
636
+ o = _mm512_castpd_ps(
637
+ _mm512_unpacklo_pd(_mm512_castps_pd(tn), _mm512_castps_pd(tq)));
638
+ p = _mm512_castpd_ps(
639
+ _mm512_unpackhi_pd(_mm512_castps_pd(tn), _mm512_castps_pd(tq)));
640
+
641
+ // shuffle 128-bits (composed of 4 32-bit elements)
642
+ // a0 b0 c0 d0 a8 b8 c8 d8 e0 f0 g0 h0 e8 f8 g8 h8
643
+ // a1 b1 c1 d1 ...
644
+ // a2 b2 c2 d2 ...
645
+ // a3 b3 c3 d3 ...
646
+ // a4 b4 c4 d4 ...
647
+ // a5 b5 c5 d5 ...
648
+ // a6 b6 c6 d6 ...
649
+ // a7 b7 c7 d7 ...
650
+ // i0 j0 k0 l0 i8 j8 k8 l8 m0 n0 o0 p0 m8 n8 o8 p8
651
+ // i1 j1 k1 l1 ...
652
+ // i2 j2 k2 l2 ...
653
+ // i3 j3 k3 l3 ...
654
+ // i4 j4 k4 l4 ...
655
+ // i5 j5 k5 l5 ...
656
+ // i6 j6 k6 l6 ...
657
+ // i7 j7 k7 l7 ...
658
+ ta = _mm512_shuffle_f32x4(a, e, 0x88);
659
+ tb = _mm512_shuffle_f32x4(b, f, 0x88);
660
+ tc = _mm512_shuffle_f32x4(c, g, 0x88);
661
+ td = _mm512_shuffle_f32x4(d, h, 0x88);
662
+ te = _mm512_shuffle_f32x4(a, e, 0xdd);
663
+ tf = _mm512_shuffle_f32x4(b, f, 0xdd);
664
+ tg = _mm512_shuffle_f32x4(c, g, 0xdd);
665
+ th = _mm512_shuffle_f32x4(d, h, 0xdd);
666
+ ti = _mm512_shuffle_f32x4(i, m, 0x88);
667
+ tj = _mm512_shuffle_f32x4(j, n, 0x88);
668
+ tk = _mm512_shuffle_f32x4(k, o, 0x88);
669
+ tl = _mm512_shuffle_f32x4(l, p, 0x88);
670
+ tm = _mm512_shuffle_f32x4(i, m, 0xdd);
671
+ tn = _mm512_shuffle_f32x4(j, n, 0xdd);
672
+ to = _mm512_shuffle_f32x4(k, o, 0xdd);
673
+ tq = _mm512_shuffle_f32x4(l, p, 0xdd);
674
+
675
+ // shuffle 128-bits (composed of 4 32-bit elements)
676
+ // a0 b0 c0 d0 ... o0
677
+ // a1 b1 c1 d1 ... o1
678
+ // a2 b2 c2 d2 ... o2
679
+ // a3 b3 c3 d3 ... o3
680
+ // a4 ...
681
+ // a5 ...
682
+ // a6 ...
683
+ // a7 ...
684
+ // a8 ...
685
+ // a9 ...
686
+ // a10 ...
687
+ // a11 ...
688
+ // a12 ...
689
+ // a13 ...
690
+ // a14 ...
691
+ // a15 b15 c15 d15 ... o15
692
+ a = _mm512_shuffle_f32x4(ta, ti, 0x88);
693
+ b = _mm512_shuffle_f32x4(tb, tj, 0x88);
694
+ c = _mm512_shuffle_f32x4(tc, tk, 0x88);
695
+ d = _mm512_shuffle_f32x4(td, tl, 0x88);
696
+ e = _mm512_shuffle_f32x4(te, tm, 0x88);
697
+ f = _mm512_shuffle_f32x4(tf, tn, 0x88);
698
+ g = _mm512_shuffle_f32x4(tg, to, 0x88);
699
+ h = _mm512_shuffle_f32x4(th, tq, 0x88);
700
+ i = _mm512_shuffle_f32x4(ta, ti, 0xdd);
701
+ j = _mm512_shuffle_f32x4(tb, tj, 0xdd);
702
+ k = _mm512_shuffle_f32x4(tc, tk, 0xdd);
703
+ l = _mm512_shuffle_f32x4(td, tl, 0xdd);
704
+ m = _mm512_shuffle_f32x4(te, tm, 0xdd);
705
+ n = _mm512_shuffle_f32x4(tf, tn, 0xdd);
706
+ o = _mm512_shuffle_f32x4(tg, to, 0xdd);
707
+ p = _mm512_shuffle_f32x4(th, tq, 0xdd);
708
+
709
+ // store from registers to dst
710
+ _mm512_storeu_ps(&dst[0 * ld_dst], a);
711
+ _mm512_storeu_ps(&dst[1 * ld_dst], b);
712
+ _mm512_storeu_ps(&dst[2 * ld_dst], c);
713
+ _mm512_storeu_ps(&dst[3 * ld_dst], d);
714
+ _mm512_storeu_ps(&dst[4 * ld_dst], e);
715
+ _mm512_storeu_ps(&dst[5 * ld_dst], f);
716
+ _mm512_storeu_ps(&dst[6 * ld_dst], g);
717
+ _mm512_storeu_ps(&dst[7 * ld_dst], h);
718
+ _mm512_storeu_ps(&dst[8 * ld_dst], i);
719
+ _mm512_storeu_ps(&dst[9 * ld_dst], j);
720
+ _mm512_storeu_ps(&dst[10 * ld_dst], k);
721
+ _mm512_storeu_ps(&dst[11 * ld_dst], l);
722
+ _mm512_storeu_ps(&dst[12 * ld_dst], m);
723
+ _mm512_storeu_ps(&dst[13 * ld_dst], n);
724
+ _mm512_storeu_ps(&dst[14 * ld_dst], o);
725
+ _mm512_storeu_ps(&dst[15 * ld_dst], p);
726
+ }
727
+
728
+ #endif
729
+
730
+ }}}
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_int.h ADDED
@@ -0,0 +1,1448 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+ #include <ATen/cpu/vec/vec_base.h>
8
+ #include <c10/macros/Macros.h>
9
+ #include <c10/util/irange.h>
10
+
11
+ namespace at {
12
+ namespace vec {
13
+ inline namespace CPU_CAPABILITY {
14
+
15
+ #ifdef CPU_CAPABILITY_AVX512
16
+
17
+ struct Vectorizedi {
18
+ protected:
19
+ __m512i values;
20
+ static constexpr __m512i zero_vector {0, 0, 0, 0, 0, 0, 0, 0};
21
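+ // Bitwise NOT, implemented as XOR with an all-ones vector.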
+ static inline __m512i invert(const __m512i& v) {
22
+ const auto ones = _mm512_set1_epi64(-1);
23
+ return _mm512_xor_si512(ones, v);
24
+ }
25
+ public:
26
+ Vectorizedi() {}
27
+ Vectorizedi(__m512i v) : values(v) {}
28
+ operator __m512i() const {
29
+ return values;
30
+ }
31
+ };
32
+
33
+ #else
34
+
35
+ struct Vectorizedi {}; // dummy definition to make Vectorizedi always defined
36
+
37
+ #endif // CPU_CAPABILITY_AVX512
38
+
39
+ #ifdef CPU_CAPABILITY_AVX512
40
+
41
+ template <>
42
+ class Vectorized<int64_t> : public Vectorizedi {
43
+ private:
44
+ static const Vectorized<int64_t> ones;
45
+ public:
46
+ using value_type = int64_t;
47
+ using size_type = int;
48
+ static constexpr size_type size() {
49
+ return 8;
50
+ }
51
+ using Vectorizedi::Vectorizedi;
52
+ Vectorized() {}
53
+ Vectorized(int64_t v) { values = _mm512_set1_epi64(v); }
54
+ Vectorized(int64_t val1, int64_t val2, int64_t val3, int64_t val4,
55
+ int64_t val5, int64_t val6, int64_t val7, int64_t val8) {
56
+ values = _mm512_setr_epi64(val1, val2, val3, val4,
57
+ val5, val6, val7, val8);
58
+ }
59
+ template <int64_t mask>
60
+ static Vectorized<int64_t> blend(Vectorized<int64_t> a, Vectorized<int64_t> b) {
61
+ return _mm512_mask_blend_epi64(mask, a.values, b.values);
62
+ }
63
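+ // blendv expects a mask produced by the comparison operators: lanes that are
+ // all-ones select from b, every other lane keeps the value from a.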
+ static Vectorized<int64_t> blendv(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b,
64
+ const Vectorized<int64_t>& mask) {
65
+ auto msb_one = _mm512_set1_epi64(0xFFFFFFFFFFFFFFFF);
66
+ auto mask_ = _mm512_cmp_epi64_mask(mask, msb_one, _MM_CMPINT_EQ);
67
+ return _mm512_mask_blend_epi64(mask_, a.values, b.values);
68
+ }
69
+ template <typename step_t>
70
+ static Vectorized<int64_t> arange(int64_t base = 0, step_t step = static_cast<step_t>(1)) {
71
+ return Vectorized<int64_t>(base, base + step, base + 2 * step, base + 3 * step,
72
+ base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step);
73
+ }
74
+ static Vectorized<int64_t>
75
+ set(Vectorized<int64_t> a, Vectorized<int64_t> b, int64_t count = size()) {
76
+ switch (count) {
77
+ case 0:
78
+ return a;
79
+ case 1:
80
+ return blend<1>(a, b);
81
+ case 2:
82
+ return blend<3>(a, b);
83
+ case 3:
84
+ return blend<7>(a, b);
85
+ case 4:
86
+ return blend<15>(a, b);
87
+ case 5:
88
+ return blend<31>(a, b);
89
+ case 6:
90
+ return blend<63>(a, b);
91
+ case 7:
92
+ return blend<127>(a, b);
93
+ }
94
+ return b;
95
+ }
96
+ static Vectorized<int64_t> loadu(const void* ptr) {
97
+ return _mm512_loadu_si512(reinterpret_cast<const __m512i*>(ptr));
98
+ }
99
+ static Vectorized<int64_t> loadu(const void* ptr, int64_t count) {
100
+ __at_align__ int64_t tmp_values[size()];
101
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
102
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
103
+ // instructions while a loop would be compiled to one instruction.
104
+ for (const auto i : c10::irange(size())) {
105
+ tmp_values[i] = 0;
106
+ }
107
+ std::memcpy(tmp_values, ptr, count * sizeof(int64_t));
108
+ return loadu(tmp_values);
109
+ }
110
+ void store(void* ptr, int count = size()) const {
111
+ if (count == size()) {
112
+ // ptr need not be aligned here. See
113
+ // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm512-storeu-si512.html
114
+ _mm512_storeu_si512(reinterpret_cast<__m512i*>(ptr), values);
115
+ } else if (count > 0) {
116
+ __at_align__ int64_t tmp_values[size()];
117
+ _mm512_storeu_si512(reinterpret_cast<__m512i*>(tmp_values), values);
118
+ std::memcpy(ptr, tmp_values, count * sizeof(int64_t));
119
+ }
120
+ }
121
+ const int64_t& operator[](int idx) const = delete;
122
+ int64_t& operator[](int idx) = delete;
123
+ Vectorized<int64_t> abs() const {
124
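+ // Branch-free two's-complement abs: negative lanes have all bits flipped
+ // (XOR with all-ones) and then 1 added via the subtraction of -1 below.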
+ auto is_larger_mask = _mm512_cmpgt_epi64_mask(zero_vector, values);
125
+ auto is_larger = _mm512_mask_set1_epi64(zero_vector, is_larger_mask, 0xFFFFFFFFFFFFFFFF);
126
+ auto inverse = _mm512_xor_si512(values, is_larger);
127
+ return _mm512_sub_epi64(inverse, is_larger);
128
+ }
129
+ Vectorized<int64_t> real() const {
130
+ return *this;
131
+ }
132
+ Vectorized<int64_t> imag() const {
133
+ return _mm512_set1_epi64(0);
134
+ }
135
+ Vectorized<int64_t> conj() const {
136
+ return *this;
137
+ }
138
+ Vectorized<int64_t> neg() const;
139
+ Vectorized<int64_t> operator==(const Vectorized<int64_t>& other) const {
140
+ auto mask = _mm512_cmpeq_epi64_mask(values, other.values);
141
+ return _mm512_mask_set1_epi64(zero_vector, mask, 0xFFFFFFFFFFFFFFFF);
142
+ }
143
+ Vectorized<int64_t> operator!=(const Vectorized<int64_t>& other) const {
144
+ auto mask = _mm512_cmpneq_epi64_mask(values, other.values);
145
+ return _mm512_mask_set1_epi64(zero_vector, mask, 0xFFFFFFFFFFFFFFFF);
146
+ }
147
+ Vectorized<int64_t> operator<(const Vectorized<int64_t>& other) const {
148
+ auto mask = _mm512_cmplt_epi64_mask(values, other.values);
149
+ return _mm512_mask_set1_epi64(zero_vector, mask, 0xFFFFFFFFFFFFFFFF);
150
+ }
151
+ Vectorized<int64_t> operator<=(const Vectorized<int64_t>& other) const {
152
+ auto mask = _mm512_cmple_epi64_mask(values, other.values);
153
+ return _mm512_mask_set1_epi64(zero_vector, mask, 0xFFFFFFFFFFFFFFFF);
154
+ }
155
+ Vectorized<int64_t> operator>(const Vectorized<int64_t>& other) const {
156
+ auto mask = _mm512_cmpgt_epi64_mask(values, other.values);
157
+ return _mm512_mask_set1_epi64(zero_vector, mask, 0xFFFFFFFFFFFFFFFF);
158
+ }
159
+ Vectorized<int64_t> operator>=(const Vectorized<int64_t>& other) const {
160
+ auto mask = _mm512_cmpge_epi64_mask(values, other.values);
161
+ return _mm512_mask_set1_epi64(zero_vector, mask, 0xFFFFFFFFFFFFFFFF);
162
+ }
163
+
164
+ Vectorized<int64_t> eq(const Vectorized<int64_t>& other) const;
165
+ Vectorized<int64_t> ne(const Vectorized<int64_t>& other) const;
166
+ Vectorized<int64_t> gt(const Vectorized<int64_t>& other) const;
167
+ Vectorized<int64_t> ge(const Vectorized<int64_t>& other) const;
168
+ Vectorized<int64_t> lt(const Vectorized<int64_t>& other) const;
169
+ Vectorized<int64_t> le(const Vectorized<int64_t>& other) const;
170
+ };
171
+
172
+ template <>
173
+ class Vectorized<int32_t> : public Vectorizedi {
174
+ private:
175
+ static constexpr __m512i zero_vector {0, 0, 0, 0, 0, 0, 0, 0};
176
+ static const Vectorized<int32_t> ones;
177
+ public:
178
+ using value_type = int32_t;
179
+ static constexpr int size() {
180
+ return 16;
181
+ }
182
+ using Vectorizedi::Vectorizedi;
183
+ Vectorized() {}
184
+ Vectorized(int32_t v) { values = _mm512_set1_epi32(v); }
185
+ Vectorized(int32_t val1, int32_t val2, int32_t val3, int32_t val4,
186
+ int32_t val5, int32_t val6, int32_t val7, int32_t val8,
187
+ int32_t val9, int32_t val10, int32_t val11, int32_t val12,
188
+ int32_t val13, int32_t val14, int32_t val15, int32_t val16) {
189
+ values = _mm512_setr_epi32(val1, val2, val3, val4, val5, val6, val7, val8,
190
+ val9, val10, val11, val12, val13, val14, val15, val16);
191
+ }
192
+ template <int64_t mask>
193
+ static Vectorized<int32_t> blend(Vectorized<int32_t> a, Vectorized<int32_t> b) {
194
+ return _mm512_mask_blend_epi32(mask, a.values, b.values);
195
+ }
196
+ static Vectorized<int32_t> blendv(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b,
197
+ const Vectorized<int32_t>& mask) {
198
+ auto msb_one = _mm512_set1_epi32(0xFFFFFFFF);
199
+ auto mask_ = _mm512_cmp_epi32_mask(mask, msb_one, _MM_CMPINT_EQ);
200
+ return _mm512_mask_blend_epi32(mask_, a.values, b.values);
201
+ }
202
+ template <typename step_t>
203
+ static Vectorized<int32_t> arange(int32_t base = 0, step_t step = static_cast<step_t>(1)) {
204
+ return Vectorized<int32_t>(
205
+ base, base + step, base + 2 * step, base + 3 * step,
206
+ base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step,
207
+ base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step,
208
+ base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step);
209
+ }
210
+ static Vectorized<int32_t>
211
+ set(Vectorized<int32_t> a, Vectorized<int32_t> b, int32_t count = size()) {
212
+ switch (count) {
213
+ case 0:
214
+ return a;
215
+ case 1:
216
+ return blend<1>(a, b);
217
+ case 2:
218
+ return blend<3>(a, b);
219
+ case 3:
220
+ return blend<7>(a, b);
221
+ case 4:
222
+ return blend<15>(a, b);
223
+ case 5:
224
+ return blend<31>(a, b);
225
+ case 6:
226
+ return blend<63>(a, b);
227
+ case 7:
228
+ return blend<127>(a, b);
229
+ case 8:
230
+ return blend<255>(a, b);
231
+ case 9:
232
+ return blend<511>(a, b);
233
+ case 10:
234
+ return blend<1023>(a, b);
235
+ case 11:
236
+ return blend<2047>(a, b);
237
+ case 12:
238
+ return blend<4095>(a, b);
239
+ case 13:
240
+ return blend<8191>(a, b);
241
+ case 14:
242
+ return blend<16383>(a, b);
243
+ case 15:
244
+ return blend<32767>(a, b);
245
+ }
246
+ return b;
247
+ }
248
+ static Vectorized<int32_t> loadu(const void* ptr) {
249
+ return _mm512_loadu_si512(reinterpret_cast<const __m512i*>(ptr));
250
+ }
251
+ static Vectorized<int32_t> loadu(const void* ptr, int32_t count) {
252
+ __at_align__ int32_t tmp_values[size()];
253
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
254
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
255
+ // instructions while a loop would be compiled to one instruction.
256
+ for (const auto i : c10::irange(size())) {
257
+ tmp_values[i] = 0;
258
+ }
259
+ std::memcpy(tmp_values, ptr, count * sizeof(int32_t));
260
+ return loadu(tmp_values);
261
+ }
262
+ void store(void* ptr, int count = size()) const {
263
+ if (count == size()) {
264
+ // ptr need not be aligned here. See
265
+ // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm512-storeu-si512.html
266
+ _mm512_storeu_si512(reinterpret_cast<__m512i*>(ptr), values);
267
+ } else if (count > 0) {
268
+ __at_align__ int32_t tmp_values[size()];
269
+ _mm512_storeu_si512(reinterpret_cast<__m512i*>(tmp_values), values);
270
+ std::memcpy(ptr, tmp_values, count * sizeof(int32_t));
271
+ }
272
+ }
273
+ const int32_t& operator[](int idx) const = delete;
274
+ int32_t& operator[](int idx) = delete;
275
+ Vectorized<int32_t> abs() const {
276
+ return _mm512_abs_epi32(values);
277
+ }
278
+ Vectorized<int32_t> real() const {
279
+ return *this;
280
+ }
281
+ Vectorized<int32_t> imag() const {
282
+ return _mm512_set1_epi32(0);
283
+ }
284
+ Vectorized<int32_t> conj() const {
285
+ return *this;
286
+ }
287
+ Vectorized<int32_t> neg() const;
288
+ Vectorized<int32_t> operator==(const Vectorized<int32_t>& other) const {
289
+ auto mask = _mm512_cmpeq_epi32_mask(values, other.values);
290
+ return _mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF);
291
+ }
292
+ Vectorized<int32_t> operator!=(const Vectorized<int32_t>& other) const {
293
+ auto mask = _mm512_cmpneq_epi32_mask(values, other.values);
294
+ return _mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF);
295
+ }
296
+ Vectorized<int32_t> operator<(const Vectorized<int32_t>& other) const {
297
+ auto mask = _mm512_cmplt_epi32_mask(values, other.values);
298
+ return _mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF);
299
+ }
300
+ Vectorized<int32_t> operator<=(const Vectorized<int32_t>& other) const {
301
+ auto mask = _mm512_cmple_epi32_mask(values, other.values);
302
+ return _mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF);
303
+ }
304
+ Vectorized<int32_t> operator>(const Vectorized<int32_t>& other) const {
305
+ auto mask = _mm512_cmpgt_epi32_mask(values, other.values);
306
+ return _mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF);
307
+ }
308
+ Vectorized<int32_t> operator>=(const Vectorized<int32_t>& other) const {
309
+ auto mask = _mm512_cmpge_epi32_mask(values, other.values);
310
+ return _mm512_mask_set1_epi32(zero_vector, mask, 0xFFFFFFFF);
311
+ }
312
+ Vectorized<int32_t> eq(const Vectorized<int32_t>& other) const;
313
+ Vectorized<int32_t> ne(const Vectorized<int32_t>& other) const;
314
+ Vectorized<int32_t> gt(const Vectorized<int32_t>& other) const;
315
+ Vectorized<int32_t> ge(const Vectorized<int32_t>& other) const;
316
+ Vectorized<int32_t> lt(const Vectorized<int32_t>& other) const;
317
+ Vectorized<int32_t> le(const Vectorized<int32_t>& other) const;
318
+ };
319
+
320
+ template <>
321
+ inline void convert(const int32_t *src, float *dst, int64_t n) {
322
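+ // Convert full 16-element chunks with AVX-512, then fall back to a scalar
+ // loop for any remaining tail elements.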
+ int64_t i;
323
+ // int32_t and float have same size
324
+ #ifndef _MSC_VER
325
+ # pragma unroll
326
+ #endif
327
+ for (i = 0; i <= (n - Vectorized<int32_t>::size()); i += Vectorized<int32_t>::size()) {
328
+ auto input_vec = _mm512_loadu_si512(reinterpret_cast<const __m512i*>(src + i));
329
+ auto output_vec = _mm512_cvtepi32_ps(input_vec);
330
+ _mm512_storeu_ps(reinterpret_cast<float*>(dst + i), output_vec);
331
+ }
332
+ #ifndef _MSC_VER
333
+ # pragma unroll
334
+ #endif
335
+ for (; i < n; i++) {
336
+ dst[i] = static_cast<float>(src[i]);
337
+ }
338
+ }
339
+
340
+ template <>
341
+ inline void convert(const int32_t *src, double *dst, int64_t n) {
342
+ int64_t i;
343
+ // int32_t has half the size of double
344
+ #ifndef _MSC_VER
345
+ # pragma unroll
346
+ #endif
347
+ for (i = 0; i <= (n - Vectorized<double>::size()); i += Vectorized<double>::size()) {
348
+ auto input_256_vec = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(src + i));
349
+ auto output_vec = _mm512_cvtepi32_pd(input_256_vec);
350
+ _mm512_storeu_pd(reinterpret_cast<double*>(dst + i), output_vec);
351
+ }
352
+ #ifndef _MSC_VER
353
+ # pragma unroll
354
+ #endif
355
+ for (; i < n; i++) {
356
+ dst[i] = static_cast<double>(src[i]);
357
+ }
358
+ }
359
+
360
+ template <>
361
+ class Vectorized<int16_t> : public Vectorizedi {
362
+ private:
363
+ static const Vectorized<int16_t> ones;
364
+ static constexpr __m512i zero_vector {0, 0, 0, 0, 0, 0, 0, 0};
365
+ public:
366
+ using value_type = int16_t;
367
+ static constexpr int size() {
368
+ return 32;
369
+ }
370
+ using Vectorizedi::Vectorizedi;
371
+ Vectorized() {}
372
+ Vectorized(int16_t v) { values = _mm512_set1_epi16(v); }
373
+ Vectorized(int16_t val1, int16_t val2, int16_t val3, int16_t val4,
374
+ int16_t val5, int16_t val6, int16_t val7, int16_t val8,
375
+ int16_t val9, int16_t val10, int16_t val11, int16_t val12,
376
+ int16_t val13, int16_t val14, int16_t val15, int16_t val16,
377
+ int16_t val17, int16_t val18, int16_t val19, int16_t val20,
378
+ int16_t val21, int16_t val22, int16_t val23, int16_t val24,
379
+ int16_t val25, int16_t val26, int16_t val27, int16_t val28,
380
+ int16_t val29, int16_t val30, int16_t val31, int16_t val32) {
381
+ values = _mm512_set_epi16(val32, val31, val30, val29, val28, val27, val26, val25,
382
+ val24, val23, val22, val21, val20, val19, val18, val17,
383
+ val16, val15, val14, val13, val12, val11, val10, val9,
384
+ val8, val7, val6, val5, val4, val3, val2, val1);
385
+ }
386
+ template <int64_t mask>
387
+ static Vectorized<int16_t> blend(Vectorized<int16_t> a, Vectorized<int16_t> b) {
388
+ return _mm512_mask_blend_epi16(mask, a.values, b.values);
389
+ }
390
+ static Vectorized<int16_t> blendv(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b,
391
+ const Vectorized<int16_t>& mask) {
392
+ auto msb_one = _mm512_set1_epi16(0xFFFF);
393
+ auto mask_ = _mm512_cmp_epi16_mask(mask, msb_one, _MM_CMPINT_EQ);
394
+ return _mm512_mask_blend_epi16(mask_, a.values, b.values);
395
+ }
396
+ template <typename step_t>
397
+ static Vectorized<int16_t> arange(int16_t base = 0, step_t step = static_cast<step_t>(1)) {
398
+ return Vectorized<int16_t>(
399
+ base, base + step, base + 2 * step, base + 3 * step,
400
+ base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step,
401
+ base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step,
402
+ base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step,
403
+ base + 16 * step, base + 17 * step, base + 18 * step, base + 19 * step,
404
+ base + 20 * step, base + 21 * step, base + 22 * step, base + 23 * step,
405
+ base + 24 * step, base + 25 * step, base + 26 * step, base + 27 * step,
406
+ base + 28 * step, base + 29 * step, base + 30 * step, base + 31 * step
407
+ );
408
+ }
409
+ static Vectorized<int16_t>
410
+ set(Vectorized<int16_t> a, Vectorized<int16_t> b, int16_t count = size()) {
411
+ switch (count) {
412
+ case 0:
413
+ return a;
414
+ case 1:
415
+ return blend<0x1>(a, b);
416
+ case 2:
417
+ return blend<0x3>(a, b);
418
+ case 3:
419
+ return blend<0x7>(a, b);
420
+ case 4:
421
+ return blend<0xF>(a, b);
422
+ case 5:
423
+ return blend<0x1F>(a, b);
424
+ case 6:
425
+ return blend<0x3F>(a, b);
426
+ case 7:
427
+ return blend<0x7F>(a, b);
428
+ case 8:
429
+ return blend<0xFF>(a, b);
430
+ case 9:
431
+ return blend<0x1FF>(a, b);
432
+ case 10:
433
+ return blend<0x3FF>(a, b);
434
+ case 11:
435
+ return blend<0x7FF>(a, b);
436
+ case 12:
437
+ return blend<0xFFF>(a, b);
438
+ case 13:
439
+ return blend<0x1FFF>(a, b);
440
+ case 14:
441
+ return blend<0x3FFF>(a, b);
442
+ case 15:
443
+ return blend<0x7FFF>(a, b);
444
+ case 16:
445
+ return blend<0xFFFF>(a, b);
446
+ case 17:
447
+ return blend<0x1FFFF>(a, b);
448
+ case 18:
449
+ return blend<0x3FFFF>(a, b);
450
+ case 19:
451
+ return blend<0x7FFFF>(a, b);
452
+ case 20:
453
+ return blend<0xFFFFF>(a, b);
454
+ case 21:
455
+ return blend<0x1FFFFF>(a, b);
456
+ case 22:
457
+ return blend<0x3FFFFF>(a, b);
458
+ case 23:
459
+ return blend<0x7FFFFF>(a, b);
460
+ case 24:
461
+ return blend<0xFFFFFF>(a, b);
462
+ case 25:
463
+ return blend<0x1FFFFFF>(a, b);
464
+ case 26:
465
+ return blend<0x3FFFFFF>(a, b);
466
+ case 27:
467
+ return blend<0x7FFFFFF>(a, b);
468
+ case 28:
469
+ return blend<0xFFFFFFF>(a, b);
470
+ case 29:
471
+ return blend<0x1FFFFFFF>(a, b);
472
+ case 30:
473
+ return blend<0x3FFFFFFF>(a, b);
474
+ case 31:
475
+ return blend<0x7FFFFFFF>(a, b);
476
+ }
477
+ return b;
478
+ }
479
+ static Vectorized<int16_t> loadu(const void* ptr) {
480
+ return _mm512_loadu_si512(reinterpret_cast<const __m512i*>(ptr));
481
+ }
482
+ static Vectorized<int16_t> loadu(const void* ptr, int16_t count) {
483
+ __at_align__ int16_t tmp_values[size()];
484
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
485
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
486
+ // instructions while a loop would be compiled to one instruction.
487
+ for (const auto i : c10::irange(size())) {
488
+ tmp_values[i] = 0;
489
+ }
490
+ std::memcpy(tmp_values, ptr, count * sizeof(int16_t));
491
+ return loadu(tmp_values);
492
+ }
493
+ void store(void* ptr, int count = size()) const {
494
+ if (count == size()) {
495
+ // ptr need not be aligned here. See
496
+ // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm512-storeu-si512.html
497
+ _mm512_storeu_si512(reinterpret_cast<__m512i*>(ptr), values);
498
+ } else if (count > 0) {
499
+ __at_align__ int16_t tmp_values[size()];
500
+ _mm512_storeu_si512(reinterpret_cast<__m512i*>(tmp_values), values);
501
+ std::memcpy(ptr, tmp_values, count * sizeof(int16_t));
502
+ }
503
+ }
504
+ const int16_t& operator[](int idx) const = delete;
505
+ int16_t& operator[](int idx) = delete;
506
+ Vectorized<int16_t> abs() const {
507
+ return _mm512_abs_epi16(values);
508
+ }
509
+ Vectorized<int16_t> real() const {
510
+ return *this;
511
+ }
512
+ Vectorized<int16_t> imag() const {
513
+ return _mm512_set1_epi16(0);
514
+ }
515
+ Vectorized<int16_t> conj() const {
516
+ return *this;
517
+ }
518
+ Vectorized<int16_t> neg() const;
519
+ Vectorized<int16_t> operator==(const Vectorized<int16_t>& other) const {
520
+ auto mask = _mm512_cmpeq_epi16_mask(values, other.values);
521
+ return _mm512_mask_set1_epi16(zero_vector, mask, 0xFFFF);
522
+ }
523
+ Vectorized<int16_t> operator!=(const Vectorized<int16_t>& other) const {
524
+ auto mask = _mm512_cmpneq_epi16_mask(values, other.values);
525
+ return _mm512_mask_set1_epi16(zero_vector, mask, 0xFFFF);
526
+ }
527
+ Vectorized<int16_t> operator<(const Vectorized<int16_t>& other) const {
528
+ auto mask = _mm512_cmplt_epi16_mask(values, other.values);
529
+ return _mm512_mask_set1_epi16(zero_vector, mask, 0xFFFF);
530
+ }
531
+ Vectorized<int16_t> operator<=(const Vectorized<int16_t>& other) const {
532
+ auto mask = _mm512_cmple_epi16_mask(values, other.values);
533
+ return _mm512_mask_set1_epi16(zero_vector, mask, 0xFFFF);
534
+ }
535
+ Vectorized<int16_t> operator>(const Vectorized<int16_t>& other) const {
536
+ auto mask = _mm512_cmpgt_epi16_mask(values, other.values);
537
+ return _mm512_mask_set1_epi16(zero_vector, mask, 0xFFFF);
538
+ }
539
+ Vectorized<int16_t> operator>=(const Vectorized<int16_t>& other) const {
540
+ auto mask = _mm512_cmpge_epi16_mask(values, other.values);
541
+ return _mm512_mask_set1_epi16(zero_vector, mask, 0xFFFF);
542
+ }
543
+
544
+ Vectorized<int16_t> eq(const Vectorized<int16_t>& other) const;
545
+ Vectorized<int16_t> ne(const Vectorized<int16_t>& other) const;
546
+ Vectorized<int16_t> gt(const Vectorized<int16_t>& other) const;
547
+ Vectorized<int16_t> ge(const Vectorized<int16_t>& other) const;
548
+ Vectorized<int16_t> lt(const Vectorized<int16_t>& other) const;
549
+ Vectorized<int16_t> le(const Vectorized<int16_t>& other) const;
550
+ };
551
+
552
+ template <typename T>
553
+ class Vectorized8 : public Vectorizedi {
554
+ static_assert(
555
+ std::is_same<T, int8_t>::value || std::is_same<T, uint8_t>::value,
556
+ "Only int8_t/uint8_t are supported");
557
+ protected:
558
+ static constexpr __m512i zero_vector {0, 0, 0, 0, 0, 0, 0, 0};
559
+ static const Vectorized<T> ones;
560
+ public:
561
+ using value_type = T;
562
+ static constexpr int size() {
563
+ return 64;
564
+ }
565
+ using Vectorizedi::Vectorizedi;
566
+ Vectorized8() {}
567
+ Vectorized8(T v) { values = _mm512_set1_epi8(v); }
568
+ Vectorized8(T val1, T val2, T val3, T val4,
569
+ T val5, T val6, T val7, T val8,
570
+ T val9, T val10, T val11, T val12,
571
+ T val13, T val14, T val15, T val16,
572
+ T val17, T val18, T val19, T val20,
573
+ T val21, T val22, T val23, T val24,
574
+ T val25, T val26, T val27, T val28,
575
+ T val29, T val30, T val31, T val32,
576
+ T val33, T val34, T val35, T val36,
577
+ T val37, T val38, T val39, T val40,
578
+ T val41, T val42, T val43, T val44,
579
+ T val45, T val46, T val47, T val48,
580
+ T val49, T val50, T val51, T val52,
581
+ T val53, T val54, T val55, T val56,
582
+ T val57, T val58, T val59, T val60,
583
+ T val61, T val62, T val63, T val64){
584
+ values = _mm512_set_epi8(val64, val63, val62, val61, val60, val59, val58, val57,
585
+ val56, val55, val54, val53,val52, val51, val50, val49,
586
+ val48, val47, val46, val45, val44, val43, val42, val41,
587
+ val40, val39, val38, val37, val36, val35, val34, val33,
588
+ val32, val31, val30, val29, val28, val27, val26, val25,
589
+ val24, val23, val22, val21, val20, val19, val18, val17,
590
+ val16, val15, val14, val13, val12, val11, val10, val9,
591
+ val8, val7, val6, val5, val4, val3, val2, val1);
592
+ }
593
+ template <int64_t mask>
594
+ static Vectorized<T> blend(Vectorized<T> a, Vectorized<T> b) {
595
+ return _mm512_mask_blend_epi8(mask, a.values, b.values);
596
+ }
597
+ template <typename step_t>
598
+ static Vectorized<T> arange(T base = 0, step_t step = static_cast<step_t>(1)) {
599
+ return Vectorized<T>(
600
+ base, base + step, base + 2 * step, base + 3 * step,
601
+ base + 4 * step, base + 5 * step, base + 6 * step, base + 7 * step,
602
+ base + 8 * step, base + 9 * step, base + 10 * step, base + 11 * step,
603
+ base + 12 * step, base + 13 * step, base + 14 * step, base + 15 * step,
604
+ base + 16 * step, base + 17 * step, base + 18 * step, base + 19 * step,
605
+ base + 20 * step, base + 21 * step, base + 22 * step, base + 23 * step,
606
+ base + 24 * step, base + 25 * step, base + 26 * step, base + 27 * step,
607
+ base + 28 * step, base + 29 * step, base + 30 * step, base + 31 * step,
608
+ base + 32 * step, base + 33 * step, base + 34 * step, base + 35 * step,
609
+ base + 36 * step, base + 37 * step, base + 38 * step, base + 39 * step,
610
+ base + 40 * step, base + 41 * step, base + 42 * step, base + 43 * step,
611
+ base + 44 * step, base + 45 * step, base + 46 * step, base + 47 * step,
612
+ base + 48 * step, base + 49 * step, base + 50 * step, base + 51 * step,
613
+ base + 52 * step, base + 53 * step, base + 54 * step, base + 55 * step,
614
+ base + 56 * step, base + 57 * step, base + 58 * step, base + 59 * step,
615
+ base + 60 * step, base + 61 * step, base + 62 * step, base + 63 * step);
616
+ }
617
+ static Vectorized<T>
618
+ set(Vectorized<T> a, Vectorized<T> b, T count = size()) {
619
+ switch (count) {
620
+ case 0:
621
+ return a;
622
+ case 1:
623
+ return blend<0x1>(a, b);
624
+ case 2:
625
+ return blend<0x3>(a, b);
626
+ case 3:
627
+ return blend<0x7>(a, b);
628
+ case 4:
629
+ return blend<0xF>(a, b);
630
+ case 5:
631
+ return blend<0x1F>(a, b);
632
+ case 6:
633
+ return blend<0x3F>(a, b);
634
+ case 7:
635
+ return blend<0x7F>(a, b);
636
+ case 8:
637
+ return blend<0xFF>(a, b);
638
+ case 9:
639
+ return blend<0x1FF>(a, b);
640
+ case 10:
641
+ return blend<0x3FF>(a, b);
642
+ case 11:
643
+ return blend<0x7FF>(a, b);
644
+ case 12:
645
+ return blend<0xFFF>(a, b);
646
+ case 13:
647
+ return blend<0x1FFF>(a, b);
648
+ case 14:
649
+ return blend<0x3FFF>(a, b);
650
+ case 15:
651
+ return blend<0x7FFF>(a, b);
652
+ case 16:
653
+ return blend<0xFFFF>(a, b);
654
+ case 17:
655
+ return blend<0x1FFFF>(a, b);
656
+ case 18:
657
+ return blend<0x3FFFF>(a, b);
658
+ case 19:
659
+ return blend<0x7FFFF>(a, b);
660
+ case 20:
661
+ return blend<0xFFFFF>(a, b);
662
+ case 21:
663
+ return blend<0x1FFFFF>(a, b);
664
+ case 22:
665
+ return blend<0x3FFFFF>(a, b);
666
+ case 23:
667
+ return blend<0x7FFFFF>(a, b);
668
+ case 24:
669
+ return blend<0xFFFFFF>(a, b);
670
+ case 25:
671
+ return blend<0x1FFFFFF>(a, b);
672
+ case 26:
673
+ return blend<0x3FFFFFF>(a, b);
674
+ case 27:
675
+ return blend<0x7FFFFFF>(a, b);
676
+ case 28:
677
+ return blend<0xFFFFFFF>(a, b);
678
+ case 29:
679
+ return blend<0x1FFFFFFF>(a, b);
680
+ case 30:
681
+ return blend<0x3FFFFFFF>(a, b);
682
+ case 31:
683
+ return blend<0x7FFFFFFF>(a, b);
684
+ case 32:
685
+ return blend<0xFFFFFFFF>(a, b);
686
+ case 33:
687
+ return blend<0x1FFFFFFFF>(a, b);
688
+ case 34:
689
+ return blend<0x3FFFFFFFF>(a, b);
690
+ case 35:
691
+ return blend<0x7FFFFFFFF>(a, b);
692
+ case 36:
693
+ return blend<0xFFFFFFFFF>(a, b);
694
+ case 37:
695
+ return blend<0x1FFFFFFFFF>(a, b);
696
+ case 38:
697
+ return blend<0x3FFFFFFFFF>(a, b);
698
+ case 39:
699
+ return blend<0x7FFFFFFFFF>(a, b);
700
+ case 40:
701
+ return blend<0xFFFFFFFFFF>(a, b);
702
+ case 41:
703
+ return blend<0x1FFFFFFFFFF>(a, b);
704
+ case 42:
705
+ return blend<0x3FFFFFFFFFF>(a, b);
706
+ case 43:
707
+ return blend<0x7FFFFFFFFFF>(a, b);
708
+ case 44:
709
+ return blend<0xFFFFFFFFFFF>(a, b);
710
+ case 45:
711
+ return blend<0x1FFFFFFFFFFF>(a, b);
712
+ case 46:
713
+ return blend<0x3FFFFFFFFFFF>(a, b);
714
+ case 47:
715
+ return blend<0x7FFFFFFFFFFF>(a, b);
716
+ case 48:
717
+ return blend<0xFFFFFFFFFFFF>(a, b);
718
+ case 49:
719
+ return blend<0x1FFFFFFFFFFFF>(a, b);
720
+ case 50:
721
+ return blend<0x3FFFFFFFFFFFF>(a, b);
722
+ case 51:
723
+ return blend<0x7FFFFFFFFFFFF>(a, b);
724
+ case 52:
725
+ return blend<0xFFFFFFFFFFFFF>(a, b);
726
+ case 53:
727
+ return blend<0x1FFFFFFFFFFFFF>(a, b);
728
+ case 54:
729
+ return blend<0x3FFFFFFFFFFFFF>(a, b);
730
+ case 55:
731
+ return blend<0x7FFFFFFFFFFFFF>(a, b);
732
+ case 56:
733
+ return blend<0xFFFFFFFFFFFFFF>(a, b);
734
+ case 57:
735
+ return blend<0x1FFFFFFFFFFFFFF>(a, b);
736
+ case 58:
737
+ return blend<0x3FFFFFFFFFFFFFF>(a, b);
738
+ case 59:
739
+ return blend<0x7FFFFFFFFFFFFFF>(a, b);
740
+ case 60:
741
+ return blend<0xFFFFFFFFFFFFFFF>(a, b);
742
+ case 61:
743
+ return blend<0x1FFFFFFFFFFFFFFF>(a, b);
744
+ case 62:
745
+ return blend<0x3FFFFFFFFFFFFFFF>(a, b);
746
+ case 63:
747
+ return blend<0x7FFFFFFFFFFFFFFF>(a, b);
748
+ }
749
+ return b;
750
+ }
751
+ static Vectorized<T> loadu(const void* ptr) {
752
+ return _mm512_loadu_si512(reinterpret_cast<const __m512i*>(ptr));
753
+ }
754
+ static Vectorized<T> loadu_one_fourth(const void* ptr) {
755
+ // Fast path when loading only 16 elements.
756
+ // Note: We didn't merge it as fast path of loadu(const void* ptr, T count),
757
+ // because loadu(const void* ptr, T count) requires zero initialization for the upper 384 bits.
758
+ // However, by using _mm512_castsi128_si512, the upper 384 bits of the result are undefined.
759
+ // TODO<leslie> We can use _mm512_zextsi128_si512 in the future,
760
+ // since gcc 9.3 doesn't support it now.
761
+ __m128i input_128 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(ptr));
762
+ return _mm512_castsi128_si512(input_128);
763
+ }
764
+ static Vectorized<T> loadu(const void* ptr, T count) {
765
+ __at_align__ T tmp_values[size()];
766
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
767
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
768
+ // instructions while a loop would be compiled to one instruction.
769
+ for (const auto i : c10::irange(size())) {
770
+ tmp_values[i] = 0;
771
+ }
772
+ std::memcpy(tmp_values, ptr, count * sizeof(T));
773
+ return loadu(tmp_values);
774
+ }
775
+ void store(void* ptr, int count = size()) const {
776
+ if (count == size()) {
777
+ // ptr need not be aligned here. See
778
+ // https://software.intel.com/content/www/us/en/develop/documentation/cpp-compiler-developer-guide-and-reference/top/compiler-reference/intrinsics/intrinsics-for-intel-advanced-vector-extensions/intrinsics-for-load-and-store-operations-1/mm512-storeu-si512.html
779
+ _mm512_storeu_si512(reinterpret_cast<__m512i*>(ptr), values);
780
+ } else if (count > 0) {
781
+ if (count == 16) {
782
+ // Fast path when storing only 16 elements
783
+ _mm_storeu_si128(
784
+ reinterpret_cast<__m128i*>(ptr),
785
+ _mm512_castsi512_si128(values));
786
+ } else {
787
+ __at_align__ T tmp_values[size()];
788
+ _mm512_storeu_si512(reinterpret_cast<__m512i*>(tmp_values), values);
789
+ std::memcpy(ptr, tmp_values, count * sizeof(T));
790
+ }
791
+ }
792
+ }
793
+ const T& operator[](int idx) const = delete;
794
+ T& operator[](int idx) = delete;
795
+ Vectorized<T> real() const {
796
+ return *this;
797
+ }
798
+ Vectorized<T> imag() const {
799
+ return _mm512_set1_epi8(0);
800
+ }
801
+ Vectorized<T> conj() const {
802
+ return *this;
803
+ }
804
+ };
805
+
806
+ template<>
807
+ class Vectorized<int8_t>: public Vectorized8<int8_t> {
808
+ public:
809
+ using Vectorized8::Vectorized8;
810
+
811
+ static Vectorized<int8_t> blendv(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b,
812
+ const Vectorized<int8_t>& mask) {
813
+ auto msb_one = _mm512_set1_epi8(0xFF);
814
+ auto mask_ = _mm512_cmp_epi8_mask(mask, msb_one, _MM_CMPINT_EQ);
815
+ return _mm512_mask_blend_epi8(mask_, a.values, b.values);
816
+ }
817
+
818
+ Vectorized<int8_t> neg() const;
819
+
820
+ Vectorized<int8_t> abs() const {
821
+ return _mm512_abs_epi8(values);
822
+ }
823
+
824
+ Vectorized<int8_t> operator==(const Vectorized<int8_t>& other) const {
825
+ auto mask = _mm512_cmpeq_epi8_mask(values, other.values);
826
+ return _mm512_mask_set1_epi8(zero_vector, mask, 0xFF);
827
+ }
828
+ Vectorized<int8_t> operator!=(const Vectorized<int8_t>& other) const {
829
+ auto mask = _mm512_cmpneq_epi8_mask(values, other.values);
830
+ return _mm512_mask_set1_epi8(zero_vector, mask, 0xFF);
831
+ }
832
+ Vectorized<int8_t> operator<(const Vectorized<int8_t>& other) const {
833
+ auto mask = _mm512_cmplt_epi8_mask(values, other.values);
834
+ return _mm512_mask_set1_epi8(zero_vector, mask, 0xFF);
835
+ }
836
+ Vectorized<int8_t> operator<=(const Vectorized<int8_t>& other) const {
837
+ auto mask = _mm512_cmple_epi8_mask(values, other.values);
838
+ return _mm512_mask_set1_epi8(zero_vector, mask, 0xFF);
839
+ }
840
+ Vectorized<int8_t> operator>(const Vectorized<int8_t>& other) const {
841
+ return other < *this;
842
+ }
843
+ Vectorized<int8_t> operator>=(const Vectorized<int8_t>& other) const {
844
+ return other <= *this;
845
+ }
846
+
847
+ Vectorized<int8_t> eq(const Vectorized<int8_t>& other) const;
848
+ Vectorized<int8_t> ne(const Vectorized<int8_t>& other) const;
849
+ Vectorized<int8_t> gt(const Vectorized<int8_t>& other) const;
850
+ Vectorized<int8_t> ge(const Vectorized<int8_t>& other) const;
851
+ Vectorized<int8_t> lt(const Vectorized<int8_t>& other) const;
852
+ Vectorized<int8_t> le(const Vectorized<int8_t>& other) const;
853
+ };
854
+
855
+ template<>
856
+ class Vectorized<uint8_t>: public Vectorized8<uint8_t> {
857
+ public:
858
+ using Vectorized8::Vectorized8;
859
+
860
+ static Vectorized<uint8_t> blendv(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b,
861
+ const Vectorized<uint8_t>& mask) {
862
+ auto msb_one = _mm512_set1_epi8(0xFF);
863
+ auto mask_ = _mm512_cmp_epu8_mask(mask, msb_one, _MM_CMPINT_EQ);
864
+ return _mm512_mask_blend_epi8(mask_, a.values, b.values);
865
+ }
866
+
867
+ Vectorized<uint8_t> neg() const;
868
+
869
+ Vectorized<uint8_t> abs() const {
870
+ return *this;
871
+ }
872
+
873
+ Vectorized<uint8_t> operator==(const Vectorized<uint8_t>& other) const {
874
+ auto mask = _mm512_cmpeq_epu8_mask(values, other.values);
875
+ return _mm512_mask_set1_epi8(zero_vector, mask, 0xFF);
876
+ }
877
+ Vectorized<uint8_t> operator!=(const Vectorized<uint8_t>& other) const {
878
+ auto mask = _mm512_cmpneq_epu8_mask(values, other.values);
879
+ return _mm512_mask_set1_epi8(zero_vector, mask, 0xFF);
880
+ }
881
+ Vectorized<uint8_t> operator<(const Vectorized<uint8_t>& other) const {
882
+ auto mask = _mm512_cmplt_epu8_mask(values, other.values);
883
+ return _mm512_mask_set1_epi8(zero_vector, mask, 0xFF);
884
+ }
885
+ Vectorized<uint8_t> operator<=(const Vectorized<uint8_t>& other) const {
886
+ auto mask = _mm512_cmple_epu8_mask(values, other.values);
887
+ return _mm512_mask_set1_epi8(zero_vector, mask, 0xFF);
888
+ }
889
+ Vectorized<uint8_t> operator>(const Vectorized<uint8_t>& other) const {
890
+ return other < *this;
891
+ }
892
+ Vectorized<uint8_t> operator>=(const Vectorized<uint8_t>& other) const {
893
+ return other <= *this;
894
+ }
895
+
896
+ Vectorized<uint8_t> eq(const Vectorized<uint8_t>& other) const;
897
+ Vectorized<uint8_t> ne(const Vectorized<uint8_t>& other) const;
898
+ Vectorized<uint8_t> gt(const Vectorized<uint8_t>& other) const;
899
+ Vectorized<uint8_t> ge(const Vectorized<uint8_t>& other) const;
900
+ Vectorized<uint8_t> lt(const Vectorized<uint8_t>& other) const;
901
+ Vectorized<uint8_t> le(const Vectorized<uint8_t>& other) const;
902
+ };
903
+
904
+ template <>
905
+ Vectorized<int64_t> inline operator+(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
906
+ return _mm512_add_epi64(a, b);
907
+ }
908
+
909
+ template <>
910
+ Vectorized<int32_t> inline operator+(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
911
+ return _mm512_add_epi32(a, b);
912
+ }
913
+
914
+ template <>
915
+ Vectorized<int16_t> inline operator+(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
916
+ return _mm512_add_epi16(a, b);
917
+ }
918
+
919
+ template <>
920
+ Vectorized<int8_t> inline operator+(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
921
+ return _mm512_add_epi8(a, b);
922
+ }
923
+
924
+ template <>
925
+ Vectorized<uint8_t> inline operator+(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
926
+ return _mm512_add_epi8(a, b);
927
+ }
928
+
929
+ template <>
930
+ Vectorized<int64_t> inline operator-(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
931
+ return _mm512_sub_epi64(a, b);
932
+ }
933
+
934
+ template <>
935
+ Vectorized<int32_t> inline operator-(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
936
+ return _mm512_sub_epi32(a, b);
937
+ }
938
+
939
+ template <>
940
+ Vectorized<int16_t> inline operator-(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
941
+ return _mm512_sub_epi16(a, b);
942
+ }
943
+
944
+ template <>
945
+ Vectorized<int8_t> inline operator-(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
946
+ return _mm512_sub_epi8(a, b);
947
+ }
948
+
949
+ template <>
950
+ Vectorized<uint8_t> inline operator-(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
951
+ return _mm512_sub_epi8(a, b);
952
+ }
953
+
954
+ // Negation. Defined here so we can utilize operator-
955
+ inline Vectorized<int64_t> Vectorized<int64_t>::neg() const {
956
+ return Vectorized<int64_t>(0) - *this;
957
+ }
958
+
959
+ inline Vectorized<int32_t> Vectorized<int32_t>::neg() const {
960
+ return Vectorized<int32_t>(0) - *this;
961
+ }
962
+
963
+ inline Vectorized<int16_t> Vectorized<int16_t>::neg() const {
964
+ return Vectorized<int16_t>(0) - *this;
965
+ }
966
+
967
+ inline Vectorized<int8_t> Vectorized<int8_t>::neg() const {
968
+ return Vectorized<int8_t>(0) - *this;
969
+ }
970
+
971
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::neg() const {
972
+ return Vectorized<uint8_t>(0) - *this;
973
+ }
974
+
975
+ template <>
976
+ Vectorized<int64_t> inline operator*(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
977
+ return _mm512_mullo_epi64(a, b);
978
+ }
979
+
980
+ template <>
981
+ Vectorized<int32_t> inline operator*(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
982
+ return _mm512_mullo_epi32(a, b);
983
+ }
984
+
985
+ template <>
986
+ Vectorized<int16_t> inline operator*(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
987
+ return _mm512_mullo_epi16(a, b);
988
+ }
989
+
990
+ template <typename T, typename Op>
991
+ Vectorized<T> inline int_elementwise_binary_512(const Vectorized<T>& a, const Vectorized<T>& b, Op op) {
992
+ T values_a[Vectorized<T>::size()];
993
+ T values_b[Vectorized<T>::size()];
994
+ a.store(values_a);
995
+ b.store(values_b);
996
+ for (int i = 0; i != Vectorized<T>::size(); i++) {
997
+ values_a[i] = op(values_a[i], values_b[i]);
998
+ }
999
+ return Vectorized<T>::loadu(values_a);
1000
+ }
1001
+
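A minimal usage sketch (not part of the header; hypothetical function name, assuming CPU_CAPABILITY_AVX512 is enabled): int_elementwise_binary_512 accepts any binary functor, so an operation with no native AVX512 instruction, such as an elementwise remainder, can be expressed the same way the operator* and operator/ specializations below do it.

at::vec::Vectorized<int16_t> remainder_demo(const at::vec::Vectorized<int16_t>& a,
                                            const at::vec::Vectorized<int16_t>& b) {
  // Assumes every lane of b is non-zero. The helper spills both operands to
  // stack arrays, applies the functor lane by lane, and reloads the result.
  return at::vec::int_elementwise_binary_512(
      a, b, [](int16_t x, int16_t y) { return static_cast<int16_t>(x % y); });
}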
1002
+ template <>
1003
+ Vectorized<int8_t> inline operator*(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
1004
+ // We don't have an instruction for multiplying int8_t
1005
+ return int_elementwise_binary_512(a, b, std::multiplies<int8_t>());
1006
+ }
1007
+
1008
+ template <>
1009
+ Vectorized<uint8_t> inline operator*(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
1010
+ // We don't have an instruction for multiplying uint8_t
1011
+ return int_elementwise_binary_512(a, b, std::multiplies<uint8_t>());
1012
+ }
1013
+
1014
+ template <>
1015
+ Vectorized<int64_t> inline minimum(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
1016
+ return _mm512_min_epi64(a, b);
1017
+ }
1018
+
1019
+ template <>
1020
+ Vectorized<int32_t> inline minimum(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
1021
+ return _mm512_min_epi32(a, b);
1022
+ }
1023
+
1024
+ template <>
1025
+ Vectorized<int16_t> inline minimum(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
1026
+ return _mm512_min_epi16(a, b);
1027
+ }
1028
+
1029
+ template <>
1030
+ Vectorized<int8_t> inline minimum(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
1031
+ return _mm512_min_epi8(a, b);
1032
+ }
1033
+
1034
+ template <>
1035
+ Vectorized<uint8_t> inline minimum(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
1036
+ return _mm512_min_epu8(a, b);
1037
+ }
1038
+
1039
+ template <>
1040
+ Vectorized<int64_t> inline maximum(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
1041
+ return _mm512_max_epi64(a, b);
1042
+ }
1043
+
1044
+ template <>
1045
+ Vectorized<int32_t> inline maximum(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
1046
+ return _mm512_max_epi32(a, b);
1047
+ }
1048
+
1049
+ template <>
1050
+ Vectorized<int16_t> inline maximum(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
1051
+ return _mm512_max_epi16(a, b);
1052
+ }
1053
+
1054
+ template <>
1055
+ Vectorized<int8_t> inline maximum(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
1056
+ return _mm512_max_epi8(a, b);
1057
+ }
1058
+
1059
+ template <>
1060
+ Vectorized<uint8_t> inline maximum(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
1061
+ return _mm512_max_epu8(a, b);
1062
+ }
1063
+
1064
+ template <>
1065
+ Vectorized<int64_t> inline clamp(const Vectorized<int64_t>& a, const Vectorized<int64_t>& min_val, const Vectorized<int64_t>& max_val) {
1066
+ return _mm512_min_epi64(max_val, _mm512_max_epi64(a, min_val));
1067
+ }
1068
+
1069
+ template <>
1070
+ Vectorized<int32_t> inline clamp(const Vectorized<int32_t>& a, const Vectorized<int32_t>& min_val, const Vectorized<int32_t>& max_val) {
1071
+ return _mm512_min_epi32(max_val, _mm512_max_epi32(a, min_val));
1072
+ }
1073
+
1074
+ template <>
1075
+ Vectorized<int16_t> inline clamp(const Vectorized<int16_t>& a, const Vectorized<int16_t>& min_val, const Vectorized<int16_t>& max_val) {
1076
+ return _mm512_min_epi16(max_val, _mm512_max_epi16(a, min_val));
1077
+ }
1078
+
1079
+ template <>
1080
+ Vectorized<int8_t> inline clamp(const Vectorized<int8_t>& a, const Vectorized<int8_t>& min_val, const Vectorized<int8_t>& max_val) {
1081
+ return _mm512_min_epi8(max_val, _mm512_max_epi8(a, min_val));
1082
+ }
1083
+
1084
+ template <>
1085
+ Vectorized<uint8_t> inline clamp(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& min_val, const Vectorized<uint8_t>& max_val) {
1086
+ return _mm512_min_epu8(max_val, _mm512_max_epu8(a, min_val));
1087
+ }
1088
+
1089
+ template <>
1090
+ Vectorized<int64_t> inline clamp_max(const Vectorized<int64_t>& a, const Vectorized<int64_t>& max_val) {
1091
+ return _mm512_min_epi64(max_val, a);
1092
+ }
1093
+
1094
+ template <>
1095
+ Vectorized<int32_t> inline clamp_max(const Vectorized<int32_t>& a, const Vectorized<int32_t>& max_val) {
1096
+ return _mm512_min_epi32(max_val, a);
1097
+ }
1098
+
1099
+ template <>
1100
+ Vectorized<int16_t> inline clamp_max(const Vectorized<int16_t>& a, const Vectorized<int16_t>& max_val) {
1101
+ return _mm512_min_epi16(max_val, a);
1102
+ }
1103
+
1104
+ template <>
1105
+ Vectorized<int8_t> inline clamp_max(const Vectorized<int8_t>& a, const Vectorized<int8_t>& max_val) {
1106
+ return _mm512_min_epi8(max_val, a);
1107
+ }
1108
+
1109
+ template <>
1110
+ Vectorized<uint8_t> inline clamp_max(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& max_val) {
1111
+ return _mm512_min_epu8(max_val, a);
1112
+ }
1113
+
1114
+ template <>
1115
+ Vectorized<int64_t> inline clamp_min(const Vectorized<int64_t>& a, const Vectorized<int64_t>& min_val) {
1116
+ return _mm512_max_epi64(min_val, a);
1117
+ }
1118
+
1119
+ template <>
1120
+ Vectorized<int32_t> inline clamp_min(const Vectorized<int32_t>& a, const Vectorized<int32_t>& min_val) {
1121
+ return _mm512_max_epi32(min_val, a);
1122
+ }
1123
+
1124
+ template <>
1125
+ Vectorized<int16_t> inline clamp_min(const Vectorized<int16_t>& a, const Vectorized<int16_t>& min_val) {
1126
+ return _mm512_max_epi16(min_val, a);
1127
+ }
1128
+
1129
+ template <>
1130
+ Vectorized<int8_t> inline clamp_min(const Vectorized<int8_t>& a, const Vectorized<int8_t>& min_val) {
1131
+ return _mm512_max_epi8(min_val, a);
1132
+ }
1133
+
1134
+ template <>
1135
+ Vectorized<uint8_t> inline clamp_min(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& min_val) {
1136
+ return _mm512_max_epu8(min_val, a);
1137
+ }
1138
+
1139
+ template<typename T>
1140
+ Vectorized<int32_t> inline convert_to_int32(const T* ptr) {
1141
+ return Vectorized<int32_t>::loadu(ptr);
1142
+ }
1143
+
1144
+ template<>
1145
+ Vectorized<int32_t> inline convert_to_int32<int8_t>(const int8_t* ptr) {
1146
+ return _mm512_cvtepi8_epi32(_mm_loadu_si128(reinterpret_cast<const __m128i*>(ptr)));
1147
+ }
1148
+
1149
+ template<>
1150
+ Vectorized<int32_t> inline convert_to_int32<uint8_t>(const uint8_t* ptr) {
1151
+ return _mm512_cvtepu8_epi32(_mm_loadu_si128(reinterpret_cast<const __m128i*>(ptr)));
1152
+ }
1153
+
1154
+ template <>
1155
+ Vectorized<int64_t> inline operator/(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
1156
+ return int_elementwise_binary_512(a, b, std::divides<int64_t>());
1157
+ }
1158
+ template <>
1159
+ Vectorized<int32_t> inline operator/(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
1160
+ return int_elementwise_binary_512(a, b, std::divides<int32_t>());
1161
+ }
1162
+ template <>
1163
+ Vectorized<int16_t> inline operator/(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
1164
+ return int_elementwise_binary_512(a, b, std::divides<int16_t>());
1165
+ }
1166
+ template <>
1167
+ Vectorized<int8_t> inline operator/(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
1168
+ return int_elementwise_binary_512(a, b, std::divides<int8_t>());
1169
+ }
1170
+ template <>
1171
+ Vectorized<uint8_t> inline operator/(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
1172
+ return int_elementwise_binary_512(a, b, std::divides<uint8_t>());
1173
+ }
1174
+
1175
+ template<class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
1176
+ inline Vectorized<T> operator&(const Vectorized<T>& a, const Vectorized<T>& b) {
1177
+ return _mm512_and_si512(a, b);
1178
+ }
1179
+ template<class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
1180
+ inline Vectorized<T> operator|(const Vectorized<T>& a, const Vectorized<T>& b) {
1181
+ return _mm512_or_si512(a, b);
1182
+ }
1183
+ template<class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
1184
+ inline Vectorized<T> operator^(const Vectorized<T>& a, const Vectorized<T>& b) {
1185
+ return _mm512_xor_si512(a, b);
1186
+ }
1187
+ template<class T, typename std::enable_if_t<std::is_base_of<Vectorizedi, Vectorized<T>>::value, int> = 0>
1188
+ inline Vectorized<T> operator~(const Vectorized<T>& a) {
1189
+ return _mm512_xor_si512(a, _mm512_set1_epi32(-1));
1190
+ }
1191
+
1192
+ inline Vectorized<int64_t> Vectorized<int64_t>::eq(const Vectorized<int64_t>& other) const {
1193
+ return (*this == other) & Vectorized<int64_t>(1);
1194
+ }
1195
+
1196
+ inline Vectorized<int64_t> Vectorized<int64_t>::ne(const Vectorized<int64_t>& other) const {
1197
+ return (*this != other) & Vectorized<int64_t>(1);
1198
+ }
1199
+
1200
+ inline Vectorized<int64_t> Vectorized<int64_t>::gt(const Vectorized<int64_t>& other) const {
1201
+ return (*this > other) & Vectorized<int64_t>(1);
1202
+ }
1203
+
1204
+ inline Vectorized<int64_t> Vectorized<int64_t>::ge(const Vectorized<int64_t>& other) const {
1205
+ return (*this >= other) & Vectorized<int64_t>(1);
1206
+ }
1207
+
1208
+ inline Vectorized<int64_t> Vectorized<int64_t>::lt(const Vectorized<int64_t>& other) const {
1209
+ return (*this < other) & Vectorized<int64_t>(1);
1210
+ }
1211
+
1212
+ inline Vectorized<int64_t> Vectorized<int64_t>::le(const Vectorized<int64_t>& other) const {
1213
+ return (*this <= other) & Vectorized<int64_t>(1);
1214
+ }
1215
+
1216
+ inline Vectorized<int32_t> Vectorized<int32_t>::eq(const Vectorized<int32_t>& other) const {
1217
+ return (*this == other) & Vectorized<int32_t>(1);
1218
+ }
1219
+
1220
+ inline Vectorized<int32_t> Vectorized<int32_t>::ne(const Vectorized<int32_t>& other) const {
1221
+ return (*this != other) & Vectorized<int32_t>(1);
1222
+ }
1223
+
1224
+ inline Vectorized<int32_t> Vectorized<int32_t>::gt(const Vectorized<int32_t>& other) const {
1225
+ return (*this > other) & Vectorized<int32_t>(1);
1226
+ }
1227
+
1228
+ inline Vectorized<int32_t> Vectorized<int32_t>::ge(const Vectorized<int32_t>& other) const {
1229
+ return (*this >= other) & Vectorized<int32_t>(1);
1230
+ }
1231
+
1232
+ inline Vectorized<int32_t> Vectorized<int32_t>::lt(const Vectorized<int32_t>& other) const {
1233
+ return (*this < other) & Vectorized<int32_t>(1);
1234
+ }
1235
+
1236
+ inline Vectorized<int32_t> Vectorized<int32_t>::le(const Vectorized<int32_t>& other) const {
1237
+ return (*this <= other) & Vectorized<int32_t>(1);
1238
+ }
1239
+
1240
+ inline Vectorized<int16_t> Vectorized<int16_t>::eq(const Vectorized<int16_t>& other) const {
1241
+ return (*this == other) & Vectorized<int16_t>(1);
1242
+ }
1243
+
1244
+ inline Vectorized<int16_t> Vectorized<int16_t>::ne(const Vectorized<int16_t>& other) const {
1245
+ return (*this != other) & Vectorized<int16_t>(1);
1246
+ }
1247
+
1248
+ inline Vectorized<int16_t> Vectorized<int16_t>::gt(const Vectorized<int16_t>& other) const {
1249
+ return (*this > other) & Vectorized<int16_t>(1);
1250
+ }
1251
+
1252
+ inline Vectorized<int16_t> Vectorized<int16_t>::ge(const Vectorized<int16_t>& other) const {
1253
+ return (*this >= other) & Vectorized<int16_t>(1);
1254
+ }
1255
+
1256
+ inline Vectorized<int16_t> Vectorized<int16_t>::lt(const Vectorized<int16_t>& other) const {
1257
+ return (*this < other) & Vectorized<int16_t>(1);
1258
+ }
1259
+
1260
+ inline Vectorized<int16_t> Vectorized<int16_t>::le(const Vectorized<int16_t>& other) const {
1261
+ return (*this <= other) & Vectorized<int16_t>(1);
1262
+ }
1263
+
1264
+ inline Vectorized<int8_t> Vectorized<int8_t>::eq(const Vectorized<int8_t>& other) const {
1265
+ return (*this == other) & Vectorized<int8_t>(1);
1266
+ }
1267
+
1268
+ inline Vectorized<int8_t> Vectorized<int8_t>::ne(const Vectorized<int8_t>& other) const {
1269
+ return (*this != other) & Vectorized<int8_t>(1);
1270
+ }
1271
+
1272
+ inline Vectorized<int8_t> Vectorized<int8_t>::gt(const Vectorized<int8_t>& other) const {
1273
+ return (*this > other) & Vectorized<int8_t>(1);
1274
+ }
1275
+
1276
+ inline Vectorized<int8_t> Vectorized<int8_t>::ge(const Vectorized<int8_t>& other) const {
1277
+ return (*this >= other) & Vectorized<int8_t>(1);
1278
+ }
1279
+
1280
+ inline Vectorized<int8_t> Vectorized<int8_t>::lt(const Vectorized<int8_t>& other) const {
1281
+ return (*this < other) & Vectorized<int8_t>(1);
1282
+ }
1283
+
1284
+ inline Vectorized<int8_t> Vectorized<int8_t>::le(const Vectorized<int8_t>& other) const {
1285
+ return (*this <= other) & Vectorized<int8_t>(1);
1286
+ }
1287
+
1288
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::eq(const Vectorized<uint8_t>& other) const {
1289
+ return (*this == other) & Vectorized<uint8_t>(1);
1290
+ }
1291
+
1292
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::ne(const Vectorized<uint8_t>& other) const {
1293
+ return (*this != other) & Vectorized<uint8_t>(1);
1294
+ }
1295
+
1296
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::gt(const Vectorized<uint8_t>& other) const {
1297
+ return (*this > other) & Vectorized<uint8_t>(1);
1298
+ }
1299
+
1300
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::ge(const Vectorized<uint8_t>& other) const {
1301
+ return (*this >= other) & Vectorized<uint8_t>(1);
1302
+ }
1303
+
1304
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::lt(const Vectorized<uint8_t>& other) const {
1305
+ return (*this < other) & Vectorized<uint8_t>(1);
1306
+ }
1307
+
1308
+ inline Vectorized<uint8_t> Vectorized<uint8_t>::le(const Vectorized<uint8_t>& other) const {
1309
+ return (*this <= other) & Vectorized<uint8_t>(1);
1310
+ }
1311
+
1312
+ template <bool left_shift, typename T, typename std::enable_if_t<std::is_same<T, int8_t>::value || std::is_same<T, uint8_t>::value, int> = 0>
1313
+ Vectorized<T> inline shift_512_8(const Vectorized<T>& a, const Vectorized<T>& b) {
1314
+ // No vector instruction for shifting int8_t/uint8_t, so we emulate
1315
+ // it instead.
1316
+
1317
+ // Control masks for shuffle operation, treating 512 bits as an
1318
+ // array of 8-bit elements, and considering pairs of neighboring
1319
+ // elements. Specifically, a mask named "ctl_M_N" (M,N in [0,1], and
1320
+ // M!=N) is set so that shuffle will move element with index M from
1321
+ // input pair into element with index N in output pair, and element
1322
+ // with index M in output pair will be set to all 0s.
1323
+ __m512i ctl_0_1 = _mm512_set_epi8(62, 0x80, 60, 0x80, 58, 0x80, 56, 0x80,
1324
+ 54, 0x80, 52, 0x80, 50, 0x80, 48, 0x80,
1325
+ 46, 0x80, 44, 0x80, 42, 0x80, 40, 0x80,
1326
+ 38, 0x80, 36, 0x80, 34, 0x80, 32, 0x80,
1327
+ 30, 0x80, 28, 0x80, 26, 0x80, 24, 0x80,
1328
+ 22, 0x80, 20, 0x80, 18, 0x80, 16, 0x80,
1329
+ 14, 0x80, 12, 0x80, 10, 0x80, 8, 0x80,
1330
+ 6, 0x80, 4, 0x80, 2, 0x80, 0, 0x80);
1331
+ __m512i ctl_1_0 = _mm512_set_epi8(0x80, 63, 0x80, 61, 0x80, 59, 0x80, 57,
1332
+ 0x80, 55, 0x80, 53, 0x80, 51, 0x80, 49,
1333
+ 0x80, 47, 0x80, 45, 0x80, 43, 0x80, 41,
1334
+ 0x80, 39, 0x80, 37, 0x80, 35, 0x80, 33,
1335
+ 0x80, 31, 0x80, 29, 0x80, 27, 0x80, 25,
1336
+ 0x80, 23, 0x80, 21, 0x80, 19, 0x80, 17,
1337
+ 0x80, 15, 0x80, 13, 0x80, 11, 0x80, 9,
1338
+ 0x80, 7, 0x80, 5, 0x80, 3, 0x80, 1);
1339
+
1340
+ // Masks for bitwise and operation, treating 512 bits as an array of
1341
+ // 8-bit elements, and considering them in pairs of neighboring
1342
+ // elements. A mask named "keep_M" (M in [0,1]) is set so that
1343
+ // bitwise and will copy element with index M from input pair into
1344
+ // element with the same index in output pair, while the other
1345
+ // element in output pair will be set to all 0s.
1346
+ __m512i keep_0 = _mm512_set1_epi16(0xFF);
1347
+ __m512i keep_1 = _mm512_set1_epi16(0xFF00);
1348
+
1349
+ // Take each 8-bit element with idx%2==0 from input array to be
1350
+ // shifted and extend it to 16 bits so that 0s are added to the
1351
+ // right. Then, perform shifting on this 16-bit number. Upper 8
1352
+ // bits will be proper result of shifting original 8-bit number, so
1353
+ // write them to result array, into the same position from which
1354
+ // corresponding input element is taken. Also, make sure that
1355
+ // result array elements with idx%2!=0 are set to all 0s.
1356
+ //
1357
+ // Note that the number of bits to shift by is extended to 16 bits by
1358
+ // adding 0s to the left. That means this number is not properly
1359
+ // sign-extended for negative values. However, the number of bits to
1360
+ // shift is treated as an unsigned integer by respective shift
1361
+ // intrinsics anyway so if negative then either with or without
1362
+ // proper sign extension, it will be interpreted as a number greater
1363
+ // than 32, and the shifting result will be the same.
1364
+ __m512i a0 = _mm512_shuffle_epi8(a, ctl_0_1);
1365
+ __m512i b0 = _mm512_and_si512(b, keep_0);
1366
+ __m512i c0;
1367
+ if (left_shift)
1368
+ c0 = _mm512_sllv_epi16(a0, b0);
1369
+ else
1370
+ if constexpr (std::is_same_v<T, int8_t>)
1371
+ c0 = _mm512_srav_epi16(a0, b0);
1372
+ else
1373
+ c0 = _mm512_srlv_epi16(a0, b0);
1374
+ c0 = _mm512_shuffle_epi8(c0, ctl_1_0);
1375
+
1376
+ // Perform shifting the same way for input array elements with
1377
+ // idx%2==1.
1378
+ __m512i a1 = _mm512_and_si512(a, keep_1);
1379
+ __m512i b1 = _mm512_shuffle_epi8(b, ctl_1_0);
1380
+ __m512i c1;
1381
+ if (left_shift)
1382
+ c1 = _mm512_sllv_epi16(a1, b1);
1383
+ else
1384
+ if constexpr (std::is_same_v<T, int8_t>)
1385
+ c1 = _mm512_srav_epi16(a1, b1);
1386
+ else
1387
+ c1 = _mm512_srlv_epi16(a1, b1);
1388
+ c1 = _mm512_and_si512(c1, keep_1);
1389
+
1390
+ // Merge partial results into the final result.
1391
+ __m512i c = _mm512_or_si512(c0, c1);
1392
+
1393
+ return c;
1394
+ }
1395
+
1396
+ template <>
1397
+ Vectorized<int64_t> inline operator<<(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
1398
+ return _mm512_sllv_epi64(a, b);
1399
+ }
1400
+
1401
+ template <>
1402
+ Vectorized<int32_t> inline operator<<(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
1403
+ return _mm512_sllv_epi32(a, b);
1404
+ }
1405
+
1406
+ template <>
1407
+ Vectorized<int16_t> inline operator<<(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
1408
+ return _mm512_sllv_epi16(a, b);
1409
+ }
1410
+
1411
+ template <>
1412
+ Vectorized<int8_t> inline operator<<(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
1413
+ return shift_512_8<true>(a, b);
1414
+ }
1415
+
1416
+ template <>
1417
+ Vectorized<uint8_t> inline operator<<(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
1418
+ return shift_512_8<true>(a, b);
1419
+ }
1420
+
1421
+ template <>
1422
+ Vectorized<int64_t> inline operator>>(const Vectorized<int64_t>& a, const Vectorized<int64_t>& b) {
1423
+ return _mm512_srav_epi64(a, b);
1424
+ }
1425
+
1426
+ template <>
1427
+ Vectorized<int32_t> inline operator>>(const Vectorized<int32_t>& a, const Vectorized<int32_t>& b) {
1428
+ return _mm512_srav_epi32(a, b);
1429
+ }
1430
+
1431
+ template <>
1432
+ Vectorized<int16_t> inline operator>>(const Vectorized<int16_t>& a, const Vectorized<int16_t>& b) {
1433
+ return _mm512_srav_epi16(a, b);
1434
+ }
1435
+
1436
+ template <>
1437
+ Vectorized<int8_t> inline operator>>(const Vectorized<int8_t>& a, const Vectorized<int8_t>& b) {
1438
+ return shift_512_8<false>(a, b);
1439
+ }
1440
+
1441
+ template <>
1442
+ Vectorized<uint8_t> inline operator>>(const Vectorized<uint8_t>& a, const Vectorized<uint8_t>& b) {
1443
+ return shift_512_8<false>(a, b);
1444
+ }
1445
+
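A small sketch of how the emulated 8-bit shifts behave (hypothetical function name, assuming AVX512 is available): operator>> is arithmetic for int8_t and logical for uint8_t, matching the _mm512_srav_epi16 / _mm512_srlv_epi16 split inside shift_512_8 above.

void shift_demo() {
  constexpr int N = at::vec::Vectorized<int8_t>::size();  // 64 lanes
  int8_t lane_vals[N];
  int8_t shift_amts[N];
  for (int i = 0; i < N; i++) {
    lane_vals[i] = -64;   // 0xC0 as a signed byte
    shift_amts[i] = 2;
  }
  auto a = at::vec::Vectorized<int8_t>::loadu(lane_vals);
  auto b = at::vec::Vectorized<int8_t>::loadu(shift_amts);
  auto c = a >> b;        // arithmetic shift: every lane becomes -16
  int8_t out[N];
  c.store(out);           // out[i] == -16 for all lanes
}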
1446
+ #endif
1447
+
1448
+ }}}
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vec/vec512/vec512_qint.h ADDED
@@ -0,0 +1,1338 @@
1
+ #pragma once
2
+
3
+ // DO NOT DEFINE STATIC DATA IN THIS HEADER!
4
+ // See Note [Do not compile initializers with AVX]
5
+
6
+ #include <ATen/cpu/vec/intrinsics.h>
7
+ #include <ATen/cpu/vec/vec_base.h>
8
+ #include <ATen/native/quantized/AffineQuantizerBase.h>
9
+
10
+ #include <c10/util/irange.h>
11
+ #include <c10/util/qint32.h>
12
+ #include <c10/util/qint8.h>
13
+ #include <c10/util/quint8.h>
14
+
15
+ #include <array>
16
+ #include <cmath>
17
+
18
+ // This file defines Vectorized<> for the quantized types.
19
+ //
20
+ //
21
+ // Currently, we simply use these classes as efficient converters between
22
+ // the quantized types and Vectorized<float>, usually in bandwidth-bound cases
23
+ // where doing the arithmetic in full-precision is acceptable (e.g.
24
+ // elementwise operators).
25
+ //
26
+ //
27
+ // Conversions are as follows:
28
+ // Vectorized<qint8> -> 4x Vectorized<float>
29
+ // Vectorized<quint8> -> 4x Vectorized<float>
30
+ // Vectorized<qint32> -> 1x Vectorized<float>
31
+ //
32
+ // The size of the returned float vector is specified by the special
33
+ // constexpr function float_num_vecs. The type of the value returned
34
+ // from dequantize (and expected as an argument to quantize) is
35
+ // specified by float_vec_return_type.
36
+ //
37
+ // When writing kernels with these vectors, it is expected that floating-
38
+ // point operations will be carried out in a loop over Vectorized<T>::float_num_vecs
39
+ // iterations.
40
+
41
+ namespace at {
42
+ namespace vec {
43
+ inline namespace CPU_CAPABILITY {
44
+
45
+ #if defined(CPU_CAPABILITY_AVX512) && !defined(_MSC_VER)
46
+
47
+ struct Vectorizedqi {
48
+ protected:
49
+ __m512i vals __attribute__((aligned(64)));
50
+
51
+ public:
52
+ Vectorizedqi() {}
53
+ Vectorizedqi(__m512i v) : vals(v) {}
54
+ operator __m512i() const {
55
+ return vals;
56
+ }
57
+ };
58
+
59
+
60
+ template <typename T>
61
+ __m512i pack_saturate_and_clamp(
62
+ __m512i first,
63
+ __m512i second,
64
+ T min_val,
65
+ T max_val);
66
+
67
+ template <>
68
+ inline __m512i pack_saturate_and_clamp<int32_t>(
69
+ __m512i first,
70
+ __m512i second,
71
+ int32_t min_val,
72
+ int32_t max_val) {
73
+ // This function is for linkage only, will not be used
74
+ AT_ERROR("pack_saturate_and_clamp<int32_t> is not supported");
75
+ }
76
+
77
+ template <>
78
+ inline __m512i pack_saturate_and_clamp<int8_t>(
79
+ __m512i first,
80
+ __m512i second,
81
+ int8_t min_val,
82
+ int8_t max_val) {
83
+ __m512i packed_and_sat = _mm512_packs_epi16(first, second);
84
+ return _mm512_max_epi8(
85
+ _mm512_set1_epi8(min_val),
86
+ _mm512_min_epi8(packed_and_sat, _mm512_set1_epi8(max_val)));
87
+ }
88
+
89
+ template <>
90
+ inline __m512i pack_saturate_and_clamp<uint8_t>(
91
+ __m512i first,
92
+ __m512i second,
93
+ uint8_t min_val,
94
+ uint8_t max_val) {
95
+ __m512i packed_and_sat = _mm512_packus_epi16(first, second);
96
+ return _mm512_max_epu8(
97
+ _mm512_set1_epi8(min_val),
98
+ _mm512_min_epu8(packed_and_sat, _mm512_set1_epi8(max_val)));
99
+ }
100
+
101
+ inline Vectorized<float> convert_uint8_to_float(at::vec::Vectorized<uint8_t> src) {
102
+ // Note: this function only convert inputs number of elements equal to at::vec::Vectorized<float>.size()
103
+ // Only handle first 128 bits
104
+ __m128i input_128 = _mm512_castsi512_si128(src);
105
+ // Convert from 16*u8 to 16*int32
106
+ __m512i input_512_extended = _mm512_cvtepu8_epi32(input_128);
107
+ // Convert from 16*int32 to 16*float32
108
+ return _mm512_cvtepi32_ps(input_512_extended);
109
+ }
110
+
111
+ inline Vectorized<uint8_t> convert_float_to_uint8(at::vec::Vectorized<float> src) {
112
+ // Convert from float32 to int32 with truncation
113
+ __m512i x_values_int32 = _mm512_cvttps_epi32(src);
114
+
115
+ // Convert from int32 to int16 using signed saturation
116
+ __m512i xy_packed_v = _mm512_packs_epi32(x_values_int32, x_values_int32);
117
+
118
+ constexpr auto min_val = std::numeric_limits<uint8_t>::min();
119
+ constexpr auto max_val = std::numeric_limits<uint8_t>::max();
120
+
121
+ // Convert from int16 to uint8 using unsigned saturation
122
+ __m512i xyzw_clamped_v = pack_saturate_and_clamp<uint8_t>(
123
+ xy_packed_v, xy_packed_v, min_val, max_val);
124
+ __m512i permute_mask_v =
125
+ _mm512_set_epi32(0x0f, 0x0b, 0x07, 0x03, 0x0e, 0x0a, 0x06, 0x02,
126
+ 0x0d, 0x09, 0x05, 0x01, 0x0c, 0x08, 0x04, 0x00);
127
+ return _mm512_permutexvar_epi32(permute_mask_v, xyzw_clamped_v);
128
+ }
129
+
130
+ template <typename T>
131
+ inline void __attribute__((always_inline)) QuantizeAvx512(
132
+ const float* src,
133
+ T* dst,
134
+ int len,
135
+ float inverse_scale,
136
+ int64_t zero_point) {
137
+ constexpr int VLEN = 16;
138
+ constexpr auto min_val = std::numeric_limits<T>::min();
139
+ constexpr auto max_val = std::numeric_limits<T>::max();
140
+ const __m512i min_v = _mm512_set1_epi32(min_val);
141
+ const __m512i max_v = _mm512_set1_epi32(max_val);
142
+ // This is the largest int32 value < int32_max exactly representable in float
143
+ constexpr int32_t int32_float_max_val =
144
+ std::numeric_limits<int32_t>::max() - 127;
145
+ int i = 0;
146
+ __m512 inverse_scale_v = _mm512_set1_ps(inverse_scale);
147
+ // clang-format off
148
+ static const __m512i shuffle_mask_v = _mm512_set_epi8(
149
+ 0xff, 0xff, 0xff, 0xff,
150
+ 0xff, 0xff, 0xff, 0xff,
151
+ 0xff, 0xff, 0xff, 0xff,
152
+ 0x0c, 0x08, 0x04, 0x00,
153
+ 0xff, 0xff, 0xff, 0xff,
154
+ 0xff, 0xff, 0xff, 0xff,
155
+ 0xff, 0xff, 0xff, 0xff,
156
+ 0x0c, 0x08, 0x04, 0x00,
157
+ 0xff, 0xff, 0xff, 0xff,
158
+ 0xff, 0xff, 0xff, 0xff,
159
+ 0xff, 0xff, 0xff, 0xff,
160
+ 0x0c, 0x08, 0x04, 0x00,
161
+ 0xff, 0xff, 0xff, 0xff,
162
+ 0xff, 0xff, 0xff, 0xff,
163
+ 0xff, 0xff, 0xff, 0xff,
164
+ 0x0c, 0x08, 0x04, 0x00);
165
+ // clang-format on
166
+ __m512i permute_mask_v =
167
+ _mm512_set_epi32(0x0f, 0x0b, 0x07, 0x03, 0x0e, 0x0a, 0x06, 0x02,
168
+ 0x0d, 0x09, 0x05, 0x01, 0x0c, 0x08, 0x04, 0x00);
169
+ __m512i permute_mask_l8_v =
170
+ _mm512_set_epi32(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
171
+ 0x00, 0x00, 0x00, 0x00, 0x0c, 0x08, 0x04, 0x00);
172
+ int len_aligned = len / (VLEN * 4) * (VLEN * 4);
173
+ for (; i < len_aligned; i += 4 * VLEN) {
174
+ // x
175
+ __m512 x_vals = _mm512_load_ps(src + i);
176
+ __m512 x_transformed_v = _mm512_mul_ps(x_vals, inverse_scale_v);
177
+ // If the floating point value is greater than int32_max,
178
+ // _mm512_cvtps_epi32 converts them to -ve. Clip at int32_float_max_val to
179
+ // Clip at int32_float_max_val to avoid this.
180
+ x_transformed_v =
181
+ _mm512_min_ps(x_transformed_v, _mm512_set1_ps(int32_float_max_val));
182
+ // y
183
+ __m512 y_vals = _mm512_load_ps(src + i + VLEN);
184
+ __m512 y_transformed_v = _mm512_mul_ps(y_vals, inverse_scale_v);
185
+ y_transformed_v =
186
+ _mm512_min_ps(y_transformed_v, _mm512_set1_ps(int32_float_max_val));
187
+ // z
188
+ __m512 z_vals = _mm512_load_ps(src + i + 2 * VLEN);
189
+ __m512 z_transformed_v = _mm512_mul_ps(z_vals, inverse_scale_v);
190
+ z_transformed_v =
191
+ _mm512_min_ps(z_transformed_v, _mm512_set1_ps(int32_float_max_val));
192
+ // w
193
+ __m512 w_vals = _mm512_load_ps(src + i + 3 * VLEN);
194
+ __m512 w_transformed_v = _mm512_mul_ps(w_vals, inverse_scale_v);
195
+ w_transformed_v =
196
+ _mm512_min_ps(w_transformed_v, _mm512_set1_ps(int32_float_max_val));
197
+
198
+ __m512i x_rounded_v = _mm512_cvtps_epi32(x_transformed_v);
199
+ __m512i y_rounded_v = _mm512_cvtps_epi32(y_transformed_v);
200
+ __m512i z_rounded_v = _mm512_cvtps_epi32(z_transformed_v);
201
+ __m512i w_rounded_v = _mm512_cvtps_epi32(w_transformed_v);
202
+
203
+ // add zero point
204
+ x_rounded_v = _mm512_add_epi32(x_rounded_v, _mm512_set1_epi32(zero_point));
205
+ y_rounded_v = _mm512_add_epi32(y_rounded_v, _mm512_set1_epi32(zero_point));
206
+ z_rounded_v = _mm512_add_epi32(z_rounded_v, _mm512_set1_epi32(zero_point));
207
+ w_rounded_v = _mm512_add_epi32(w_rounded_v, _mm512_set1_epi32(zero_point));
208
+
209
+ __m512i xy_packed_v = _mm512_packs_epi32(x_rounded_v, y_rounded_v);
210
+ __m512i zw_packed_v = _mm512_packs_epi32(z_rounded_v, w_rounded_v);
211
+ __m512i xyzw_clamped_v =
212
+ pack_saturate_and_clamp<T>(xy_packed_v, zw_packed_v, min_val, max_val);
213
+
214
+ xyzw_clamped_v =
215
+ _mm512_permutexvar_epi32(permute_mask_v, xyzw_clamped_v);
216
+ _mm512_storeu_si512(reinterpret_cast<__m512i*>(dst + i), xyzw_clamped_v);
217
+ }
218
+
219
+ // Additional 16-element AVX512 pass to take advantage when len is smaller,
220
+ // based on fbgemm::QuantizeAvx2 (https://github.com/pytorch/FBGEMM)
221
+ for (; i < len / VLEN * VLEN; i += VLEN) {
222
+ __m512 x_vals = _mm512_load_ps(src + i);
223
+ __m512 x_transformed_v = _mm512_mul_ps(x_vals, inverse_scale_v);
224
+ x_transformed_v =
225
+ _mm512_min_ps(x_transformed_v, _mm512_set1_ps(int32_float_max_val));
226
+ __m512i x_rounded_v = _mm512_cvtps_epi32(x_transformed_v);
227
+ x_rounded_v = _mm512_add_epi32(x_rounded_v, _mm512_set1_epi32(zero_point));
228
+ __m512i x_clipped_v =
229
+ _mm512_max_epi32(min_v, _mm512_min_epi32(max_v, x_rounded_v));
230
+
231
+ x_clipped_v = _mm512_shuffle_epi8(x_clipped_v, shuffle_mask_v);
232
+ x_clipped_v = _mm512_permutexvar_epi32(permute_mask_l8_v, x_clipped_v);
233
+ _mm_storeu_si128(
234
+ reinterpret_cast<__m128i*>(dst + i),
235
+ _mm512_castsi512_si128(x_clipped_v));
236
+ }
237
+
238
+ for (; i < len; ++i) {
239
+ float transformed = src[i] * inverse_scale;
240
+
241
+ // Not exactly the same behavior as the vectorized code.
242
+ // The vectorized code above always rounds to even in halfway cases
243
+ // (https://software.intel.com/en-us/node/523819), but std::nearbyint
244
+ // does the same only when the current rounding mode is FE_TONEAREST.
245
+ // However, in practice, this should not be a problem because most cases
246
+ // use the default rounding mode FE_TONEAREST.
247
+ // Note that we cannot implement the same behavior as the vectorized code
248
+ // using std::round because it does rounding away from zero in halfway
249
+ // cases.
250
+ transformed = zero_point + std::nearbyint(transformed);
251
+ float clipped =
252
+ std::min(std::max(transformed, float(min_val)), float(max_val));
253
+ dst[i] = clipped;
254
+ }
255
+ }
256
+
257
+ template<>
258
+ struct Vectorized<c10::qint32> : public Vectorizedqi {
259
+ using size_type = int;
260
+ static constexpr size_type size() {
261
+ return 16;
262
+ }
263
+
264
+ static constexpr int float_num_vecs() {
265
+ return 1;
266
+ }
267
+
268
+ static constexpr int int_num_vecs() {
269
+ return 1;
270
+ }
271
+
272
+ using float_vec_return_type = std::array<Vectorized<float>, 1>;
273
+ using int_vec_return_type = std::array<Vectorized<c10::qint32>, 1>;
274
+ using value_type = c10::qint32::underlying;
275
+
276
+ public:
277
+ using Vectorizedqi::Vectorizedqi;
278
+ Vectorized() {}
279
+
280
+ Vectorized(__m512i vals_) { vals = vals_;}
281
+
282
+ // Broadcast constructor
283
+ Vectorized(const c10::qint32& val) {
284
+ value_type uw = val.val_;
285
+ vals = _mm512_set1_epi32(uw);
286
+ }
287
+
288
+ void store(void* ptr, int count = size()) const {
289
+ if (count != size()) {
290
+ memcpy(ptr, &vals, count * sizeof(value_type));
291
+ } else {
292
+ _mm512_storeu_si512((__m512i*)ptr, vals);
293
+ }
294
+ }
295
+
296
+ static Vectorized<c10::qint32> loadu(const void* ptr) {
297
+ return Vectorized<c10::qint32>(ptr);
298
+ }
299
+
300
+ static Vectorized<c10::qint32> loadu(const void* ptr, int64_t count) {
301
+ __at_align__ value_type tmp_values[size()];
302
+ // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
303
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
304
+ // instructions while a loop would be compiled to one instruction.
305
+ for (const auto i : c10::irange(size())) {
306
+ tmp_values[i] = 0;
307
+ }
308
+ std::memcpy(tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
309
+ return loadu(tmp_values);
310
+ }
311
+
312
+ float_vec_return_type dequantize(
313
+ Vectorized<float> scale,
314
+ Vectorized<float> zero_point,
315
+ Vectorized<float> scale_zp_premul) const {
316
+ __m512 float_vals = _mm512_cvtepi32_ps(vals);
317
+ return {vec::fmadd(scale, Vectorized<float>(float_vals), scale_zp_premul)};
318
+ }
319
+
320
+ float_vec_return_type dequantize(
321
+ Vectorized<float> scale,
322
+ Vectorized<float> zero_point) const {
323
+ __m512 float_vals = _mm512_cvtepi32_ps(vals);
324
+ return {(Vectorized<float>(float_vals) - zero_point) * scale};
325
+ }
326
+
327
+ static Vectorized<c10::qint32> quantize(
328
+ const float_vec_return_type& rhs,
329
+ float scale,
330
+ int32_t zero_point,
331
+ float inverse_scale) {
332
+ Vectorized<c10::qint32> retval;
333
+ auto rhs_data = (__m512)rhs[0];
334
+ at::native::quantize_vec<c10::qint32, /*precision=*/32>(
335
+ scale, zero_point, (float*)&rhs_data, (c10::qint32*)&retval.vals, 16);
336
+ return retval;
337
+ }
338
+
339
+ Vectorized<c10::qint32> maximum(Vectorized<c10::qint32> b) const {
340
+ return _mm512_max_epi32(vals, b.vals);
341
+ }
342
+
343
+ Vectorized<c10::qint32> minimum(Vectorized<c10::qint32> b) const {
344
+ return _mm512_min_epi32(vals, b.vals);
345
+ }
346
+
347
+ Vectorized<c10::qint32> relu(Vectorized<c10::qint32> zero_point) const {
348
+ return maximum(zero_point);
349
+ }
350
+
351
+ Vectorized<c10::qint32> relu6(
352
+ Vectorized<c10::qint32> zero_point,
353
+ Vectorized<c10::qint32> q_six) {
354
+ return _mm512_min_epi32(
355
+ _mm512_max_epi32(vals, zero_point.vals), q_six.vals);
356
+ }
357
+
358
+ int_vec_return_type widening_subtract(Vectorized<c10::qint32> b) const {
359
+ return {_mm512_sub_epi32(vals, b)};
360
+ }
361
+
362
+ static Vectorized<c10::qint32> requantize_from_int(
363
+ const int_vec_return_type& inp,
364
+ float multiplier,
365
+ int32_t zero_point) {
366
+ __m512 multiplier_v = _mm512_set1_ps(multiplier);
367
+ __m512i zero_point_v = _mm512_set1_epi32(zero_point);
368
+
369
+ __m512 scaled = _mm512_mul_ps(_mm512_cvtepi32_ps(inp[0]), multiplier_v);
370
+ __m512i rounded = _mm512_cvtps_epi32(scaled);
371
+ return _mm512_add_epi32(rounded, zero_point_v);
372
+ }
373
+
374
+ private:
375
+ // Load from memory constructor
376
+ Vectorized(const void* ptr) {
377
+ vals = _mm512_loadu_si512((const __m512i*)ptr);
378
+ }
379
+ };
380
+
381
+ template <>
382
+ Vectorized<c10::qint32> inline maximum(const Vectorized<c10::qint32>& a, const Vectorized<c10::qint32>& b) {
383
+ return a.maximum(b);
384
+ }
385
+
386
+ template <>
387
+ Vectorized<c10::qint32> inline operator*(
388
+ const Vectorized<c10::qint32>& a,
389
+ const Vectorized<c10::qint32>& b) {
390
+ return _mm512_mullo_epi32(a, b);
391
+ }
392
+
393
+ template <>
394
+ Vectorized<c10::qint32> inline operator+(
395
+ const Vectorized<c10::qint32>& a,
396
+ const Vectorized<c10::qint32>& b) {
397
+ return _mm512_add_epi32(a, b);
398
+ }
399
+
400
+ /*
401
+ * Convert values from int32 back to int8/uint8
402
+ */
403
+ template <typename T>
404
+ __m512i RequantizeAvx512(
405
+ const std::array<Vectorized<c10::qint32>, 4>& inp,
406
+ __m512 multiplier,
407
+ __m512i zp) {
408
+ static_assert(
409
+ std::is_same<T, int8_t>::value || std::is_same<T, uint8_t>::value,
410
+ "Only int8_t/uint8_t are supported");
411
+ constexpr auto min_val = std::numeric_limits<T>::min();
412
+ constexpr auto max_val = std::numeric_limits<T>::max();
413
+ __m512i permute_mask_v =
414
+ _mm512_set_epi32(0x0f, 0x0b, 0x07, 0x03, 0x0e, 0x0a, 0x06, 0x02,
415
+ 0x0d, 0x09, 0x05, 0x01, 0x0c, 0x08, 0x04, 0x00);
416
+ __m512 x_scaled_v = _mm512_mul_ps(_mm512_cvtepi32_ps(inp[0]), multiplier);
417
+ __m512 y_scaled_v = _mm512_mul_ps(_mm512_cvtepi32_ps(inp[1]), multiplier);
418
+ __m512 z_scaled_v = _mm512_mul_ps(_mm512_cvtepi32_ps(inp[2]), multiplier);
419
+ __m512 w_scaled_v = _mm512_mul_ps(_mm512_cvtepi32_ps(inp[3]), multiplier);
420
+
421
+ __m512i x_rounded_v = _mm512_cvtps_epi32(x_scaled_v);
422
+ __m512i y_rounded_v = _mm512_cvtps_epi32(y_scaled_v);
423
+ __m512i z_rounded_v = _mm512_cvtps_epi32(z_scaled_v);
424
+ __m512i w_rounded_v = _mm512_cvtps_epi32(w_scaled_v);
425
+
426
+ /* Add zero point */
427
+ __m512i x_v = _mm512_add_epi32(x_rounded_v, zp);
428
+ __m512i y_v = _mm512_add_epi32(y_rounded_v, zp);
429
+ __m512i z_v = _mm512_add_epi32(z_rounded_v, zp);
430
+ __m512i w_v = _mm512_add_epi32(w_rounded_v, zp);
431
+
432
+ /* Pack to int16_t and saturate */
433
+ __m512i xy_packed_v = _mm512_packs_epi32(x_v, y_v);
434
+ __m512i zw_packed_v = _mm512_packs_epi32(z_v, w_v);
435
+
436
+ __m512i xyzw_clamped_v =
437
+ pack_saturate_and_clamp<T>(xy_packed_v, zw_packed_v, min_val, max_val);
438
+
439
+ /*
440
+ * xyzw_clamped_v has results in the following layout so we need to
441
+ * permute: x0-3 y0-3 z0-3 w0-3 x4-7 y4-7 z4-7 w4-7 x8-11 y8-11 z8-11 w8-11 x12-15 y12-15 z12-15 w12-15
442
+ */
443
+ xyzw_clamped_v = _mm512_permutexvar_epi32(permute_mask_v, xyzw_clamped_v);
444
+ return xyzw_clamped_v;
445
+ }
446
+
447
+ template<>
448
+ struct Vectorized<c10::qint8> : public Vectorizedqi {
449
+ static constexpr int size() {
450
+ return 64;
451
+ }
452
+
453
+ static constexpr int float_num_vecs() {
454
+ return 4;
455
+ }
456
+
457
+ static constexpr int int_num_vecs() {
458
+ return 4;
459
+ }
460
+
461
+ using float_vec_return_type = std::array<Vectorized<float>, 4>;
462
+ using int_vec_return_type = std::array<Vectorized<c10::qint32>, 4>;
463
+ using value_type = typename c10::qint8::underlying;
464
+
465
+ public:
466
+ using Vectorizedqi::Vectorizedqi;
467
+
468
+ Vectorized() {}
469
+ Vectorized(__m512i vals_) { vals = vals_;}
470
+
471
+ // Broadcast constructor
472
+ Vectorized(const c10::qint8& val) {
473
+ value_type uw = val.val_;
474
+ vals = _mm512_set1_epi8(uw);
475
+ }
476
+
477
+ // This is needed because the compiler emits awful code for the default
478
+ // constructor for moving the enum
479
+ Vectorized(const Vectorized<c10::qint8>& other) : Vectorizedqi(other.vals) { }
480
+
481
+ // This is added to avoid error: definition of implicit copy assignment operator
482
+ // for 'Vectorized<c10::qint8>' is deprecated because it has a user-declared
483
+ // copy constructor [-Werror,-Wdeprecated-copy]
484
+ Vectorized& operator=(const Vectorized<c10::qint8>&) = default;
485
+
486
+ void store(void* ptr, int count = size()) const {
487
+ if (count != size()) {
488
+ memcpy(ptr, &vals, count * sizeof(value_type));
489
+ } else {
490
+ _mm512_storeu_si512((__m512i*)ptr, vals);
491
+ }
492
+ }
493
+
494
+ static Vectorized<c10::qint8> loadu(const void* ptr) {
495
+ return Vectorized<c10::qint8>(ptr);
496
+ }
497
+
498
+ static Vectorized<c10::qint8> loadu(const void* ptr, int64_t count) {
499
+ __at_align__ value_type tmp_values[size()];
500
+ // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
501
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
502
+ // instructions while a loop would be compiled to one instruction.
503
+ for (const auto i : c10::irange(size())) {
504
+ tmp_values[i] = 0;
505
+ }
506
+ std::memcpy(tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
507
+ return loadu(tmp_values);
508
+ }
509
+
510
+ private:
511
+ __m512i cvtepi8_epi32(__m128i epi8_vals) const {
512
+ return _mm512_cvtepi8_epi32(epi8_vals);
513
+ }
514
+
515
+ public:
516
+ float_vec_return_type dequantize(
517
+ Vectorized<float> scale,
518
+ Vectorized<float> zero_point,
519
+ Vectorized<float> scale_neg_zp_premul) const {
520
+ __m128i int_val0 = _mm_set_epi64x(vals[1], vals[0]);
521
+ __m128i int_val1 = _mm_set_epi64x(vals[3], vals[2]);
522
+ __m128i int_val2 = _mm_set_epi64x(vals[5], vals[4]);
523
+ __m128i int_val3 = _mm_set_epi64x(vals[7], vals[6]);
524
+
525
+ __m512 float_val0 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val0));
526
+ __m512 float_val1 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val1));
527
+ __m512 float_val2 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val2));
528
+ __m512 float_val3 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val3));
529
+
530
+ auto val0 =
531
+ vec::fmadd(scale, Vectorized<float>(float_val0), scale_neg_zp_premul);
532
+ auto val1 =
533
+ vec::fmadd(scale, Vectorized<float>(float_val1), scale_neg_zp_premul);
534
+ auto val2 =
535
+ vec::fmadd(scale, Vectorized<float>(float_val2), scale_neg_zp_premul);
536
+ auto val3 =
537
+ vec::fmadd(scale, Vectorized<float>(float_val3), scale_neg_zp_premul);
538
+ return {val0, val1, val2, val3};
539
+ }
540
+
541
+ float_vec_return_type dequantize(
542
+ Vectorized<float> scale,
543
+ Vectorized<float> zero_point) const {
544
+ __m128i int_val0 = _mm_set_epi64x(vals[1], vals[0]);
545
+ __m128i int_val1 = _mm_set_epi64x(vals[3], vals[2]);
546
+ __m128i int_val2 = _mm_set_epi64x(vals[5], vals[4]);
547
+ __m128i int_val3 = _mm_set_epi64x(vals[7], vals[6]);
548
+
549
+ __m512 float_val0 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val0));
550
+ __m512 float_val1 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val1));
551
+ __m512 float_val2 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val2));
552
+ __m512 float_val3 = _mm512_cvtepi32_ps(cvtepi8_epi32(int_val3));
553
+
554
+ auto val0 = (Vectorized<float>(float_val0) - zero_point) * scale;
555
+ auto val1 = (Vectorized<float>(float_val1) - zero_point) * scale;
556
+ auto val2 = (Vectorized<float>(float_val2) - zero_point) * scale;
557
+ auto val3 = (Vectorized<float>(float_val3) - zero_point) * scale;
558
+ return {val0, val1, val2, val3};
559
+ }
560
+
561
+ static Vectorized<c10::qint8> quantize(
562
+ const float_vec_return_type& rhs,
563
+ float scale,
564
+ int32_t zero_point,
565
+ float inverse_scale) {
566
+ auto* rhs_data = (float*)rhs.data();
567
+ int8_t quantized_values[64];
568
+ QuantizeAvx512<value_type>(
569
+ rhs_data, quantized_values, 64, inverse_scale, zero_point);
570
+ return Vectorized<c10::qint8>::loadu(quantized_values);
571
+ }
572
+
573
+ Vectorized<c10::qint8> maximum(Vectorized<c10::qint8> b) const {
574
+ return _mm512_max_epi8(vals, b.vals);
575
+ }
576
+
577
+ Vectorized<c10::qint8> minimum(Vectorized<c10::qint8> b) const {
578
+ return _mm512_min_epi8(vals, b.vals);
579
+ }
580
+
581
+ Vectorized<c10::qint8> relu(Vectorized<c10::qint8> zero_point) const {
582
+ return maximum(zero_point);
583
+ }
584
+
585
+ Vectorized<c10::qint8> relu6(
586
+ Vectorized<c10::qint8> zero_point,
587
+ Vectorized<c10::qint8> q_six) {
588
+ return _mm512_min_epi8(
589
+ _mm512_max_epi8(vals, zero_point.vals), q_six.vals);
590
+ }
591
+
592
+ int_vec_return_type widening_subtract(Vectorized<c10::qint8> b) const {
593
+ __m128i int_val0 = _mm_set_epi64x(vals[1], vals[0]);
594
+ __m128i int_val1 = _mm_set_epi64x(vals[3], vals[2]);
595
+ __m128i int_val2 = _mm_set_epi64x(vals[5], vals[4]);
596
+ __m128i int_val3 = _mm_set_epi64x(vals[7], vals[6]);
597
+
598
+ __m512i int32_val0 = cvtepi8_epi32(int_val0);
599
+ __m512i int32_val1 = cvtepi8_epi32(int_val1);
600
+ __m512i int32_val2 = cvtepi8_epi32(int_val2);
601
+ __m512i int32_val3 = cvtepi8_epi32(int_val3);
602
+
603
+ __m128i int_b0 = _mm_set_epi64x(b.vals[1], b.vals[0]);
604
+ __m128i int_b1 = _mm_set_epi64x(b.vals[3], b.vals[2]);
605
+ __m128i int_b2 = _mm_set_epi64x(b.vals[5], b.vals[4]);
606
+ __m128i int_b3 = _mm_set_epi64x(b.vals[7], b.vals[6]);
607
+
608
+ __m512i int32_b0 = cvtepi8_epi32(int_b0);
609
+ __m512i int32_b1 = cvtepi8_epi32(int_b1);
610
+ __m512i int32_b2 = cvtepi8_epi32(int_b2);
611
+ __m512i int32_b3 = cvtepi8_epi32(int_b3);
612
+
613
+ __m512i res_0 = _mm512_sub_epi32(int32_val0, int32_b0);
614
+ __m512i res_1 = _mm512_sub_epi32(int32_val1, int32_b1);
615
+ __m512i res_2 = _mm512_sub_epi32(int32_val2, int32_b2);
616
+ __m512i res_3 = _mm512_sub_epi32(int32_val3, int32_b3);
617
+
618
+ return {Vectorized<c10::qint32>(res_0),
619
+ Vectorized<c10::qint32>(res_1),
620
+ Vectorized<c10::qint32>(res_2),
621
+ Vectorized<c10::qint32>(res_3)};
622
+ }
623
+
624
+ static Vectorized<c10::qint8> requantize_from_int(
625
+ const int_vec_return_type& inp,
626
+ float multiplier,
627
+ int32_t zero_point) {
628
+ __m512 multiplier_v = _mm512_set1_ps(multiplier);
629
+ __m512i zero_point_v = _mm512_set1_epi32(zero_point);
630
+ return RequantizeAvx512<value_type>(inp, multiplier_v, zero_point_v);
631
+ }
632
+
633
+ private:
634
+ // Load from memory constructor
635
+ Vectorized(const void* ptr) {
636
+ vals = _mm512_loadu_si512((const __m512i*)ptr);
637
+ }
638
+ };
639
+
640
+ template <>
641
+ Vectorized<c10::qint8> inline maximum(const Vectorized<c10::qint8>& a, const Vectorized<c10::qint8>& b) {
642
+ return a.maximum(b);
643
+ }
644
+
645
+ template<>
646
+ struct Vectorized<c10::quint8> : public Vectorizedqi {
647
+ static constexpr int size() {
648
+ return 64;
649
+ }
650
+
651
+ static constexpr int float_num_vecs() {
652
+ return 4;
653
+ }
654
+
655
+ static constexpr int int_num_vecs() {
656
+ return 4;
657
+ }
658
+
659
+ using float_vec_return_type = std::array<Vectorized<float>, 4>;
660
+ using int_vec_return_type = std::array<Vectorized<c10::qint32>, 4>;
661
+ using value_type = typename c10::quint8::underlying;
662
+
663
+ public:
664
+ using Vectorizedqi::Vectorizedqi;
665
+ Vectorized() {}
666
+
667
+ Vectorized(__m512i vals_) { vals = vals_;}
668
+
669
+ // Broadcast constructor
670
+ Vectorized(const c10::quint8& val) {
671
+ value_type uw = val.val_;
672
+ vals = _mm512_set1_epi8(uw);
673
+ }
674
+
675
+ Vectorized(const Vectorized<c10::quint8>& other) : Vectorizedqi(other.vals) { }
676
+
677
+ // This is added to avoid error: definition of implicit copy assignment operator
678
+ // for 'Vectorized<c10::quint8>' is deprecated because it has a user-declared
679
+ // copy constructor [-Werror,-Wdeprecated-copy]
680
+ Vectorized& operator=(const Vectorized<c10::quint8>&) = default;
681
+
682
+ void store(void* ptr, int count = size()) const {
683
+ if (count != size()) {
684
+ memcpy(ptr, &vals, count * sizeof(value_type));
685
+ } else {
686
+ _mm512_storeu_si512((__m512i*)ptr, vals);
687
+ }
688
+ }
689
+
690
+ static Vectorized<c10::quint8> loadu(const void* ptr) {
691
+ return Vectorized<c10::quint8>(ptr);
692
+ }
693
+
694
+ static Vectorized<c10::quint8> loadu(const void* ptr, int64_t count) {
695
+ __at_align__ value_type tmp_values[size()];
696
+ // Ensure uninitialized memory does not change the output value See https://github.com/pytorch/pytorch/issues/32502
697
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
698
+ // instructions while a loop would be compiled to one instruction.
699
+ for (const auto i : c10::irange(size())) {
700
+ tmp_values[i] = 0;
701
+ }
702
+ std::memcpy(tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
703
+ return loadu(tmp_values);
704
+ }
705
+
706
+ private:
707
+ __m512i cvtepu8_epi32(__m128i epu8_vals) const {
708
+ return _mm512_cvtepu8_epi32(epu8_vals);
709
+ }
710
+
711
+ public:
712
+ float_vec_return_type dequantize(
713
+ Vectorized<float> scale,
714
+ Vectorized<float> zero_point,
715
+ Vectorized<float> scale_zp_premul) const {
716
+ __m128i int_val0 = _mm_set_epi64x(vals[1], vals[0]);
717
+ __m128i int_val1 = _mm_set_epi64x(vals[3], vals[2]);
718
+ __m128i int_val2 = _mm_set_epi64x(vals[5], vals[4]);
719
+ __m128i int_val3 = _mm_set_epi64x(vals[7], vals[6]);
720
+
721
+ __m512 float_val0 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val0));
722
+ __m512 float_val1 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val1));
723
+ __m512 float_val2 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val2));
724
+ __m512 float_val3 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val3));
725
+
726
+ auto val0 =
727
+ vec::fmadd(scale, Vectorized<float>(float_val0), scale_zp_premul);
728
+ auto val1 =
729
+ vec::fmadd(scale, Vectorized<float>(float_val1), scale_zp_premul);
730
+ auto val2 =
731
+ vec::fmadd(scale, Vectorized<float>(float_val2), scale_zp_premul);
732
+ auto val3 =
733
+ vec::fmadd(scale, Vectorized<float>(float_val3), scale_zp_premul);
734
+
735
+ return {val0, val1, val2, val3};
736
+ }
737
+
738
+ float_vec_return_type dequantize(
739
+ Vectorized<float> scale,
740
+ Vectorized<float> zero_point) const {
741
+ __m128i int_val0 = _mm_set_epi64x(vals[1], vals[0]);
742
+ __m128i int_val1 = _mm_set_epi64x(vals[3], vals[2]);
743
+ __m128i int_val2 = _mm_set_epi64x(vals[5], vals[4]);
744
+ __m128i int_val3 = _mm_set_epi64x(vals[7], vals[6]);
745
+
746
+ __m512 float_val0 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val0));
747
+ __m512 float_val1 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val1));
748
+ __m512 float_val2 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val2));
749
+ __m512 float_val3 = _mm512_cvtepi32_ps(cvtepu8_epi32(int_val3));
750
+
751
+ auto val0 = (Vectorized<float>(float_val0) - zero_point) * scale;
752
+ auto val1 = (Vectorized<float>(float_val1) - zero_point) * scale;
753
+ auto val2 = (Vectorized<float>(float_val2) - zero_point) * scale;
754
+ auto val3 = (Vectorized<float>(float_val3) - zero_point) * scale;
755
+
756
+ return {val0, val1, val2, val3};
757
+ }
758
+
759
+ static Vectorized<c10::quint8> quantize(
760
+ const float_vec_return_type& rhs,
761
+ float scale,
762
+ int32_t zero_point,
763
+ float inverse_scale) {
764
+ auto* rhs_data = (float*)rhs.data();
765
+ uint8_t quantized_values[64];
766
+ QuantizeAvx512<value_type>(
767
+ rhs_data, quantized_values, 64, inverse_scale, zero_point);
768
+ return Vectorized<c10::quint8>::loadu(quantized_values);
769
+ }
770
+
771
+ Vectorized<c10::quint8> maximum(Vectorized<c10::quint8> b) const {
772
+ return _mm512_max_epu8(vals, b.vals);
773
+ }
774
+
775
+ Vectorized<c10::quint8> minimum(Vectorized<c10::quint8> b) const {
776
+ return _mm512_min_epu8(vals, b.vals);
777
+ }
778
+
779
+ Vectorized<c10::quint8> relu(Vectorized<c10::quint8> zero_point) const {
780
+ return maximum(zero_point);
781
+ }
782
+
783
+ Vectorized<c10::quint8> relu6(
784
+ Vectorized<c10::quint8> zero_point,
785
+ Vectorized<c10::quint8> q_six) {
786
+ return _mm512_min_epu8(
787
+ _mm512_max_epu8(vals, zero_point.vals), q_six.vals);
788
+ }
789
+
790
+ int_vec_return_type widening_subtract(Vectorized<c10::quint8> b) const {
791
+ __m128i int_val0 = _mm_set_epi64x(vals[1], vals[0]);
792
+ __m128i int_val1 = _mm_set_epi64x(vals[3], vals[2]);
793
+ __m128i int_val2 = _mm_set_epi64x(vals[5], vals[4]);
794
+ __m128i int_val3 = _mm_set_epi64x(vals[7], vals[6]);
795
+
796
+ __m512i int32_val0 = cvtepu8_epi32(int_val0);
797
+ __m512i int32_val1 = cvtepu8_epi32(int_val1);
798
+ __m512i int32_val2 = cvtepu8_epi32(int_val2);
799
+ __m512i int32_val3 = cvtepu8_epi32(int_val3);
800
+
801
+ __m128i int_b0 = _mm_set_epi64x(b.vals[1], b.vals[0]);
802
+ __m128i int_b1 = _mm_set_epi64x(b.vals[3], b.vals[2]);
803
+ __m128i int_b2 = _mm_set_epi64x(b.vals[5], b.vals[4]);
804
+ __m128i int_b3 = _mm_set_epi64x(b.vals[7], b.vals[6]);
805
+
806
+ __m512i int32_b0 = cvtepu8_epi32(int_b0);
807
+ __m512i int32_b1 = cvtepu8_epi32(int_b1);
808
+ __m512i int32_b2 = cvtepu8_epi32(int_b2);
809
+ __m512i int32_b3 = cvtepu8_epi32(int_b3);
810
+
811
+ __m512i res_0 = _mm512_sub_epi32(int32_val0, int32_b0);
812
+ __m512i res_1 = _mm512_sub_epi32(int32_val1, int32_b1);
813
+ __m512i res_2 = _mm512_sub_epi32(int32_val2, int32_b2);
814
+ __m512i res_3 = _mm512_sub_epi32(int32_val3, int32_b3);
815
+ return {Vectorized<c10::qint32>(res_0),
816
+ Vectorized<c10::qint32>(res_1),
817
+ Vectorized<c10::qint32>(res_2),
818
+ Vectorized<c10::qint32>(res_3)};
819
+ }
820
+
821
+ static Vectorized<c10::quint8> requantize_from_int(
822
+ const int_vec_return_type& inp,
823
+ float multiplier,
824
+ int32_t zero_point) {
825
+ __m512 multiplier_v = _mm512_set1_ps(multiplier);
826
+ __m512i zero_point_v = _mm512_set1_epi32(zero_point);
827
+ return RequantizeAvx512<value_type>(inp, multiplier_v, zero_point_v);
828
+ }
829
+
830
+ private:
831
+
832
+ // Load from memory constructor
833
+ Vectorized(const void* ptr) {
834
+ vals = _mm512_loadu_si512((const __m512i*)ptr);
835
+ }
836
+ };
837
+
838
+ template <>
839
+ Vectorized<c10::quint8> inline maximum(const Vectorized<c10::quint8>& a, const Vectorized<c10::quint8>& b) {
840
+ return a.maximum(b);
841
+ }
842
+
843
+ #else
844
+
845
+ // NOTE: These are low-performance implementations that we fall back on.
846
+
847
+ template <
848
+ typename T,
849
+ typename float_vec_return_type_,
850
+ typename int_vec_return_type_,
851
+ int size_>
852
+ struct VectorizedQuantizedConverter {
853
+ static constexpr int size() {
854
+ return size_;
855
+ }
856
+
857
+ static constexpr int float_num_vecs() {
858
+ return size() / 8;
859
+ }
860
+
861
+ static constexpr int int_num_vecs() {
862
+ return size() / 8;
863
+ }
864
+
865
+ using float_vec_return_type = float_vec_return_type_;
866
+ using int_vec_return_type = int_vec_return_type_;
867
+
868
+ using value_type = typename T::underlying;
869
+ std::array<value_type, size_> vals;
870
+
871
+ VectorizedQuantizedConverter(T val) {
872
+ for (const auto i : c10::irange(size())) {
873
+ vals[i] = val.val_;
874
+ }
875
+ }
876
+
877
+ VectorizedQuantizedConverter(const void* ptr) {
878
+ memcpy(vals.data(), ptr, sizeof(value_type) * size());
879
+ }
880
+
881
+ void store(void* ptr, int count = size()) const {
882
+ memcpy(ptr, vals.data(), count * sizeof(value_type));
883
+ }
884
+
885
+ float_vec_return_type dequantize(
886
+ Vectorized<float> scale,
887
+ Vectorized<float> zero_point,
888
+ Vectorized<float> scale_zp_premul) const {
889
+ float_vec_return_type rv;
890
+ for (const auto i : c10::irange(float_num_vecs())) {
891
+ float tmp_vals[16];
892
+ for (const auto j : c10::irange(16)) {
893
+ tmp_vals[j] = at::native::dequantize_val<T>(
894
+ scale[j], zero_point[j], T(vals[16 * i + j]));
895
+ }
896
+ rv[i] = Vectorized<float>(tmp_vals[0],
897
+ tmp_vals[1],
898
+ tmp_vals[2],
899
+ tmp_vals[3],
900
+ tmp_vals[4],
901
+ tmp_vals[5],
902
+ tmp_vals[6],
903
+ tmp_vals[7],
904
+ tmp_vals[8],
905
+ tmp_vals[9],
906
+ tmp_vals[10],
907
+ tmp_vals[11],
908
+ tmp_vals[12],
909
+ tmp_vals[13],
910
+ tmp_vals[14],
911
+ tmp_vals[15]);
912
+ }
913
+ return rv;
914
+ }
915
+
916
+ float_vec_return_type dequantize(
917
+ Vectorized<float> scale,
918
+ Vectorized<float> zero_point) const {
919
+ Vectorized<float> scale_zp_premul;
920
+ return dequantize(scale, zero_point, scale_zp_premul);
921
+ }
922
+
923
+ protected:
924
+ VectorizedQuantizedConverter() {}
925
+ };
926
+
927
+ template <>
928
+ struct Vectorized<c10::qint32> : public VectorizedQuantizedConverter<
929
+ c10::qint32,
930
+ std::array<Vectorized<float>, 1>,
931
+ std::array<Vectorized<c10::qint32>, 1>,
932
+ 16> {
933
+ Vectorized()
934
+ : VectorizedQuantizedConverter<
935
+ c10::qint32,
936
+ std::array<Vectorized<float>, 1>,
937
+ std::array<Vectorized<c10::qint32>, 1>,
938
+ 16>() {}
939
+ Vectorized(c10::qint32 val)
940
+ : VectorizedQuantizedConverter<
941
+ c10::qint32,
942
+ std::array<Vectorized<float>, 1>,
943
+ std::array<Vectorized<c10::qint32>, 1>,
944
+ 16>(val) {}
945
+ Vectorized(const void* ptr)
946
+ : VectorizedQuantizedConverter<
947
+ c10::qint32,
948
+ std::array<Vectorized<float>, 1>,
949
+ std::array<Vectorized<c10::qint32>, 1>,
950
+ 16>(ptr) {}
951
+
952
+ static Vectorized<c10::qint32> loadu(const void* ptr) {
953
+ return Vectorized<c10::qint32>(ptr);
954
+ }
955
+
956
+ static Vectorized<c10::qint32> loadu(const void* ptr, int64_t count) {
957
+ __at_align__ value_type tmp_values[size()];
958
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
959
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
960
+ // instructions while a loop would be compiled to one instruction.
961
+ for (const auto i : c10::irange(size())) {
962
+ tmp_values[i] = 0;
963
+ }
964
+ std::memcpy(tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
965
+ return loadu(tmp_values);
966
+ }
967
+
968
+ static Vectorized<c10::qint32> quantize(
969
+ const float_vec_return_type& rhs,
970
+ float scale,
971
+ int32_t zero_point,
972
+ float inverse_scale) {
973
+ std::array<value_type, size()> qvals;
974
+ std::array<float, float_num_vecs() * 16> float_vals;
975
+
976
+ for (const auto i : c10::irange(float_num_vecs())) {
977
+ rhs[i].store(&float_vals[i * 16], 16);
978
+ }
979
+
980
+ at::native::quantize_vec<c10::qint32, /*precision=*/32>(
981
+ scale,
982
+ zero_point,
983
+ float_vals.data(),
984
+ (c10::qint32*)qvals.data(),
985
+ 16 * float_num_vecs());
986
+
987
+ return Vectorized<c10::qint32>::loadu(qvals.data());
988
+ }
989
+
990
+ Vectorized<c10::qint32> maximum(Vectorized<c10::qint32> b) const {
991
+ Vectorized<c10::qint32> retval;
992
+ for (const auto i : c10::irange(size())) {
993
+ retval.vals[i] = std::max<value_type>(vals[i], b.vals[i]);
994
+ }
995
+ return retval;
996
+ }
997
+
998
+ Vectorized<c10::qint32> minimum(Vectorized<c10::qint32> b) const {
999
+ Vectorized<c10::qint32> retval;
1000
+ for (const auto i : c10::irange(size())) {
1001
+ retval.vals[i] = std::min<value_type>(vals[i], b.vals[i]);
1002
+ }
1003
+ return retval;
1004
+ }
1005
+
1006
+ Vectorized<c10::qint32> relu(Vectorized<c10::qint32> zero_point) const {
1007
+ return maximum(zero_point);
1008
+ }
1009
+
1010
+
1011
+ Vectorized<c10::qint32> relu6(
1012
+ Vectorized<c10::qint32> zero_point,
1013
+ Vectorized<c10::qint32> q_six) {
1014
+ Vectorized<c10::qint32> retval;
1015
+ for (const auto i : c10::irange(size())) {
1016
+ retval.vals[i] = std::min<value_type>(
1017
+ std::max<value_type>(vals[i], zero_point.vals[i]), q_six.vals[i]);
1018
+ }
1019
+ return retval;
1020
+ }
1021
+
1022
+ int_vec_return_type widening_subtract(Vectorized<c10::qint32> b) const {
1023
+ int_vec_return_type retval;
1024
+ for (const auto i : c10::irange(size())) {
1025
+ retval[0].vals[i] = vals[i] - b.vals[i];
1026
+ }
1027
+ return retval;
1028
+ }
1029
+
1030
+ static Vectorized<c10::qint32> requantize_from_int(
1031
+ const int_vec_return_type& inp,
1032
+ float multiplier,
1033
+ int32_t zero_point) {
1034
+ Vectorized<c10::qint32> retval;
1035
+ for (const auto i : c10::irange(size())) {
1036
+ retval.vals[i] =
1037
+ std::nearbyint(static_cast<float>(inp[0].vals[i]) * multiplier) +
1038
+ zero_point;
1039
+ }
1040
+ return retval;
1041
+ }
1042
+ };
1043
+
1044
+ template <>
1045
+ Vectorized<c10::qint32> inline maximum(const Vectorized<c10::qint32>& a, const Vectorized<c10::qint32>& b) {
1046
+ return a.maximum(b);
1047
+ }
1048
+
1049
+ template <>
1050
+ Vectorized<c10::qint32> inline operator*(
1051
+ const Vectorized<c10::qint32>& a,
1052
+ const Vectorized<c10::qint32>& b) {
1053
+ Vectorized<c10::qint32> retval;
1054
+ for (const auto i : c10::irange(std::decay_t<decltype(a)>::size())) {
1055
+ retval.vals[i] = a.vals[i] * b.vals[i];
1056
+ }
1057
+ return retval;
1058
+ }
1059
+
1060
+ template <>
1061
+ Vectorized<c10::qint32> inline operator+(
1062
+ const Vectorized<c10::qint32>& a,
1063
+ const Vectorized<c10::qint32>& b) {
1064
+ Vectorized<c10::qint32> retval;
1065
+ for (const auto i : c10::irange(std::decay_t<decltype(a)>::size())) {
1066
+ retval.vals[i] = a.vals[i] + b.vals[i];
1067
+ }
1068
+ return retval;
1069
+ }
1070
+
1071
+ template <>
1072
+ struct Vectorized<c10::qint8> : public VectorizedQuantizedConverter<
1073
+ c10::qint8,
1074
+ std::array<Vectorized<float>, 4>,
1075
+ std::array<Vectorized<c10::qint32>, 4>,
1076
+ 64> {
1077
+ Vectorized()
1078
+ : VectorizedQuantizedConverter<
1079
+ c10::qint8,
1080
+ std::array<Vectorized<float>, 4>,
1081
+ std::array<Vectorized<c10::qint32>, 4>,
1082
+ 64>() {}
1083
+ Vectorized(c10::qint8 val)
1084
+ : VectorizedQuantizedConverter<
1085
+ c10::qint8,
1086
+ std::array<Vectorized<float>, 4>,
1087
+ std::array<Vectorized<c10::qint32>, 4>,
1088
+ 64>(val) {}
1089
+ Vectorized(const void* ptr)
1090
+ : VectorizedQuantizedConverter<
1091
+ c10::qint8,
1092
+ std::array<Vectorized<float>, 4>,
1093
+ std::array<Vectorized<c10::qint32>, 4>,
1094
+ 64>(ptr) {}
1095
+
1096
+ static Vectorized<c10::qint8> loadu(const void* ptr) {
1097
+ return Vectorized<c10::qint8>(ptr);
1098
+ }
1099
+
1100
+ static Vectorized<c10::qint8> loadu(const void* ptr, int64_t count) {
1101
+ __at_align__ value_type tmp_values[size()];
1102
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
1103
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
1104
+ // instructions while a loop would be compiled to one instruction.
1105
+ for (const auto i : c10::irange(size())) {
1106
+ tmp_values[i] = 0;
1107
+ }
1108
+ std::memcpy(tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
1109
+ return loadu(tmp_values);
1110
+ }
1111
+
1112
+ static Vectorized<c10::qint8> quantize(
1113
+ const float_vec_return_type& rhs,
1114
+ float scale,
1115
+ int32_t zero_point,
1116
+ float inverse_scale) {
1117
+ std::array<value_type, size()> qvals;
1118
+ std::array<float, float_num_vecs() * 16> float_vals;
1119
+
1120
+ for (const auto i : c10::irange(float_num_vecs())) {
1121
+ rhs[i].store(&float_vals[i * 16], 16);
1122
+ }
1123
+
1124
+ at::native::quantize_vec<c10::qint8>(
1125
+ scale,
1126
+ zero_point,
1127
+ float_vals.data(),
1128
+ (c10::qint8*)qvals.data(),
1129
+ 16 * float_num_vecs());
1130
+
1131
+ return Vectorized<c10::qint8>::loadu(qvals.data());
1132
+ }
1133
+
1134
+ Vectorized<c10::qint8> maximum(Vectorized<c10::qint8> b) const {
1135
+ Vectorized<c10::qint8> retval;
1136
+ for (const auto i : c10::irange(size())) {
1137
+ retval.vals[i] = std::max<value_type>(vals[i], b.vals[i]);
1138
+ }
1139
+ return retval;
1140
+ }
1141
+
1142
+ Vectorized<c10::qint8> minimum(Vectorized<c10::qint8> b) const {
1143
+ Vectorized<c10::qint8> retval;
1144
+ for (const auto i : c10::irange(size())) {
1145
+ retval.vals[i] = std::min<value_type>(vals[i], b.vals[i]);
1146
+ }
1147
+ return retval;
1148
+ }
1149
+
1150
+ Vectorized<c10::qint8> relu(Vectorized<c10::qint8> zero_point) const {
1151
+ return maximum(zero_point);
1152
+ }
1153
+
1154
+ Vectorized<c10::qint8> relu6(
1155
+ Vectorized<c10::qint8> zero_point,
1156
+ Vectorized<c10::qint8> q_six) {
1157
+ Vectorized<c10::qint8> retval;
1158
+ for (const auto i : c10::irange(size())) {
1159
+ retval.vals[i] = std::min<value_type>(
1160
+ std::max<value_type>(vals[i], zero_point.vals[i]), q_six.vals[i]);
1161
+ }
1162
+ return retval;
1163
+ }
1164
+
1165
+ int_vec_return_type widening_subtract(Vectorized<c10::qint8> b) const {
1166
+ int_vec_return_type retval;
1167
+ constexpr int elem_per_int_vec = size() / int_num_vecs();
1168
+ for (const auto i : c10::irange(int_num_vecs())) {
1169
+ for (const auto j : c10::irange(elem_per_int_vec)) {
1170
+ retval[i].vals[j] =
1171
+ static_cast<int32_t>(vals[i * elem_per_int_vec + j]) -
1172
+ static_cast<int32_t>(b.vals[i * elem_per_int_vec + j]);
1173
+ }
1174
+ }
1175
+ return retval;
1176
+ }
1177
+ static Vectorized<c10::qint8> requantize_from_int(
1178
+ const int_vec_return_type& inp,
1179
+ float multiplier,
1180
+ int32_t zero_point) {
1181
+ constexpr int elem_per_int_vec = size() / int_num_vecs();
1182
+ constexpr auto min_val = std::numeric_limits<value_type>::min();
1183
+ constexpr auto max_val = std::numeric_limits<value_type>::max();
1184
+ Vectorized<c10::qint8> retval;
1185
+ for (const auto i : c10::irange(int_num_vecs())) {
1186
+ for (const auto j : c10::irange(elem_per_int_vec)) {
1187
+ int32_t rounded =
1188
+ std::nearbyint(static_cast<float>(inp[i].vals[j]) * multiplier) +
1189
+ zero_point;
1190
+ retval.vals[i * elem_per_int_vec + j] =
1191
+ std::min<int32_t>(std::max<int32_t>(rounded, min_val), max_val);
1192
+ }
1193
+ }
1194
+ return retval;
1195
+ }
1196
+ };
1197
+
1198
+ template <>
1199
+ Vectorized<c10::qint8> inline maximum(const Vectorized<c10::qint8>& a, const Vectorized<c10::qint8>& b) {
1200
+ return a.maximum(b);
1201
+ }
1202
+
1203
+ template <>
1204
+ struct Vectorized<c10::quint8> : public VectorizedQuantizedConverter<
1205
+ c10::quint8,
1206
+ std::array<Vectorized<float>, 4>,
1207
+ std::array<Vectorized<c10::qint32>, 4>,
1208
+ 64> {
1209
+ Vectorized()
1210
+ : VectorizedQuantizedConverter<
1211
+ c10::quint8,
1212
+ std::array<Vectorized<float>, 4>,
1213
+ std::array<Vectorized<c10::qint32>, 4>,
1214
+ 64>() {}
1215
+ Vectorized(c10::quint8 val)
1216
+ : VectorizedQuantizedConverter<
1217
+ c10::quint8,
1218
+ std::array<Vectorized<float>, 4>,
1219
+ std::array<Vectorized<c10::qint32>, 4>,
1220
+ 64>(val) {}
1221
+ Vectorized(const void* ptr)
1222
+ : VectorizedQuantizedConverter<
1223
+ c10::quint8,
1224
+ std::array<Vectorized<float>, 4>,
1225
+ std::array<Vectorized<c10::qint32>, 4>,
1226
+ 64>(ptr) {}
1227
+
1228
+ static Vectorized<c10::quint8> loadu(const void* ptr) {
1229
+ return Vectorized<c10::quint8>(ptr);
1230
+ }
1231
+
1232
+ static Vectorized<c10::quint8> loadu(const void* ptr, int64_t count) {
1233
+ __at_align__ value_type tmp_values[size()];
1234
+ // Ensure uninitialized memory does not change the output value. See https://github.com/pytorch/pytorch/issues/32502
1235
+ // for more details. We do not initialize arrays to zero using "={0}" because gcc would compile it to two
1236
+ // instructions while a loop would be compiled to one instruction.
1237
+ for (const auto i : c10::irange(size())) {
1238
+ tmp_values[i] = 0;
1239
+ }
1240
+ std::memcpy(tmp_values, reinterpret_cast<const value_type*>(ptr), count * sizeof(value_type));
1241
+ return loadu(tmp_values);
1242
+ }
1243
+
1244
+ static Vectorized<c10::quint8> quantize(
1245
+ const float_vec_return_type& rhs,
1246
+ float scale,
1247
+ int32_t zero_point,
1248
+ float inverse_scale) {
1249
+ std::array<value_type, size()> qvals;
1250
+ std::array<float, float_num_vecs() * 16> float_vals;
1251
+
1252
+ for (const auto i : c10::irange(float_num_vecs())) {
1253
+ rhs[i].store(&float_vals[i * 16], 16);
1254
+ }
1255
+
1256
+ at::native::quantize_vec<c10::quint8>(
1257
+ scale,
1258
+ zero_point,
1259
+ float_vals.data(),
1260
+ (c10::quint8*)qvals.data(),
1261
+ 16 * float_num_vecs());
1262
+
1263
+ return Vectorized<c10::quint8>::loadu(qvals.data());
1264
+ }
1265
+
1266
+ Vectorized<c10::quint8> maximum(Vectorized<c10::quint8> b) const {
1267
+ Vectorized<c10::quint8> retval;
1268
+ for (const auto i : c10::irange(size())) {
1269
+ retval.vals[i] = std::max<value_type>(vals[i], b.vals[i]);
1270
+ }
1271
+ return retval;
1272
+ }
1273
+
1274
+ Vectorized<c10::quint8> minimum(Vectorized<c10::quint8> b) const {
1275
+ Vectorized<c10::quint8> retval;
1276
+ for (const auto i : c10::irange(size())) {
1277
+ retval.vals[i] = std::min<value_type>(vals[i], b.vals[i]);
1278
+ }
1279
+ return retval;
1280
+ }
1281
+
1282
+ Vectorized<c10::quint8> relu(Vectorized<c10::quint8> zero_point) const {
1283
+ return maximum(zero_point);
1284
+ }
1285
+
1286
+
1287
+ Vectorized<c10::quint8> relu6(
1288
+ Vectorized<c10::quint8> zero_point,
1289
+ Vectorized<c10::quint8> q_six) {
1290
+ Vectorized<c10::quint8> retval;
1291
+ for (const auto i : c10::irange(size())) {
1292
+ retval.vals[i] = std::min<value_type>(
1293
+ std::max<value_type>(vals[i], zero_point.vals[i]), q_six.vals[i]);
1294
+ }
1295
+ return retval;
1296
+ }
1297
+
1298
+ int_vec_return_type widening_subtract(Vectorized<c10::quint8> b) const {
1299
+ int_vec_return_type retval;
1300
+ constexpr int elem_per_int_vec = size() / int_num_vecs();
1301
+ for (const auto i : c10::irange(int_num_vecs())) {
1302
+ for (const auto j : c10::irange(elem_per_int_vec)) {
1303
+ retval[i].vals[j] =
1304
+ static_cast<int32_t>(vals[i * elem_per_int_vec + j]) -
1305
+ static_cast<int32_t>(b.vals[i * elem_per_int_vec + j]);
1306
+ }
1307
+ }
1308
+ return retval;
1309
+ }
1310
+ static Vectorized<c10::quint8> requantize_from_int(
1311
+ const int_vec_return_type& inp,
1312
+ float multiplier,
1313
+ int32_t zero_point) {
1314
+ constexpr int elem_per_int_vec = size() / int_num_vecs();
1315
+ constexpr auto min_val = std::numeric_limits<value_type>::min();
1316
+ constexpr auto max_val = std::numeric_limits<value_type>::max();
1317
+ Vectorized<c10::quint8> retval;
1318
+ for (const auto i : c10::irange(int_num_vecs())) {
1319
+ for (const auto j : c10::irange(elem_per_int_vec)) {
1320
+ int32_t rounded =
1321
+ std::nearbyint(static_cast<float>(inp[i].vals[j]) * multiplier) +
1322
+ zero_point;
1323
+ retval.vals[i * elem_per_int_vec + j] =
1324
+ std::min<int32_t>(std::max<int32_t>(rounded, min_val), max_val);
1325
+ }
1326
+ }
1327
+ return retval;
1328
+ }
1329
+ };
1330
+
1331
+ template <>
1332
+ Vectorized<c10::quint8> inline maximum(const Vectorized<c10::quint8>& a, const Vectorized<c10::quint8>& b) {
1333
+ return a.maximum(b);
1334
+ }
1335
+
1336
+ #endif // defined(CPU_CAPABILITY_AVX512) && !defined(MSVC)
1337
+
1338
+ }}}
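The AVX512 members and the scalar fallback implementations above realize the same affine mapping between quantized integers and floats; only the vector width differs. A minimal scalar sketch of that mapping follows (hypothetical helper names; it assumes scale_zp_premul is the caller-precomputed -zero_point * scale that the vec::fmadd overload expects):

#include <algorithm>
#include <cmath>
#include <cstdint>

// Scalar reference for the two dequantize() overloads above (sketch only).
inline float dequantize_one(uint8_t q, float scale, float zero_point) {
  return (static_cast<float>(q) - zero_point) * scale;
}

// Matches vec::fmadd(scale, q_as_float, scale_zp_premul), assuming
// scale_zp_premul == -zero_point * scale was precomputed by the caller.
inline float dequantize_one_premul(uint8_t q, float scale, float scale_zp_premul) {
  return scale * static_cast<float>(q) + scale_zp_premul;
}

// Scalar counterpart of quantize()/requantize_from_int(): scale, round,
// shift by the zero point, then clamp to the uint8 range.
inline uint8_t quantize_one(float x, float inverse_scale, int32_t zero_point) {
  int32_t q = static_cast<int32_t>(std::nearbyint(x * inverse_scale)) + zero_point;
  return static_cast<uint8_t>(std::min(255, std::max(0, q)));
}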
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/cpu/vml.h ADDED
@@ -0,0 +1,171 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Config.h>
4
+ #include <ATen/Parallel.h>
5
+ #include <ATen/OpMathType.h>
6
+ #include <ATen/cpu/vec/functional.h>
7
+ #include <ATen/cpu/vec/vec.h>
8
+ #include <c10/util/complex.h>
9
+
10
+ // This header implements various unary operations using a MKL VML style
11
+ // interface.
12
+
13
+ // It implements various functions with a simple interface
14
+ // For example it enables the user to call vsin(float* out, const float* in,
15
+ // size). This function takes a pointer to a contiguous output array of floats and
16
+ // a constant input array. It will then apply sin to each value in the input
17
+ // array and write the result into the output array. out and in may point to the
18
+ // same memory, i.e. this fully supports in-place operations. These functions
19
+ // also implement their own parallelization, so take precautions when calling
20
+ // these from threaded functions.
21
+
22
+ // When MKL is available, it will call into MKL's VML library, similarly to NumPy.
23
+ // If MKL is not available it will use SLEEF.
24
+
25
+ // This file might be compiled under AVX or AVX2 when called from e.g.
26
+ // UnaryOpsKernel.cpp
27
+
28
+ #include <algorithm>
29
+ #include <cstddef>
30
+ #include <cstdint>
31
+ #include <cstring>
32
+ #include <type_traits>
33
+
34
+ #if AT_MKL_ENABLED() && !defined(__APPLE__)
35
+ #include <mkl.h>
36
+ #endif
37
+
38
+ namespace at {
39
+ namespace vml {
40
+ inline namespace CPU_CAPABILITY {
41
+
42
+ using namespace vec;
43
+
44
+ template <typename scalar_t>
45
+ inline void vrsqrt(scalar_t* out, scalar_t* in, int64_t size) {
46
+ parallel_for(0, size, 2048, [out, in](int64_t begin, int64_t end) {
47
+ map(
48
+ [](const Vectorized<scalar_t>& x) {
49
+ return Vectorized<scalar_t>((scalar_t)(1)) / x.sqrt();
50
+ },
51
+ out + begin,
52
+ in + begin,
53
+ end - begin);
54
+ });
55
+ }
56
+
57
+ // NB: We ignore numerical errors by convention and leave them to the user
58
+
59
+ #define IMPLEMENT_VML(op) \
60
+ template <typename scalar_t> \
61
+ inline void v##op(scalar_t* out, const scalar_t* in, int64_t size) { \
62
+ using vec_t = Vectorized<vec_scalar_t<scalar_t>>; \
63
+ vec::map([](vec_t x) { return x.op(); }, out, in, size); \
64
+ } \
65
+
66
+ IMPLEMENT_VML(abs)
67
+ IMPLEMENT_VML(acos)
68
+ IMPLEMENT_VML(asin)
69
+ IMPLEMENT_VML(atan)
70
+ IMPLEMENT_VML(atanh)
71
+ IMPLEMENT_VML(ceil)
72
+ IMPLEMENT_VML(cos)
73
+ // IMPLEMENT_VML(cosh)
74
+ IMPLEMENT_VML(erf)
75
+ IMPLEMENT_VML(erfc)
76
+ IMPLEMENT_VML(erfinv)
77
+ IMPLEMENT_VML(exp)
78
+ IMPLEMENT_VML(expm1)
79
+ IMPLEMENT_VML(floor)
80
+ IMPLEMENT_VML(i0)
81
+ IMPLEMENT_VML(i0e)
82
+ IMPLEMENT_VML(digamma)
83
+ IMPLEMENT_VML(reciprocal)
84
+ IMPLEMENT_VML(log)
85
+ IMPLEMENT_VML(log10)
86
+ IMPLEMENT_VML(log1p)
87
+ IMPLEMENT_VML(log2)
88
+ IMPLEMENT_VML(neg)
89
+ IMPLEMENT_VML(sin)
90
+ // IMPLEMENT_VML(sinh)
91
+ IMPLEMENT_VML(sqrt)
92
+ IMPLEMENT_VML(round)
93
+ IMPLEMENT_VML(rsqrt)
94
+ IMPLEMENT_VML(tan)
95
+ IMPLEMENT_VML(tanh)
96
+ IMPLEMENT_VML(trunc)
97
+ IMPLEMENT_VML(lgamma)
98
+
99
+
100
+ #if AT_MKL_ENABLED() && !defined(__APPLE__)
101
+
102
+ // NB: LP64 MKL is the most commonly used and thus we assume it here. That means
103
+ // we need to expect MKL_INT to be of type int, which implies int32_t in most
104
+ // cases.
105
+ static_assert(
106
+ std::is_same<MKL_INT, int32_t>::value,
107
+ "MKL_INT is assumed to be int32_t");
108
+ #define IMPLEMENT_VML_MKL_STUB(op, mklop, type, mkltype) \
109
+ template <> \
110
+ inline void v##op(type * out, const type * in, int64_t size) { \
111
+ int64_t max_mkl_ind = std::numeric_limits<MKL_INT>::max(); \
112
+ if (size <= static_cast<int64_t>(max_mkl_ind)) { \
113
+ vm##mkltype##mklop( \
114
+ size, in, out, VML_HA | VML_FTZDAZ_OFF | VML_ERRMODE_IGNORE); \
115
+ } else { \
116
+ MKL_INT ind = 0; \
117
+ int64_t chunks = size / max_mkl_ind; \
118
+ int64_t rest = size % max_mkl_ind; \
119
+ for (; ind < chunks; ind++) { \
120
+ vm##mkltype##mklop( \
121
+ max_mkl_ind, \
122
+ in + ind * max_mkl_ind, \
123
+ out + ind * max_mkl_ind, \
124
+ VML_HA | VML_FTZDAZ_OFF | VML_ERRMODE_IGNORE); \
125
+ } \
126
+ vm##mkltype##mklop( \
127
+ rest, \
128
+ in + ind * max_mkl_ind, \
129
+ out + ind * max_mkl_ind, \
130
+ VML_HA | VML_FTZDAZ_OFF | VML_ERRMODE_IGNORE); \
131
+ } \
132
+ }
133
+
134
+ #define IMPLEMENT_VML_MKL(op, mklop) \
135
+ IMPLEMENT_VML_MKL_STUB(op, mklop, float, s) \
136
+ IMPLEMENT_VML_MKL_STUB(op, mklop, double, d)
137
+
138
+ // NB: abs, cosh and sinh were temporarily disabled due to issues with Apple
139
+ // NB: expm1 is disabled because on some configs it produces expm1(nan)=-1
140
+ IMPLEMENT_VML_MKL(acos, Acos)
141
+ IMPLEMENT_VML_MKL(asin, Asin)
142
+ IMPLEMENT_VML_MKL(atan, Atan)
143
+ IMPLEMENT_VML_MKL(cos, Cos)
144
+ // IMPLEMENT_VML_MKL(cosh, Cosh)
145
+ IMPLEMENT_VML_MKL(erf, Erf)
146
+ IMPLEMENT_VML_MKL(erfc, Erfc)
147
+ IMPLEMENT_VML_MKL(erfinv, ErfInv)
148
+ IMPLEMENT_VML_MKL(exp, Exp)
149
+ // IMPLEMENT_VML_MKL(expm1, Expm1)
150
+ IMPLEMENT_VML_MKL(log, Ln)
151
+ IMPLEMENT_VML_MKL(log10, Log10)
152
+ IMPLEMENT_VML_MKL(sin, Sin)
153
+ // IMPLEMENT_VML_MKL(sinh, Sinh)
154
+ IMPLEMENT_VML_MKL(sqrt, Sqrt)
155
+ IMPLEMENT_VML_MKL(tan, Tan)
156
+ IMPLEMENT_VML_MKL(tanh, Tanh)
157
+ IMPLEMENT_VML_MKL(trunc, Trunc)
158
+
159
+ // Not vectorized in MKL version tested
160
+ // IMPLEMENT_VML_MKL(abs, Abs)
161
+ // IMPLEMENT_VML_MKL(log1p, Log1p)
162
+
163
+ #if INTEL_MKL_VERSION >= 20180406
164
+ IMPLEMENT_VML_MKL(log2, Log2)
165
+ #endif
166
+
167
+ #endif
168
+
169
+ } // namespace
170
+ } // namespace vml
171
+ } // namespace at
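The header comment above spells out the vsin(out, in, size) calling convention but contains no complete example; a minimal sketch of a caller follows (the buffer is a placeholder, and the translation unit is assumed to be compiled with the same CPU_CAPABILITY flags as UnaryOpsKernel.cpp, since these functions live in that inline namespace):

#include <ATen/cpu/vml.h>

#include <cstdint>
#include <vector>

void sin_inplace(std::vector<float>& buf) {
  // out and in may point to the same memory: the header documents full
  // in-place support, and notes these routines handle their own parallelism.
  at::vml::vsin(buf.data(), buf.data(), static_cast<int64_t>(buf.size()));
}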
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/hip/impl/HIPAllocatorMasqueradingAsCUDA.h ADDED
@@ -0,0 +1,28 @@
1
+ #pragma once
2
+
3
+ #include <c10/core/Allocator.h>
4
+ #include <c10/core/DeviceType.h>
5
+
6
+ // Use of c10::hip namespace here makes hipification easier, because
7
+ // I don't have to also fix namespaces. Sorry!
8
+ namespace c10 { namespace hip {
9
+
10
+ // Takes a valid HIPAllocator (of any sort) and turns it into
11
+ // an allocator pretending to be a CUDA allocator. See
12
+ // Note [Masquerading as CUDA]
13
+ class HIPAllocatorMasqueradingAsCUDA final : public Allocator {
14
+ Allocator* allocator_;
15
+ public:
16
+ explicit HIPAllocatorMasqueradingAsCUDA(Allocator* allocator)
17
+ : allocator_(allocator) {}
18
+ DataPtr allocate(size_t size) const override {
19
+ DataPtr r = allocator_->allocate(size);
20
+ r.unsafe_set_device(Device(c10::DeviceType::CUDA, r.device().index()));
21
+ return r;
22
+ }
23
+ DeleterFnPtr raw_deleter() const override {
24
+ return allocator_->raw_deleter();
25
+ }
26
+ };
27
+
28
+ }} // namespace c10::hip
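A short sketch of how this wrapper is meant to be used (the wrapped allocator is a placeholder for whichever HIP allocator is being masqueraded; the real wiring lives in the caching-allocator variant declared in the next header):

#include <ATen/hip/impl/HIPAllocatorMasqueradingAsCUDA.h>

#include <cstddef>

c10::DataPtr allocate_as_cuda(c10::Allocator* real_hip_allocator, std::size_t nbytes) {
  // Only the reported device type changes; ownership and the deleter still
  // come from the wrapped HIP allocator.
  c10::hip::HIPAllocatorMasqueradingAsCUDA masquerading(real_hip_allocator);
  c10::DataPtr p = masquerading.allocate(nbytes);
  // p.device() now reports DeviceType::CUDA with the original device index.
  return p;
}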
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h ADDED
@@ -0,0 +1,18 @@
1
+ #pragma once
2
+
3
+ #include <c10/hip/HIPCachingAllocator.h>
4
+ #include <ATen/hip/impl/HIPAllocatorMasqueradingAsCUDA.h>
5
+ #include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h>
6
+
7
+ namespace c10 {
8
+ // forward declaration
9
+ class DataPtr;
10
+ namespace hip {
11
+ namespace HIPCachingAllocatorMasqueradingAsCUDA {
12
+
13
+ C10_HIP_API Allocator* get();
14
+ C10_HIP_API void recordStreamMasqueradingAsCUDA(const DataPtr& ptr, HIPStreamMasqueradingAsCUDA stream);
15
+
16
+ } // namespace HIPCachingAllocatorMasqueradingAsCUDA
17
+ } // namespace hip
18
+ } // namespace c10
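A sketch of the intended call pattern (data_ptr and stream here are placeholders supplied by the caller); per the caching allocator's usual recordStream semantics, the block behind data_ptr is kept alive until work already queued on stream has finished:

#include <ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h>

void record_use_on_stream(const c10::DataPtr& data_ptr,
                          c10::hip::HIPStreamMasqueradingAsCUDA stream) {
  // The allocator that backs "CUDA" tensors under ROCm (see the previous header).
  c10::Allocator* alloc = c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::get();
  c10::hip::HIPCachingAllocatorMasqueradingAsCUDA::recordStreamMasqueradingAsCUDA(data_ptr, stream);
  (void)alloc;
}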
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h ADDED
@@ -0,0 +1,353 @@
1
+ #pragma once
2
+
3
+ #include <ATen/hip/HIPConfig.h>
4
+
5
+ // The includes of HIPGuard.h
6
+ #include <c10/hip/impl/HIPGuardImpl.h>
7
+ #include <c10/hip/HIPMacros.h>
8
+ #include <c10/core/DeviceType.h>
9
+ #include <c10/core/impl/InlineDeviceGuard.h>
10
+ #include <c10/core/impl/InlineStreamGuard.h>
11
+ #include <c10/util/Exception.h>
12
+
13
+ #include <c10/hip/impl/HIPGuardImpl.h>
14
+
15
+ #include <ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h>
16
+ #include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h>
17
+
18
+ // Use of c10::hip namespace here makes hipification easier, because
19
+ // I don't have to also fix namespaces. Sorry!
20
+ namespace c10 { namespace hip {
21
+
22
+ // Note [Masquerading as CUDA]
23
+ // ~~~~~~~~~~~~~~~~~~~~~~~~~~~
24
+ // c10_hip is very easy to understand: it is HIPified from c10_cuda,
25
+ // and anywhere you said CUDA, the source code now says HIP. HIPified
26
+ // PyTorch is much harder to understand: it is HIPified from regular
27
+ // PyTorch, yes, but NO source-to-source translation from CUDA to
28
+ // HIP occurs; instead, anywhere we see "CUDA", it actually means "HIP".
29
+ // For example, when you use HIPified PyTorch, you say x.cuda() to
30
+ // move a tensor onto ROCm device. We call this situation "HIP
31
+ // masquerading as CUDA".
32
+ //
33
+ // This leads to a very awkward situation when we want to call c10_hip
34
+ // code from PyTorch, since c10_hip is expecting things to be called
35
+ // HIP, but PyTorch is calling them CUDA (masquerading as HIP). To
36
+ // fix this impedance mismatch, we have MasqueradingAsCUDA variants
37
+ // for all c10_hip classes. These translate between the "HIP" and "CUDA
38
+ // masquerading as HIP" worlds. For example,
39
+ // HIPGuardImplMasqueradingAsCUDA (this file) provides something like a
40
+ // HIPGuardImpl, but it reports its DeviceType as CUDA (e.g., type()
41
+ // returns CUDA, getDevice() reports the current HIP device as a CUDA
42
+ // device.)
43
+ //
44
+ // We should be able to delete all of these classes entirely once
45
+ // we switch PyTorch to calling a HIP a HIP.
46
+ //
47
+ // When you add a new MasqueradingAsCUDA class/function, you need to
48
+ // also update the rewrite rules in torch/utils/hipify/cuda_to_hip_mappings.py
49
+ //
50
+ //
51
+ //
52
+ // By the way, note that the cpp file associated with this also
53
+ // *overwrites* the entry in the DeviceGuardImpl registry for CUDA with
54
+ // this HIP implementation.
55
+
56
+ struct HIPGuardImplMasqueradingAsCUDA final : public c10::impl::DeviceGuardImplInterface {
57
+ static constexpr c10::DeviceType static_type = c10::DeviceType::CUDA;
58
+ HIPGuardImplMasqueradingAsCUDA() {}
59
+ HIPGuardImplMasqueradingAsCUDA(c10::DeviceType t) {
60
+ TORCH_INTERNAL_ASSERT(t == c10::DeviceType::CUDA);
61
+ }
62
+ c10::DeviceType type() const override {
63
+ return c10::DeviceType::CUDA;
64
+ }
65
+ Device exchangeDevice(Device d) const override {
66
+ TORCH_INTERNAL_ASSERT(d.is_cuda());
67
+ Device old_device = getDevice();
68
+ if (old_device.index() != d.index()) {
69
+ C10_HIP_CHECK(hipSetDevice(d.index()));
70
+ }
71
+ return old_device;
72
+ }
73
+ Device getDevice() const override {
74
+ int device;
75
+ C10_HIP_CHECK(hipGetDevice(&device));
76
+ return Device(c10::DeviceType::CUDA, device);
77
+ }
78
+ void setDevice(Device d) const override {
79
+ TORCH_INTERNAL_ASSERT(d.is_cuda());
80
+ C10_HIP_CHECK(hipSetDevice(d.index()));
81
+ }
82
+ void uncheckedSetDevice(Device d) const noexcept override {
83
+ C10_HIP_CHECK_WARN(hipSetDevice(d.index()));
84
+ }
85
+ Stream getStream(Device d) const noexcept override {
86
+ return getCurrentHIPStreamMasqueradingAsCUDA(d.index()).unwrap();
87
+ }
88
+ Stream getDefaultStream(Device d) const override {
89
+ return getDefaultHIPStreamMasqueradingAsCUDA(d.index());
90
+ }
91
+ Stream getStreamFromGlobalPool(Device d, bool isHighPriority = false) const override {
92
+ return getStreamFromPoolMasqueradingAsCUDA(isHighPriority, d.index());
93
+ }
94
+ Stream exchangeStream(Stream s) const noexcept override {
95
+ HIPStreamMasqueradingAsCUDA cs(s);
96
+ auto old_stream = getCurrentHIPStreamMasqueradingAsCUDA(s.device().index());
97
+ setCurrentHIPStreamMasqueradingAsCUDA(cs);
98
+ return old_stream.unwrap();
99
+ }
100
+ DeviceIndex deviceCount() const noexcept override {
101
+ int deviceCnt;
102
+ hipError_t _err;
103
+ _err = hipGetDeviceCount(&deviceCnt);
104
+ #if defined(USE_ROCM) && (ROCM_VERSION < 50201)
105
+ if(_err == hipErrorInvalidDevice)
106
+ return 0;
107
+ #endif
108
+ if(_err != hipErrorNoDevice && _err != hipSuccess)
109
+ C10_HIP_CHECK(_err);
110
+ return deviceCnt;
111
+ }
112
+
113
+ // Event-related functions
114
+ // Note: hipEventCreateWithFlags should be called on the same device as
115
+ // the recording stream's device.
116
+ void createEvent(
117
+ hipEvent_t* hip_event,
118
+ const EventFlag flag) const {
119
+ // Maps PyTorch's Event::Flag to HIP flag
120
+ auto hip_flag = hipEventDefault;
121
+ switch (flag) {
122
+ case EventFlag::PYTORCH_DEFAULT:
123
+ case EventFlag::HIP_EVENT_DISABLE_TIMING:
124
+ hip_flag = hipEventDisableTiming;
125
+ break;
126
+ case EventFlag::BACKEND_DEFAULT:
127
+ case EventFlag::HIP_EVENT_DEFAULT:
128
+ hip_flag = hipEventDefault;
129
+ break;
130
+ default:
131
+ TORCH_CHECK(false, "HIP event received unknown flag");
132
+ }
133
+
134
+ C10_HIP_CHECK(hipEventCreateWithFlags(hip_event, hip_flag));
135
+ }
136
+
137
+ void destroyEvent(
138
+ void* event,
139
+ const DeviceIndex device_index) const noexcept override {
140
+ if (!event) return;
141
+ auto hip_event = static_cast<hipEvent_t>(event);
142
+ int orig_device;
143
+ C10_HIP_CHECK_WARN(hipGetDevice(&orig_device));
144
+ C10_HIP_CHECK_WARN(hipSetDevice(device_index));
145
+ C10_HIP_CHECK_WARN(hipEventDestroy(hip_event));
146
+ C10_HIP_CHECK_WARN(hipSetDevice(orig_device));
147
+ }
148
+
149
+ void record(void** event,
150
+ const Stream& stream,
151
+ const DeviceIndex device_index,
152
+ const EventFlag flag) const override {
153
+ TORCH_CHECK(device_index == -1 || device_index == stream.device_index(),
154
+ "Event device index ",
155
+ device_index,
156
+ " does not match recording stream's device index ",
157
+ stream.device_index(),
158
+ ".");
159
+
160
+ hipEvent_t hip_event = static_cast<hipEvent_t>(*event);
161
+ HIPStreamMasqueradingAsCUDA hip_stream{stream};
162
+
163
+ // Moves to stream's device to record
164
+ const auto orig_device = getDevice();
165
+ setDevice(stream.device());
166
+
167
+ // Creates the event (lazily)
168
+ if (!hip_event) createEvent(&hip_event, flag);
169
+ C10_HIP_CHECK(hipEventRecord(hip_event, hip_stream));
170
+ // Makes the void* point to the (possibly just allocated) HIP event
171
+ *event = hip_event;
172
+
173
+ // Resets device
174
+ setDevice(orig_device);
175
+ }
176
+
177
+ void block(
178
+ void* event,
179
+ const Stream& stream) const override {
180
+ if (!event) return;
181
+ hipEvent_t hip_event = static_cast<hipEvent_t>(event);
182
+ HIPStreamMasqueradingAsCUDA hip_stream{stream};
183
+ const auto orig_device = getDevice();
184
+ setDevice(stream.device());
185
+ C10_HIP_CHECK(hipStreamWaitEvent(
186
+ hip_stream,
187
+ hip_event,
188
+ /*flags (must be zero)=*/ 0));
189
+ setDevice(orig_device);
190
+ }
191
+
192
+ bool queryEvent(void* event) const override {
193
+ if (!event) return true;
194
+ hipEvent_t hip_event = static_cast<hipEvent_t>(event);
195
+ const hipError_t err = hipEventQuery(hip_event);
196
+ if (err != hipErrorNotReady) C10_HIP_CHECK(err);
197
+ else {
198
+ // ignore and clear the error if not ready
199
+ (void)hipGetLastError();
200
+ }
201
+ return (err == hipSuccess);
202
+ }
203
+
204
+ // Stream-related functions
205
+ bool queryStream(const Stream& stream) const override {
206
+ HIPStreamMasqueradingAsCUDA hip_stream{stream};
207
+ return hip_stream.query();
208
+ }
209
+
210
+ void synchronizeStream(const Stream& stream) const override {
211
+ HIPStreamMasqueradingAsCUDA hip_stream{stream};
212
+ hip_stream.synchronize();
213
+ }
214
+
215
+ void recordDataPtrOnStream(
216
+ const c10::DataPtr& data_ptr,
217
+ const Stream& stream) const override {
218
+ HIPStreamMasqueradingAsCUDA hip_stream{stream};
219
+ HIPCachingAllocatorMasqueradingAsCUDA::recordStreamMasqueradingAsCUDA(data_ptr, hip_stream);
220
+ }
221
+ };
222
+
223
+ // All of the guards which have HIPGuardImpl burned in need to also have
224
+ // variants using HIPGuardImplMasqueradingAsCUDA.
225
+
226
+ /// This code is all a direct copy from c10/cuda/HIPGuardMasqueradingAsCUDA.h, but with
227
+ /// the correct InlineDeviceGuard burned in. Sorry about the
228
+ /// copy-pasting.
229
+
230
+ struct HIPGuardMasqueradingAsCUDA {
231
+ explicit HIPGuardMasqueradingAsCUDA() = delete;
232
+ explicit HIPGuardMasqueradingAsCUDA(DeviceIndex device_index) : guard_(device_index) {}
233
+ explicit HIPGuardMasqueradingAsCUDA(Device device) : guard_(device) {}
234
+
235
+ HIPGuardMasqueradingAsCUDA(const HIPGuardMasqueradingAsCUDA&) = delete;
236
+ HIPGuardMasqueradingAsCUDA& operator=(const HIPGuardMasqueradingAsCUDA&) = delete;
237
+ HIPGuardMasqueradingAsCUDA(HIPGuardMasqueradingAsCUDA&& other) = delete;
238
+ HIPGuardMasqueradingAsCUDA& operator=(HIPGuardMasqueradingAsCUDA&& other) = delete;
239
+
240
+ void set_device(Device device) { guard_.set_device(device); }
241
+ void reset_device(Device device) { guard_.reset_device(device); }
242
+ void set_index(DeviceIndex device_index) { guard_.set_index(device_index); }
243
+ Device original_device() const { return guard_.original_device(); }
244
+ Device current_device() const { return guard_.current_device(); }
245
+
246
+ private:
247
+ c10::impl::InlineDeviceGuard<HIPGuardImplMasqueradingAsCUDA> guard_;
248
+ };
249
+
250
+ struct OptionalHIPGuardMasqueradingAsCUDA {
251
+ explicit OptionalHIPGuardMasqueradingAsCUDA() : guard_() {}
252
+ explicit OptionalHIPGuardMasqueradingAsCUDA(optional<Device> device_opt) : guard_(device_opt) {}
253
+ explicit OptionalHIPGuardMasqueradingAsCUDA(optional<DeviceIndex> device_index_opt) : guard_(device_index_opt) {}
254
+
255
+ OptionalHIPGuardMasqueradingAsCUDA(const OptionalHIPGuardMasqueradingAsCUDA&) = delete;
256
+ OptionalHIPGuardMasqueradingAsCUDA& operator=(const OptionalHIPGuardMasqueradingAsCUDA&) = delete;
257
+ OptionalHIPGuardMasqueradingAsCUDA(OptionalHIPGuardMasqueradingAsCUDA&& other) = delete;
258
+ OptionalHIPGuardMasqueradingAsCUDA& operator=(OptionalHIPGuardMasqueradingAsCUDA&& other) = delete;
259
+
260
+ void set_device(Device device) { guard_.set_device(device); }
261
+ void reset_device(Device device) { guard_.reset_device(device); }
262
+ void set_index(DeviceIndex device_index) { guard_.set_index(device_index); }
263
+ optional<Device> original_device() const { return guard_.original_device(); }
264
+ optional<Device> current_device() const { return guard_.current_device(); }
265
+ void reset() { guard_.reset(); }
266
+
267
+ private:
268
+ c10::impl::InlineOptionalDeviceGuard<HIPGuardImplMasqueradingAsCUDA> guard_;
269
+ };
270
+
271
+ struct HIPStreamGuardMasqueradingAsCUDA {
272
+ explicit HIPStreamGuardMasqueradingAsCUDA() = delete;
273
+ explicit HIPStreamGuardMasqueradingAsCUDA(Stream stream) : guard_(stream) {}
274
+ HIPStreamGuardMasqueradingAsCUDA(const HIPStreamGuardMasqueradingAsCUDA&) = delete;
275
+ HIPStreamGuardMasqueradingAsCUDA& operator=(const HIPStreamGuardMasqueradingAsCUDA&) = delete;
276
+ HIPStreamGuardMasqueradingAsCUDA(HIPStreamGuardMasqueradingAsCUDA&& other) = delete;
277
+ HIPStreamGuardMasqueradingAsCUDA& operator=(HIPStreamGuardMasqueradingAsCUDA&& other) = delete;
278
+
279
+ void reset_stream(Stream stream) { guard_.reset_stream(stream); }
280
+
281
+ HIPStreamMasqueradingAsCUDA original_stream() const {
282
+ return HIPStreamMasqueradingAsCUDA(HIPStreamMasqueradingAsCUDA::UNCHECKED, guard_.original_stream());
283
+ }
284
+ HIPStreamMasqueradingAsCUDA current_stream() const {
285
+ return HIPStreamMasqueradingAsCUDA(HIPStreamMasqueradingAsCUDA::UNCHECKED, guard_.current_stream());
286
+ }
287
+
288
+ Device current_device() const { return guard_.current_device(); }
289
+ Device original_device() const { return guard_.original_device(); }
290
+
291
+ private:
292
+ c10::impl::InlineStreamGuard<HIPGuardImplMasqueradingAsCUDA> guard_;
293
+ };
294
+
295
+ struct OptionalHIPStreamGuardMasqueradingAsCUDA {
296
+ explicit OptionalHIPStreamGuardMasqueradingAsCUDA() : guard_() {}
297
+ explicit OptionalHIPStreamGuardMasqueradingAsCUDA(Stream stream) : guard_(stream) {}
298
+ explicit OptionalHIPStreamGuardMasqueradingAsCUDA(optional<Stream> stream_opt) : guard_(stream_opt) {}
299
+
300
+ OptionalHIPStreamGuardMasqueradingAsCUDA(const OptionalHIPStreamGuardMasqueradingAsCUDA&) = delete;
301
+ OptionalHIPStreamGuardMasqueradingAsCUDA& operator=(const OptionalHIPStreamGuardMasqueradingAsCUDA&) = delete;
302
+ OptionalHIPStreamGuardMasqueradingAsCUDA(OptionalHIPStreamGuardMasqueradingAsCUDA&& other) = delete;
303
+ OptionalHIPStreamGuardMasqueradingAsCUDA& operator=(OptionalHIPStreamGuardMasqueradingAsCUDA&& other) = delete;
304
+
305
+ void reset_stream(Stream stream) { guard_.reset_stream(stream); }
306
+
307
+ optional<HIPStreamMasqueradingAsCUDA> original_stream() const {
308
+ auto r = guard_.original_stream();
309
+ if (r.has_value()) {
310
+ return make_optional(HIPStreamMasqueradingAsCUDA(HIPStreamMasqueradingAsCUDA::UNCHECKED, r.value()));
311
+ } else {
312
+ return nullopt;
313
+ }
314
+ }
315
+
316
+ optional<HIPStreamMasqueradingAsCUDA> current_stream() const {
317
+ auto r = guard_.current_stream();
318
+ if (r.has_value()) {
319
+ return make_optional(HIPStreamMasqueradingAsCUDA(HIPStreamMasqueradingAsCUDA::UNCHECKED, r.value()));
320
+ } else {
321
+ return nullopt;
322
+ }
323
+ }
324
+
325
+ void reset() { guard_.reset(); }
326
+
327
+ private:
328
+ c10::impl::InlineOptionalStreamGuard<HIPGuardImplMasqueradingAsCUDA> guard_;
329
+ };
330
+
331
+ struct HIPMultiStreamGuardMasqueradingAsCUDA {
332
+ explicit HIPMultiStreamGuardMasqueradingAsCUDA(ArrayRef<HIPStreamMasqueradingAsCUDA> streams)
333
+ : guard_(unwrapStreams(streams)) {}
334
+
335
+ HIPMultiStreamGuardMasqueradingAsCUDA(const HIPMultiStreamGuardMasqueradingAsCUDA&) = delete;
336
+ HIPMultiStreamGuardMasqueradingAsCUDA& operator=(const HIPMultiStreamGuardMasqueradingAsCUDA&) = delete;
337
+ HIPMultiStreamGuardMasqueradingAsCUDA(HIPMultiStreamGuardMasqueradingAsCUDA&& other) = delete;
338
+ HIPMultiStreamGuardMasqueradingAsCUDA& operator=(HIPMultiStreamGuardMasqueradingAsCUDA&& other) = delete;
339
+
340
+ private:
341
+ c10::impl::InlineMultiStreamGuard<HIPGuardImplMasqueradingAsCUDA> guard_;
342
+
343
+ static std::vector<Stream> unwrapStreams(ArrayRef<HIPStreamMasqueradingAsCUDA> hipStreams) {
344
+ std::vector<Stream> streams;
345
+ streams.reserve(hipStreams.size());
346
+ for (const HIPStreamMasqueradingAsCUDA& hipStream : hipStreams) {
347
+ streams.push_back(hipStream);
348
+ }
349
+ return streams;
350
+ }
351
+ };
352
+
353
+ }} // namespace c10::hip
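Per Note [Masquerading as CUDA] above, these guard variants behave exactly like their CUDA counterparts; a minimal usage sketch (device index 1 is arbitrary):

#include <ATen/hip/impl/HIPGuardImplMasqueradingAsCUDA.h>

void run_on_device_one() {
  // Switches the current (HIP) device for this scope while reporting it as a
  // CUDA device; the original device is restored when the guard is destroyed.
  c10::hip::HIPGuardMasqueradingAsCUDA guard(/*device_index=*/1);
  // ... enqueue work on device 1 here ...
}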
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h ADDED
@@ -0,0 +1,130 @@
1
+ #pragma once
2
+
3
+ #include <c10/hip/HIPStream.h>
4
+
5
+ // Use of c10::hip namespace here makes hipification easier, because
6
+ // I don't have to also fix namespaces. Sorry!
7
+ namespace c10 { namespace hip {
8
+
9
+ // See Note [Masquerading as CUDA] for motivation
10
+
11
+ class HIPStreamMasqueradingAsCUDA {
12
+ public:
13
+
14
+ enum Unchecked { UNCHECKED };
15
+
16
+ explicit HIPStreamMasqueradingAsCUDA(Stream stream)
17
+ : HIPStreamMasqueradingAsCUDA(UNCHECKED, stream) {
18
+ // We did the coercion unchecked; check that it was right.
19
+ TORCH_CHECK(stream.device().is_cuda() /* !!! */);
20
+ }
21
+
22
+ explicit HIPStreamMasqueradingAsCUDA(Unchecked, Stream stream)
23
+ // Unsafely coerce the "CUDA" stream into a HIP stream
24
+ : stream_(
25
+ HIPStream(
26
+ Stream(
27
+ Stream::UNSAFE,
28
+ Device(c10::DeviceType::HIP, stream.device_index()),
29
+ stream.id())
30
+ )
31
+ ) {}
32
+
33
+ // New constructor, just for this. Does NOT coerce.
34
+ explicit HIPStreamMasqueradingAsCUDA(HIPStream stream) : stream_(stream) {}
35
+
36
+ bool operator==(const HIPStreamMasqueradingAsCUDA& other) const noexcept {
37
+ return stream_ == other.stream_;
38
+ }
39
+
40
+ bool operator!=(const HIPStreamMasqueradingAsCUDA& other) const noexcept {
41
+ return stream_ != other.stream_;
42
+ }
43
+
44
+ operator hipStream_t() const { return stream_.stream(); }
45
+
46
+ operator Stream() const {
47
+ // Unsafely coerce HIP stream into a "CUDA" stream
48
+ return Stream(Stream::UNSAFE, device(), id());
49
+ }
50
+
51
+ DeviceIndex device_index() const { return stream_.device_index(); }
52
+
53
+ // Unsafely coerce HIP device into CUDA device
54
+ c10::DeviceType device_type() const { return c10::DeviceType::CUDA; }
55
+
56
+ Device device() const {
57
+ // Unsafely coerce HIP device into CUDA device
58
+ return Device(c10::DeviceType::CUDA, stream_.device_index());
59
+ }
60
+
61
+ StreamId id() const { return stream_.id(); }
62
+ bool query() const { return stream_.query(); }
63
+ void synchronize() const { stream_.synchronize(); }
64
+ int priority() const { return stream_.priority(); }
65
+ hipStream_t stream() const { return stream_.stream(); }
66
+
67
+ Stream unwrap() const {
68
+ // Unsafely coerce HIP stream into "CUDA" stream
69
+ return Stream(Stream::UNSAFE, device(), id());
70
+ }
71
+
72
+ c10::StreamData3 pack3() const noexcept {
73
+ // Unsafely coerce HIP stream into "CUDA" stream before packing
74
+ return unwrap().pack3();
75
+ }
76
+
77
+ static HIPStreamMasqueradingAsCUDA unpack3(StreamId stream_id,
78
+ DeviceIndex device_index,
79
+ c10::DeviceType device_type) {
80
+ // NB: constructor manages CUDA->HIP translation for us
81
+ return HIPStreamMasqueradingAsCUDA(Stream::unpack3(
82
+ stream_id, device_index, device_type));
83
+ }
84
+
85
+ static std::tuple<int, int> priority_range() { return HIPStream::priority_range(); }
86
+
87
+ // New method, gets the underlying HIPStream
88
+ HIPStream hip_stream() const { return stream_; }
89
+
90
+ private:
91
+ HIPStream stream_;
92
+ };
93
+
94
+ HIPStreamMasqueradingAsCUDA
95
+ inline getStreamFromPoolMasqueradingAsCUDA(const bool isHighPriority = false, DeviceIndex device = -1) {
96
+ return HIPStreamMasqueradingAsCUDA(getStreamFromPool(isHighPriority, device));
97
+ }
98
+
99
+ HIPStreamMasqueradingAsCUDA
100
+ inline getStreamFromExternalMasqueradingAsCUDA(hipStream_t ext_stream, DeviceIndex device) {
101
+ return HIPStreamMasqueradingAsCUDA(getStreamFromExternal(ext_stream, device));
102
+ }
103
+
104
+ inline HIPStreamMasqueradingAsCUDA getDefaultHIPStreamMasqueradingAsCUDA(DeviceIndex device_index = -1) {
105
+ return HIPStreamMasqueradingAsCUDA(getDefaultHIPStream(device_index));
106
+ }
107
+
108
+ inline HIPStreamMasqueradingAsCUDA getCurrentHIPStreamMasqueradingAsCUDA(DeviceIndex device_index = -1) {
109
+ return HIPStreamMasqueradingAsCUDA(getCurrentHIPStream(device_index));
110
+ }
111
+
112
+ inline void setCurrentHIPStreamMasqueradingAsCUDA(HIPStreamMasqueradingAsCUDA stream) {
113
+ setCurrentHIPStream(stream.hip_stream());
114
+ }
115
+
116
+ inline std::ostream& operator<<(std::ostream& stream, const HIPStreamMasqueradingAsCUDA& s) {
117
+ stream << s.hip_stream() << " (masquerading as CUDA)";
118
+ return stream;
119
+ }
120
+
121
+ }} // namespace c10::hip
122
+
123
+ namespace std {
124
+ template <>
125
+ struct hash<c10::hip::HIPStreamMasqueradingAsCUDA> {
126
+ size_t operator()(c10::hip::HIPStreamMasqueradingAsCUDA s) const noexcept {
127
+ return std::hash<c10::Stream>{}(s.unwrap());
128
+ }
129
+ };
130
+ } // namespace std
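A sketch of the CUDA/HIP round trip this wrapper performs (device 0 is arbitrary):

#include <ATen/hip/impl/HIPStreamMasqueradingAsCUDA.h>

void stream_round_trip() {
  auto s = c10::hip::getStreamFromPoolMasqueradingAsCUDA(/*isHighPriority=*/false, /*device=*/0);
  hipStream_t raw = s.stream();      // the underlying HIP stream handle
  c10::Stream generic = s.unwrap();  // a generic Stream that claims DeviceType::CUDA
  // The checked constructor verifies that the generic stream claims to be CUDA.
  c10::hip::HIPStreamMasqueradingAsCUDA again(generic);
  (void)raw; (void)again;
}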
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/miopen/Descriptors.h ADDED
@@ -0,0 +1,146 @@
1
+ #pragma once
2
+
3
+ #include <ATen/miopen/Exceptions.h>
4
+
5
+ #include <ATen/miopen/miopen-wrapper.h>
6
+ #include <ATen/core/Tensor.h>
7
+ #include <ATen/TensorUtils.h>
8
+
9
+ namespace at { namespace native {
10
+
11
+ inline int dataSize(miopenDataType_t dataType)
12
+ {
13
+ switch (dataType) {
14
+ case miopenHalf: return 2;
15
+ case miopenFloat: return 4;
16
+ case miopenBFloat16: return 2;
17
+ default: return 8;
18
+ }
19
+ }
20
+
21
+ template <typename T, miopenStatus_t (*dtor)(T*)>
22
+ struct DescriptorDeleter {
23
+ void operator()(T* x) {
24
+ if (x != nullptr) {
25
+ MIOPEN_CHECK(dtor(x));
26
+ }
27
+ }
28
+ };
29
+
30
+ // A generic class for wrapping MIOpen descriptor types. All you need
31
+ // is to give the underlying type the Descriptor_t points to (usually,
32
+ // if it's miopenTensorDescriptor_t it points to miopenTensorStruct),
33
+ // the constructor and the destructor. Subclasses are responsible
34
+ // for defining a set() function to actually set the descriptor.
35
+ //
36
+ // Descriptors default construct to a nullptr, and have a descriptor
37
+ // initialized the first time you call set() or any other initializing
38
+ // function.
39
+ template <typename T, miopenStatus_t (*ctor)(T**), miopenStatus_t (*dtor)(T*)>
40
+ class Descriptor
41
+ {
42
+ public:
43
+ // Use desc() to access the underlying descriptor pointer in
44
+ // a read-only fashion. Most client code should use this.
45
+ // If the descriptor was never initialized, this will return
46
+ // nullptr.
47
+ T* desc() const { return desc_.get(); }
48
+ T* desc() { return desc_.get(); }
49
+
50
+ // Use mut_desc() to access the underlying descriptor pointer
51
+ // if you intend to modify what it points to (e.g., using
52
+ // miopenSetFooDescriptor). This will ensure that the descriptor
53
+ // is initialized. Code in this file will use this function.
54
+ T* mut_desc() { init(); return desc_.get(); }
55
+ protected:
56
+ void init() {
57
+ if (desc_ == nullptr) {
58
+ T* raw_desc;
59
+ MIOPEN_CHECK(ctor(&raw_desc));
60
+ desc_.reset(raw_desc);
61
+ }
62
+ }
63
+ private:
64
+ std::unique_ptr<T, DescriptorDeleter<T, dtor>> desc_;
65
+ };
66
+
67
+ class TensorDescriptor
68
+ : public Descriptor<miopenTensorDescriptor,
69
+ &miopenCreateTensorDescriptor,
70
+ &miopenDestroyTensorDescriptor>
71
+ {
72
+ public:
73
+ TensorDescriptor() {}
74
+ explicit TensorDescriptor(const at::Tensor &t, size_t pad = 0) {
75
+ set(t, pad);
76
+ }
77
+
78
+ void set(const at::Tensor &t, size_t pad = 0);
79
+ void set(miopenDataType_t dataType, IntArrayRef sizes, IntArrayRef strides, size_t pad = 0);
80
+
81
+ void print();
82
+
83
+ private:
84
+ void set(miopenDataType_t dataType, int dim, int* size, int* stride) {
85
+ MIOPEN_CHECK(miopenSetTensorDescriptor(mut_desc(), dataType, dim, size, stride));
86
+ }
87
+ };
88
+
89
+ std::ostream& operator<<(std::ostream & out, const TensorDescriptor& d);
90
+
91
+ class FilterDescriptor
92
+ : public Descriptor<miopenTensorDescriptor,
93
+ &miopenCreateTensorDescriptor,
94
+ &miopenDestroyTensorDescriptor>
95
+ {
96
+ public:
97
+ void set(const at::Tensor &t, int64_t pad = 0) {
98
+ set(t, at::MemoryFormat::Contiguous, pad);
99
+ }
100
+
101
+ void set(const at::Tensor &t, const at::MemoryFormat memory_format, int64_t pad = 0);
102
+
103
+ private:
104
+ void set(miopenDataType_t dataType, int dim, int* size, int* stride) {
105
+ MIOPEN_CHECK(miopenSetTensorDescriptor(mut_desc(), dataType, dim, size, stride));
106
+ }
107
+ };
108
+
109
+ struct ConvolutionDescriptor
110
+ : public Descriptor<miopenConvolutionDescriptor,
111
+ &miopenCreateConvolutionDescriptor,
112
+ &miopenDestroyConvolutionDescriptor>
113
+ {
114
+ void set(miopenDataType_t dataType, miopenConvolutionMode_t c_mode, int dim, int* pad, int* stride, int * upscale /* aka dilation */, int groups, bool deterministic) {
115
+ MIOPEN_CHECK(miopenInitConvolutionNdDescriptor(mut_desc(), dim, pad, stride, upscale, c_mode));
116
+ MIOPEN_CHECK(miopenSetConvolutionGroupCount(mut_desc(), groups));
117
+ MIOPEN_CHECK(miopenSetConvolutionAttribute(mut_desc(), MIOPEN_CONVOLUTION_ATTRIB_DETERMINISTIC, deterministic ? 1 : 0));
118
+ }
119
+ };
120
+
121
+
122
+ struct RNNDescriptor
123
+ : public Descriptor<miopenRNNDescriptor,
124
+ &miopenCreateRNNDescriptor,
125
+ &miopenDestroyRNNDescriptor>
126
+ {
127
+ void set(int64_t hidden_size, int64_t num_layers, miopenRNNInputMode_t input_mode, miopenRNNDirectionMode_t direction, miopenRNNMode_t rnn_mode,
128
+ miopenRNNBiasMode_t bias_mode, miopenRNNAlgo_t algorithm, miopenDataType_t datatype) {
129
+ MIOPEN_CHECK(miopenSetRNNDescriptor(mut_desc(), hidden_size, num_layers, input_mode, direction, rnn_mode, bias_mode, algorithm, datatype));
130
+ }
131
+ };
132
+
133
+ union Constant
134
+ {
135
+ float f;
136
+ double d;
137
+ Constant(miopenDataType_t dataType, double value) {
138
+ if (dataType == miopenHalf || dataType == miopenFloat || dataType == miopenBFloat16) {
139
+ f = static_cast<float>(value);
140
+ } else {
141
+ d = value;
142
+ }
143
+ }
144
+ };
145
+
146
+ }} // namespace
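A sketch of the lazy-initialization pattern the Descriptor comment above describes (the tensor argument is a placeholder):

#include <ATen/miopen/Descriptors.h>

void describe(const at::Tensor& t) {
  at::native::TensorDescriptor td;          // underlying descriptor is still nullptr
  td.set(t);                                // first use creates and fills it
  miopenTensorDescriptor_t raw = td.desc(); // read-only handle for MIOpen API calls
  (void)raw;
}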
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/miopen/Exceptions.h ADDED
@@ -0,0 +1,41 @@
1
+ #pragma once
2
+
3
+ #include <ATen/miopen/miopen-wrapper.h>
4
+ #include <string>
5
+ #include <stdexcept>
6
+ #include <sstream>
7
+
8
+ namespace at { namespace native {
9
+
10
+ class miopen_exception : public std::runtime_error {
11
+ public:
12
+ miopenStatus_t status;
13
+ miopen_exception(miopenStatus_t status, const char* msg)
14
+ : std::runtime_error(msg)
15
+ , status(status) {}
16
+ miopen_exception(miopenStatus_t status, const std::string& msg)
17
+ : std::runtime_error(msg)
18
+ , status(status) {}
19
+ };
20
+
21
+ inline void MIOPEN_CHECK(miopenStatus_t status)
22
+ {
23
+ if (status != miopenStatusSuccess) {
24
+ if (status == miopenStatusNotImplemented) {
25
+ throw miopen_exception(status, std::string(miopenGetErrorString(status)) +
26
+ ". This error may appear if you passed in a non-contiguous input.");
27
+ }
28
+ throw miopen_exception(status, miopenGetErrorString(status));
29
+ }
30
+ }
31
+
32
+ inline void HIP_CHECK(hipError_t error)
33
+ {
34
+ if (error != hipSuccess) {
35
+ std::string msg("HIP error: ");
36
+ msg += hipGetErrorString(error);
37
+ throw std::runtime_error(msg);
38
+ }
39
+ }
40
+
41
+ }} // namespace at::native
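A sketch of how the checkers above are meant to wrap raw MIOpen and HIP calls:

#include <ATen/miopen/Exceptions.h>
#include <hip/hip_runtime.h>

void checked_calls() {
  miopenTensorDescriptor_t desc;
  // Throws at::native::miopen_exception carrying the MIOpen error string.
  at::native::MIOPEN_CHECK(miopenCreateTensorDescriptor(&desc));
  at::native::MIOPEN_CHECK(miopenDestroyTensorDescriptor(desc));
  // Throws std::runtime_error prefixed with "HIP error: " on failure.
  at::native::HIP_CHECK(hipDeviceSynchronize());
}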
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/miopen/Handle.h ADDED
@@ -0,0 +1,9 @@
1
+ #pragma once
2
+
3
+ #include <ATen/miopen/miopen-wrapper.h>
4
+
5
+ namespace at { namespace native {
6
+
7
+ miopenHandle_t getMiopenHandle();
8
+
9
+ }} // namespace
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/miopen/Types.h ADDED
@@ -0,0 +1,12 @@
1
+ #pragma once
2
+
3
+ #include <ATen/miopen/miopen-wrapper.h>
4
+ #include <ATen/Tensor.h>
5
+
6
+ namespace at { namespace native {
7
+
8
+ miopenDataType_t getMiopenDataType(const at::Tensor& tensor);
9
+
10
+ int64_t miopen_version();
11
+
12
+ }} // namespace at::miopen
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/miopen/Utils.h ADDED
@@ -0,0 +1,18 @@
1
+ #pragma once
2
+
3
+ #include <ATen/core/Tensor.h>
4
+ #include <ATen/miopen/miopen-wrapper.h>
5
+ #include <ATen/miopen/Handle.h>
6
+
7
+ namespace at { namespace native {
8
+
9
+ // If any of the tensor's strides is zero, this function returns a contiguous
10
+ // copy of it; otherwise it returns the tensor unchanged.
11
+ inline Tensor contiguousIfZeroInStrides(const Tensor& t) {
12
+ for (auto s : t.strides()) {
13
+ if (s == 0) return t.contiguous();
14
+ }
15
+ return t;
16
+ }
17
+
18
+ }}
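For example, expanded (broadcast) tensors carry zero strides and are materialized by this helper; a small sketch:

#include <ATen/ATen.h>
#include <ATen/miopen/Utils.h>

void expand_then_materialize() {
  at::Tensor x = at::rand({1, 4}).expand({3, 4});           // stride 0 along dim 0
  at::Tensor y = at::native::contiguousIfZeroInStrides(x);  // contiguous copy
  at::Tensor z = at::native::contiguousIfZeroInStrides(y);  // returned unchanged
  (void)z;
}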
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/miopen/miopen-wrapper.h ADDED
@@ -0,0 +1,3 @@
1
+ #pragma once
2
+
3
+ #include <miopen/miopen.h>
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/mps/EmptyTensor.h ADDED
@@ -0,0 +1,29 @@
1
+ // Copyright © 2022 Apple Inc.
2
+
3
+ #pragma once
4
+ #include <ATen/core/TensorBase.h>
5
+
6
+ namespace at::detail {
7
+
8
+ C10_EXPORT TensorBase empty_mps(
9
+ IntArrayRef size,
10
+ c10::optional<ScalarType> dtype_opt,
11
+ c10::optional<Layout> layout_opt,
12
+ c10::optional<Device> device_opt,
13
+ c10::optional<bool> pin_memory_opt,
14
+ c10::optional<c10::MemoryFormat> memory_format_opt);
15
+ C10_EXPORT TensorBase empty_mps(
16
+ IntArrayRef size, const TensorOptions &options);
17
+
18
+ C10_EXPORT TensorBase empty_strided_mps(
19
+ IntArrayRef size,
20
+ IntArrayRef stride,
21
+ ScalarType dtype,
22
+ c10::optional<Device> device_opt);
23
+
24
+ C10_EXPORT TensorBase empty_strided_mps(
25
+ IntArrayRef size,
26
+ IntArrayRef stride,
27
+ const TensorOptions &options);
28
+
29
+ } // namespace at::detail
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/mps/IndexKernels.h ADDED
@@ -0,0 +1,573 @@
1
+ #pragma once
2
+
3
+ namespace at::mps {
4
+
5
+ static const char * indexing_metal_shaders = R"INDEX_METAL(
6
+ #include <metal_stdlib>
7
+ #include <metal_atomic>
8
+
9
+ using namespace metal;
10
+
11
+ #if __METAL_VERSION__ < 300
12
+ struct IndexAB {
13
+ // Allow up to 16 indices
14
+ metal::array<constant void *, 16> indexArray [[ id(0) ]];
15
+ };
16
+ #else
17
+ struct IndexAB {
18
+ constant int64_t* indexArray;
19
+ };
20
+
21
+ #endif
22
+
23
+ template<typename T>
24
+ kernel void index_select(
25
+ #if __METAL_VERSION__ >= 300
26
+ constant IndexAB * indexAB [[buffer(0)]],
27
+ #else
28
+ constant IndexAB & indexAB [[buffer(0)]],
29
+ #endif
30
+ constant void * indexSizes [[buffer(1)]],
31
+ constant void * indexStrides [[buffer(2)]],
32
+ constant uint3 * offsets [[buffer(3)]],
33
+ constant void * inputData [[buffer(4)]],
34
+ device void * outputData [[buffer(5)]],
35
+ constant uint32_t & num_indices [[buffer(6)]],
36
+ uint thread_index [[thread_position_in_grid]]) {
37
+ constant int64_t * index_sizes = (constant int64_t *)indexSizes;
38
+ constant int64_t * index_strides = (constant int64_t *)indexStrides;
39
+ int64_t offset = 0;
40
+ for (uint32_t i = 0; i < num_indices; i++) {
41
+ #if __METAL_VERSION__ >= 300
42
+ constant int64_t* indexArray = indexAB[i].indexArray;
43
+ #else
44
+ constant int64_t* indexArray = (constant int64_t*)indexAB.indexArray[i];
45
+ #endif
46
+ int64_t index = indexArray[offsets[thread_index].z / sizeof(int64_t)];
47
+ if (index < 0) {
48
+ index += index_sizes[i];
49
+ }
50
+ offset += index * index_strides[i];
51
+ }
52
+ device T * out = (device T*)((device char*)outputData + offsets[thread_index].x);
53
+ constant T * in = (constant T*)((constant char*)inputData + offsets[thread_index].y + offset);
54
+ *out = *in;
55
+ }
56
+
57
+ template<typename T>
58
+ void index_put_impl(
59
+ #if __METAL_VERSION__ >= 300
60
+ constant IndexAB * indexAB,
61
+ #else
62
+ constant IndexAB & indexAB,
63
+ #endif
64
+ constant int64_t * index_sizes,
65
+ constant int64_t * index_strides,
66
+ constant uint3 * offsets,
67
+ constant void * inputData,
68
+ device void * outputData,
69
+ constant uint32_t & num_indices,
70
+ uint thread_index
71
+ ){
72
+ int64_t offset = 0;
73
+ for (uint32_t i = 0; i < num_indices; i++) {
74
+ #if __METAL_VERSION__ >= 300
75
+ constant int64_t* indexArray = indexAB[i].indexArray;
76
+ #else
77
+ constant int64_t* indexArray = (constant int64_t*)indexAB.indexArray[i];
78
+ #endif
79
+ int64_t index = indexArray[offsets[thread_index].z / sizeof(int64_t)];
80
+
81
+ if (index < 0) {
82
+ index += index_sizes[i];
83
+ }
84
+ offset += index * index_strides[i];
85
+ }
86
+ device T * out = (device T*)((device char*)outputData + offsets[thread_index].x + offset);
87
+ constant T * in = (constant T*)((constant char*)inputData + offsets[thread_index].y);
88
+ *out = *in;
89
+ }
90
+
91
+ template<typename T>
92
+ kernel void index_put_serial(
93
+ #if __METAL_VERSION__ >= 300
94
+ constant IndexAB * indexAB [[buffer(0)]],
95
+ #else
96
+ constant IndexAB & indexAB [[buffer(0)]],
97
+ #endif
98
+ constant void * indexSizes [[buffer(1)]],
99
+ constant void * indexStrides [[buffer(2)]],
100
+ constant uint3 * offsets [[buffer(3)]],
101
+ constant void * inputData [[buffer(4)]],
102
+ device void * outputData [[buffer(5)]],
103
+ constant uint32_t & num_indices [[buffer(6)]],
104
+ constant uint * numIters [[buffer(7)]],
105
+ uint thread_index [[thread_position_in_grid]]) {
106
+
107
+ constant int64_t * index_sizes = (constant int64_t *)indexSizes;
108
+ constant int64_t * index_strides = (constant int64_t *)indexStrides;
109
+
110
+ for (uint iter_i = 0; iter_i < *numIters; iter_i++) {
111
+ index_put_impl<T>(indexAB, index_sizes, index_strides, offsets, inputData, outputData, num_indices, iter_i);
112
+ }
113
+ }
114
+
115
+ template<typename T>
116
+ kernel void index_put(
117
+ #if __METAL_VERSION__ >= 300
118
+ constant IndexAB * indexAB [[buffer(0)]],
119
+ #else
120
+ constant IndexAB & indexAB [[buffer(0)]],
121
+ #endif
122
+ constant void * indexSizes [[buffer(1)]],
123
+ constant void * indexStrides [[buffer(2)]],
124
+ constant uint3 * offsets [[buffer(3)]],
125
+ constant void * inputData [[buffer(4)]],
126
+ device void * outputData [[buffer(5)]],
127
+ constant uint32_t & num_indices [[buffer(6)]],
128
+ uint thread_index [[thread_position_in_grid]]) {
129
+
130
+ constant int64_t * index_sizes = (constant int64_t *)indexSizes;
131
+ constant int64_t * index_strides = (constant int64_t *)indexStrides;
132
+ index_put_impl<T>(indexAB, index_sizes, index_strides, offsets, inputData, outputData, num_indices, thread_index);
133
+ }
134
+
135
+ #if __METAL_VERSION__ < 300
136
+ #define REGISTER_INDEX_OP(DTYPE_SIZE, DTYPE, INDEX_OP_TYPE) \
137
+ template \
138
+ [[host_name("index_" #INDEX_OP_TYPE "_" #DTYPE_SIZE)]] \
139
+ kernel void index_ ## INDEX_OP_TYPE<DTYPE>( \
140
+ constant IndexAB & indexAB [[buffer(0)]], \
141
+ constant void * indexSizes [[buffer(1)]], \
142
+ constant void * indexStrides [[buffer(2)]], \
143
+ constant uint3 * offsets [[buffer(3)]], \
144
+ constant void * inputData [[buffer(4)]], \
145
+ device void * outputData [[buffer(5)]], \
146
+ constant uint32_t & num_indices [[buffer(6)]], \
147
+ uint thread_index [[thread_position_in_grid]]);
148
+ #else
149
+ #define REGISTER_INDEX_OP(DTYPE_SIZE, DTYPE, INDEX_OP_TYPE) \
150
+ template \
151
+ [[host_name("index_" #INDEX_OP_TYPE "_" #DTYPE_SIZE)]] \
152
+ kernel void index_ ## INDEX_OP_TYPE<DTYPE>( \
153
+ constant IndexAB * indexAB [[buffer(0)]], \
154
+ constant void * indexSizes [[buffer(1)]], \
155
+ constant void * indexStrides [[buffer(2)]], \
156
+ constant uint3 * offsets [[buffer(3)]], \
157
+ constant void * inputData [[buffer(4)]], \
158
+ device void * outputData [[buffer(5)]], \
159
+ constant uint32_t & num_indices [[buffer(6)]], \
160
+ uint thread_index [[thread_position_in_grid]]);
161
+ #endif
162
+
163
+ #define REGISTER_INDEX_OP_ALL_DTYPES(INDEX_OP_TYPE) \
164
+ REGISTER_INDEX_OP(8bit, char, INDEX_OP_TYPE); \
165
+ REGISTER_INDEX_OP(16bit, short, INDEX_OP_TYPE); \
166
+ REGISTER_INDEX_OP(32bit, int, INDEX_OP_TYPE); \
167
+ REGISTER_INDEX_OP(64bit, long, INDEX_OP_TYPE);
168
+
169
+ REGISTER_INDEX_OP_ALL_DTYPES(select);
170
+ REGISTER_INDEX_OP_ALL_DTYPES(put);
171
+
172
+ #if __METAL_VERSION__ < 300
173
+ #define REGISTER_SINGLE_THREADED_INDEX_OP(DTYPE_SIZE, DTYPE, INDEX_OP_TYPE) \
174
+ template \
175
+ [[host_name("index_" #INDEX_OP_TYPE "_" #DTYPE_SIZE)]] \
176
+ kernel void index_ ## INDEX_OP_TYPE<DTYPE>( \
177
+ constant IndexAB & indexAB [[buffer(0)]], \
178
+ constant void * indexSizes [[buffer(1)]], \
179
+ constant void * indexStrides [[buffer(2)]], \
180
+ constant uint3 * offsets [[buffer(3)]], \
181
+ constant void * inputData [[buffer(4)]], \
182
+ device void * outputData [[buffer(5)]], \
183
+ constant uint32_t & num_indices [[buffer(6)]], \
184
+ constant uint * numIters [[buffer(7)]], \
185
+ uint thread_index [[thread_position_in_grid]]);
186
+ #else
187
+ #define REGISTER_SINGLE_THREADED_INDEX_OP(DTYPE_SIZE, DTYPE, INDEX_OP_TYPE) \
188
+ template \
189
+ [[host_name("index_" #INDEX_OP_TYPE "_" #DTYPE_SIZE)]] \
190
+ kernel void index_ ## INDEX_OP_TYPE<DTYPE>( \
191
+ constant IndexAB * indexAB [[buffer(0)]], \
192
+ constant void * indexSizes [[buffer(1)]], \
193
+ constant void * indexStrides [[buffer(2)]], \
194
+ constant uint3 * offsets [[buffer(3)]], \
195
+ constant void * inputData [[buffer(4)]], \
196
+ device void * outputData [[buffer(5)]], \
197
+ constant uint32_t & num_indices [[buffer(6)]], \
198
+ constant uint * numIters [[buffer(7)]], \
199
+ uint thread_index [[thread_position_in_grid]]);
200
+ #endif
201
+
202
+ #define REGISTER_SINGLE_THREADED_INDEX_OP_ALL_DTYPES(INDEX_OP_TYPE) \
203
+ REGISTER_SINGLE_THREADED_INDEX_OP(8bit, char, INDEX_OP_TYPE); \
204
+ REGISTER_SINGLE_THREADED_INDEX_OP(16bit, short, INDEX_OP_TYPE); \
205
+ REGISTER_SINGLE_THREADED_INDEX_OP(32bit, int, INDEX_OP_TYPE); \
206
+ REGISTER_SINGLE_THREADED_INDEX_OP(64bit, long, INDEX_OP_TYPE);
207
+
208
+ REGISTER_SINGLE_THREADED_INDEX_OP_ALL_DTYPES(put_serial);
209
+
210
+ kernel void kernel_index_offsets(constant packed_uint3 * strides [[buffer(0)]],
211
+ device uint3 * data_offsets [[buffer(1)]],
212
+ constant uint * iter_shape [[buffer(2)]],
213
+ constant uint & num_dimensions [[buffer(3)]],
214
+ constant uint & num_offsets [[buffer(4)]],
215
+ uint thread_index [[thread_position_in_grid]]) {
216
+ data_offsets[thread_index] = 0;
217
+ uint32_t idx = thread_index;
218
+ for (uint32_t dim = 0; dim < num_dimensions; dim++) {
219
+ uint32_t remainder = idx % iter_shape[dim];
220
+ idx /= iter_shape[dim];
221
+
222
+ data_offsets[thread_index] += remainder * strides[dim];
223
+ }
224
+ }
225
+
226
+ kernel void kernel_index_offset(constant uint * strides [[buffer(0)]],
227
+ device uint * data_offsets [[buffer(1)]],
228
+ constant uint * iter_shape [[buffer(2)]],
229
+ constant uint & num_dimensions [[buffer(3)]],
230
+ uint thread_index [[thread_position_in_grid]]) {
231
+ data_offsets[thread_index] = 0;
232
+ uint32_t idx = thread_index;
233
+ for (uint32_t dim = 0; dim < num_dimensions; dim++) {
234
+ uint32_t reversed_dim = num_dimensions - dim -1;
235
+ uint32_t remainder = idx % iter_shape[reversed_dim];
236
+ idx /= iter_shape[reversed_dim];
237
+
238
+ data_offsets[thread_index] += remainder * strides[reversed_dim];
239
+ }
240
+ }
241
+
242
+ template<typename T, typename E>
243
+ kernel void index_put_accumulate_native_dtypes(
244
+ #if __METAL_VERSION__ >= 300
245
+ constant IndexAB * indexAB [[buffer(0)]],
246
+ #else
247
+ constant IndexAB & indexAB [[buffer(0)]],
248
+ #endif
249
+ constant void * indexSizes [[buffer(1)]],
250
+ constant void * indexStrides [[buffer(2)]],
251
+ constant uint3 * offsets [[buffer(3)]],
252
+ constant void * inputData [[buffer(4)]],
253
+ device void * outputData [[buffer(5)]],
254
+ constant uint32_t& num_indices [[buffer(6)]],
255
+ uint thread_index [[thread_position_in_grid]]) {
256
+ constant int64_t * index_sizes = (constant int64_t *)indexSizes;
257
+ constant int64_t * index_strides = (constant int64_t *)indexStrides;
258
+ int64_t offset = 0;
259
+ for (uint32_t i = 0; i < num_indices; i++) {
260
+ #if __METAL_VERSION__ >= 300
261
+ constant int64_t* indexArray = indexAB[i].indexArray;
262
+ #else
263
+ constant int64_t* indexArray = (constant int64_t*)indexAB.indexArray[i];
264
+ #endif
265
+ int64_t index = indexArray[offsets[thread_index].z / sizeof(int64_t)];
266
+ if (index < 0) {
267
+ index += index_sizes[i];
268
+ }
269
+ offset += index * index_strides[i];
270
+ }
271
+ device T * out = (device T*)((device char*)outputData + offsets[thread_index].x + offset);
272
+ constant E * in = (constant E*)((constant char*)inputData + offsets[thread_index].y);
273
+ atomic_fetch_add_explicit(out, *in, memory_order_relaxed);
274
+ }
275
+
276
+ template<typename T>
277
+ __attribute__((__always_inline__)) void atomic_fetch_add_relaxed(device void * addr, T value) {
278
+ device atomic_uint* uintAddr = (device atomic_uint*)addr;
279
+ uint expected = atomic_load_explicit(uintAddr, memory_order_relaxed);
280
+ T updated = as_type<T>(expected) + value;
281
+ while (!atomic_compare_exchange_weak_explicit(uintAddr, &expected, as_type<uint>(updated), memory_order_relaxed, memory_order_relaxed)) {
282
+ updated = as_type<T>(expected) + value;
283
+ }
284
+ }
285
+
286
+ template<typename T>
287
+ kernel void atomic_index_put_accumulate(
288
+ #if __METAL_VERSION__ >= 300
289
+ constant IndexAB * indexAB [[buffer(0)]],
290
+ #else
291
+ constant IndexAB & indexAB [[buffer(0)]],
292
+ #endif
293
+ constant void * indexSizes [[buffer(1)]],
294
+ constant void * indexStrides [[buffer(2)]],
295
+ constant uint3 * offsets [[buffer(3)]],
296
+ constant void * inputData [[buffer(4)]],
297
+ device void * outputData [[buffer(5)]],
298
+ constant uint32_t& num_indices [[buffer(6)]],
299
+ uint thread_index [[thread_position_in_grid]]) {
300
+ constant int64_t * index_sizes = (constant int64_t *)indexSizes;
301
+ constant int64_t * index_strides = (constant int64_t *)indexStrides;
302
+ int64_t offset = 0;
303
+ for (uint32_t i = 0; i < num_indices; i++) {
304
+ #if __METAL_VERSION__ >= 300
305
+ constant int64_t* indexArray = indexAB[i].indexArray;
306
+ #else
307
+ constant int64_t* indexArray = (constant int64_t*)indexAB.indexArray[i];
308
+ #endif
309
+ int64_t index = indexArray[offsets[thread_index].z / sizeof(int64_t)];
310
+ if (index < 0) {
311
+ index += index_sizes[i];
312
+ }
313
+ offset += index * index_strides[i];
314
+ }
315
+ device void * out = (device void*)((device char*)outputData + offsets[thread_index].x + offset);
316
+ constant T * in = (constant T*)((constant char*)inputData + offsets[thread_index].y);
317
+ atomic_fetch_add_relaxed<T>(out, *in);
318
+ }
319
+
320
+ template
321
+ [[host_name("index_put_accumulate_32bit_float")]]
322
+ kernel void atomic_index_put_accumulate<float>(
323
+ #if __METAL_VERSION__ >= 300
324
+ constant IndexAB * indexAB [[buffer(0)]],
325
+ #else
326
+ constant IndexAB & indexAB [[buffer(0)]],
327
+ #endif
328
+ constant void * indexSizes [[buffer(1)]],
329
+ constant void * indexStrides [[buffer(2)]],
330
+ constant uint3 * offsets [[buffer(3)]],
331
+ constant void * inputData [[buffer(4)]],
332
+ device void * outputData [[buffer(5)]],
333
+ constant uint32_t& num_indices [[buffer(6)]],
334
+ uint thread_index [[thread_position_in_grid]]);
335
+
336
+ template
337
+ [[host_name("index_put_accumulate_32bit_int")]]
338
+ kernel void index_put_accumulate_native_dtypes<atomic_int, int>(
339
+ #if __METAL_VERSION__ >= 300
340
+ constant IndexAB * indexAB [[buffer(0)]],
341
+ #else
342
+ constant IndexAB & indexAB [[buffer(0)]],
343
+ #endif
344
+ constant void * indexSizes [[buffer(1)]],
345
+ constant void * indexStrides [[buffer(2)]],
346
+ constant uint3 * offsets [[buffer(3)]],
347
+ constant void * inputData [[buffer(4)]],
348
+ device void * outputData [[buffer(5)]],
349
+ constant uint32_t& num_indices [[buffer(6)]],
350
+ uint thread_index [[thread_position_in_grid]]);
351
+ )INDEX_METAL";
352
+
353
+ static const char *SCATTER_OPS_TEMPLATE = R"METAL_SCATTER(
354
+ struct __attribute__ ((packed)) packed_uint5{{
355
+ uint32_t x; uint32_t y; uint32_t z; uint32_t w; uint32_t u;
356
+ }};
357
+
358
+ kernel void scatter_kernel_5(uint linear_index [[thread_position_in_grid]],
359
+ constant void * src_ [[buffer(0)]],
360
+ device void * dst_ [[buffer(1)]],
361
+ constant packed_uint5 & size [[buffer(2)]],
362
+ constant packed_uint5 & stride [[buffer(3)]],
363
+ constant uint32_t & numel [[buffer(4)]]) {{
364
+ if (linear_index >= numel) return;
365
+
366
+ constant {0} * src = (constant {0} *)src_;
367
+ device {1} * dst = (device {1} *)dst_;
368
+
369
+ packed_uint5 local_index;
370
+ local_index.x = linear_index / (size.u * size.w * size.z * size.y) % size.x;
371
+ local_index.y = linear_index / (size.u * size.w * size.z) % size.y;
372
+ local_index.z = linear_index / (size.u * size.w) % size.z;
373
+ local_index.w = linear_index / size.u % size.w;
374
+ local_index.u = linear_index % size.u;
375
+
376
+ packed_uint5 strided_index;
377
+ strided_index.x = local_index.x * stride.x;
378
+ strided_index.y = local_index.y * stride.y;
379
+ strided_index.z = local_index.z * stride.z;
380
+ strided_index.w = local_index.w * stride.w;
381
+ strided_index.u = local_index.u * stride.u;
382
+
383
+ dst[strided_index.x + strided_index.y + strided_index.z + strided_index.w + strided_index.u] = src[linear_index];
384
+ }}
385
+
386
+ kernel void scatter_kernel_4(uint linear_index [[thread_position_in_grid]],
387
+ constant void * src_ [[buffer(0)]],
388
+ device void * dst_ [[buffer(1)]],
389
+ constant packed_uint4 & size [[buffer(2)]],
390
+ constant packed_uint4 & stride [[buffer(3)]],
391
+ constant uint32_t & numel [[buffer(4)]]) {{
392
+ if (linear_index >= numel) return;
393
+
394
+ constant {0} * src = (constant {0} *)src_;
395
+ device {1} * dst = (device {1} *)dst_;
396
+
397
+ packed_uint4 local_index;
398
+ local_index.x = linear_index / (size[3] * size[2] * size[1]) % size[0];
399
+ local_index.y = linear_index / (size[3] * size[2]) % size[1];
400
+ local_index.z = linear_index / size[3] % size[2];
401
+ local_index.w = linear_index % size[3];
402
+
403
+ const packed_uint4 strided_index = local_index * stride;
404
+ dst[strided_index.x + strided_index.y + strided_index.z + strided_index.w] = src[linear_index];
405
+ }}
406
+
407
+ kernel void scatter_kernel_3(uint linear_index [[thread_position_in_grid]],
408
+ constant void * src_ [[buffer(0)]],
409
+ device void * dst_ [[buffer(1)]],
410
+ constant packed_uint3 & size [[buffer(2)]],
411
+ constant packed_uint3 & stride [[buffer(3)]],
412
+ constant uint32_t & numel [[buffer(4)]]) {{
413
+ if (linear_index >= numel) return;
414
+
415
+ constant {0} * src = (constant {0} *)src_;
416
+ device {1} * dst = (device {1} *)dst_;
417
+
418
+ packed_uint3 local_index;
419
+ local_index.x = linear_index / (size[2] * size[1]) % size[0];
420
+ local_index.y = linear_index / size[2] % size[1];
421
+ local_index.z = linear_index % size[2];
422
+
423
+ const packed_uint3 strided_index = local_index * stride;
424
+ dst[strided_index.x + strided_index.y + strided_index.z] = src[linear_index];
425
+ }}
426
+
427
+ kernel void scatter_kernel_2(uint linear_index [[thread_position_in_grid]],
428
+ constant void * src_ [[buffer(0)]],
429
+ device void * dst_ [[buffer(1)]],
430
+ constant packed_uint2 & size [[buffer(2)]],
431
+ constant packed_uint2 & stride [[buffer(3)]],
432
+ constant uint32_t & numel [[buffer(4)]]) {{
433
+ if (linear_index >= numel) return;
434
+
435
+ constant {0} * src = (constant {0} *)src_;
436
+ device {1} * dst = (device {1} *)dst_;
437
+
438
+ packed_uint2 local_index;
439
+ local_index.x = linear_index / size[1] % size[0];
440
+ local_index.y = linear_index % size[1];
441
+
442
+ const packed_uint2 strided_index = local_index * stride;
443
+ dst[strided_index.x + strided_index.y] = src[linear_index];
444
+ }}
445
+
446
+ kernel void scatter_kernel_1(uint linear_index [[thread_position_in_grid]],
447
+ constant void * src_ [[buffer(0)]],
448
+ device void * dst_ [[buffer(1)]],
449
+ constant int & size [[buffer(2)]],
450
+ constant int & stride [[buffer(3)]],
451
+ constant uint32_t & numel [[buffer(4)]]) {{
452
+ if (linear_index >= numel) return;
453
+
454
+ constant {0} * src = (constant {0} *)src_;
455
+ device {1} * dst = (device {1} *)dst_;
456
+
457
+ const int local_index = linear_index % size;
458
+ const int strided_index = local_index * stride;
459
+ dst[strided_index] = src[linear_index];
460
+ }}
461
+ )METAL_SCATTER";
462
+
463
+ static const char *GATHER_OPS_TEMPLATE = R"METAL_GATHER(
464
+ struct __attribute__ ((packed)) packed_uint5{{
465
+ uint32_t x; uint32_t y; uint32_t z; uint32_t w; uint32_t u;
466
+ }};
467
+
468
+ kernel void gather_kernel_5(uint linear_index [[thread_position_in_grid]],
469
+ constant void * src_ [[buffer(0)]],
470
+ device void * dst_ [[buffer(1)]],
471
+ constant packed_uint5 & size [[buffer(2)]],
472
+ constant packed_uint5 & stride [[buffer(3)]],
473
+ constant uint32_t & numel [[buffer(4)]]) {{
474
+ if (linear_index >= numel) return;
475
+
476
+ constant {0} * src = (constant {0} *)src_;
477
+ device {1} * dst = (device {1} *)dst_;
478
+
479
+
480
+ packed_uint5 local_index;
481
+ local_index.x = linear_index / (size.u * size.w * size.z * size.y) % size.x;
482
+ local_index.y = linear_index / (size.u * size.w * size.z) % size.y;
483
+ local_index.z = linear_index / (size.u * size.w) % size.z;
484
+ local_index.w = linear_index / size.u % size.w;
485
+ local_index.u = linear_index % size.u;
486
+
487
+ packed_uint5 strided_index;
488
+ strided_index.x = local_index.x * stride.x;
489
+ strided_index.y = local_index.y * stride.y;
490
+ strided_index.z = local_index.z * stride.z;
491
+ strided_index.w = local_index.w * stride.w;
492
+ strided_index.u = local_index.u * stride.u;
493
+
494
+ dst[linear_index] = src[strided_index.x + strided_index.y + strided_index.z + strided_index.w + strided_index.u];
495
+ }}
496
+
497
+ kernel void gather_kernel_4(uint linear_index [[thread_position_in_grid]],
498
+ constant void * src_ [[buffer(0)]],
499
+ device void * dst_ [[buffer(1)]],
500
+ constant packed_uint4 & size [[buffer(2)]],
501
+ constant packed_uint4 & stride [[buffer(3)]],
502
+ constant uint32_t & numel [[buffer(4)]]) {{
503
+ if (linear_index >= numel) return;
504
+
505
+ constant {0} * src = (constant {0} *)src_;
506
+ device {1} * dst = (device {1} *)dst_;
507
+
508
+ packed_uint4 local_index;
509
+ local_index.x = linear_index / (size[3] * size[2] * size[1]) % size[0];
510
+ local_index.y = linear_index / (size[3] * size[2]) % size[1];
511
+ local_index.z = linear_index / size[3] % size[2];
512
+ local_index.w = linear_index % size[3];
513
+
514
+ const packed_uint4 strided_index = local_index * stride;
515
+ dst[linear_index] = src[strided_index.x + strided_index.y + strided_index.z + strided_index.w];
516
+ }}
517
+
518
+ kernel void gather_kernel_3(uint linear_index [[thread_position_in_grid]],
519
+ constant void * src_ [[buffer(0)]],
520
+ device void * dst_ [[buffer(1)]],
521
+ constant packed_uint3 & size [[buffer(2)]],
522
+ constant packed_uint3 & stride [[buffer(3)]],
523
+ constant uint32_t & numel [[buffer(4)]]) {{
524
+ if (linear_index >= numel) return;
525
+
526
+ constant {0} * src = (constant {0} *)src_;
527
+ device {1} * dst = (device {1} *)dst_;
528
+
529
+ packed_uint3 local_index;
530
+ local_index.x = linear_index / (size[2] * size[1]) % size[0];
531
+ local_index.y = linear_index / size[2] % size[1];
532
+ local_index.z = linear_index % size[2];
533
+
534
+ const packed_uint3 strided_index = local_index * stride;
535
+ dst[linear_index] = src[strided_index.x + strided_index.y + strided_index.z];
536
+ }}
537
+
538
+ kernel void gather_kernel_2(uint linear_index [[thread_position_in_grid]],
539
+ constant void * src_ [[buffer(0)]],
540
+ device void * dst_ [[buffer(1)]],
541
+ constant packed_uint2 & size [[buffer(2)]],
542
+ constant packed_uint2 & stride [[buffer(3)]],
543
+ constant uint32_t & numel [[buffer(4)]]) {{
544
+ if (linear_index >= numel) return;
545
+
546
+ constant {0} * src = (constant {0} *)src_;
547
+ device {1} * dst = (device {1} *)dst_;
548
+
549
+ packed_uint2 local_index;
550
+ local_index.x = linear_index / size[1] % size[0];
551
+ local_index.y = linear_index % size[1];
552
+
553
+ const packed_uint2 strided_index = local_index * stride;
554
+ dst[linear_index] = src[strided_index.x + strided_index.y];
555
+ }}
556
+
557
+ kernel void gather_kernel_1(uint linear_index [[thread_position_in_grid]],
558
+ constant void * src_ [[buffer(0)]],
559
+ device void * dst_ [[buffer(1)]],
560
+ constant int & size [[buffer(2)]],
561
+ constant int & stride [[buffer(3)]],
562
+ constant uint32_t & numel [[buffer(4)]]) {{
563
+ if (linear_index >= numel) return;
564
+
565
+ constant {0} * src = (constant {0} *)src_;
566
+ device {1} * dst = (device {1} *)dst_;
567
+
568
+ const int local_index = linear_index % size;
569
+ const int strided_index = local_index * stride;
570
+ dst[linear_index] = src[strided_index];
571
+ }}
572
+ )METAL_GATHER";
573
+ } // namespace at::mps
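Note on the templates above (illustrative sketch): the scatter/gather sources leave {0} and {1} as source/destination dtype placeholders and escape literal braces as {{ }}. The snippet below assumes an fmt-style formatter is used to instantiate them; the exact call site in the MPS backend is not shown in this header, and the function and dtype strings are hypothetical.

// Illustrative only: substitute the {0}/{1} dtype placeholders of SCATTER_OPS_TEMPLATE.
// The doubled braces {{ }} in the template are fmt escapes, so only the placeholders
// change before the generated source is handed to the Metal compiler.
#include <ATen/mps/IndexKernels.h>
#include <fmt/format.h>
#include <string>

std::string demo_instantiate_scatter(const std::string& src_dtype,   // e.g. "float"
                                     const std::string& dst_dtype) { // e.g. "half"
  // fmt::runtime (fmt 8+) marks the format string as runtime-determined.
  return fmt::format(fmt::runtime(at::mps::SCATTER_OPS_TEMPLATE), src_dtype, dst_dtype);
}
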
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/mps/MPSAllocator.h ADDED
@@ -0,0 +1,401 @@
1
+ // Copyright © 2022 Apple Inc.
2
+
3
+ #pragma once
4
+
5
+ #include <ATen/mps/MPSAllocatorInterface.h>
6
+ #include <ATen/mps/MPSEvent.h>
7
+ #include <ATen/mps/MPSStream.h>
8
+
9
+ #include <cstdio>
10
+ #include <mutex>
11
+ #include <set>
12
+ #include <unordered_set>
13
+ #include <mach/vm_page_size.h>
14
+ #include <c10/util/flat_hash_map.h>
15
+
16
+ // this implementation is based on CUDACachingAllocator.
17
+ // It utilizes Metal Heaps to improve the performance with buffer allocation.
18
+ // Do not include this header. Use MPSAllocatorInterface.h instead.
19
+ // TODO: Unify the logic with CUDACachingAllocator and remove redundant code.
20
+ namespace at::mps::HeapAllocator {
21
+
22
+ static const size_t kMaxSmallAlloc = MB(1); // largest "small" allocation is 1 MiB
23
+ static const size_t kMinLargeAlloc = MB(10); // allocations between 1 and 10 MiB may use kLargeHeap
24
+ static const size_t kRoundLarge = MB(2); // round up large allocations to 2 MiB
25
+ static const size_t kSmallHeap = MB(8); // "small" allocations are packed in 8 MiB heaps
26
+ static const size_t kLargeHeap = MB(32); // "large" allocations may be packed in 32 MiB heaps
27
+ static const size_t kXLargeHeapD = MB(128); // "extra large" allocations on Discrete devices may be packed in 128 MiB heaps
28
+ static const size_t kXLargeHeapU = MB(1024); // "extra large" allocations on Unified devices may be packed in 1 GiB heaps
29
+ static const size_t kMaxScalarAlloc = (sizeof(int64_t)); // largest "scalar" allocation
30
+
31
+ // buffer pools could be customized with a combination of usage flags
32
+ enum UsageFlags : uint32_t {
33
+ PRIVATE = 0,
34
+ SMALL = (1 << 0), // small heaps have sizes of kSmallHeap, and large ones kLargeHeap
35
+ SHARED = (1 << 1), // shared pools allocated on devices with unified memory; otherwise, private between host/device
36
+ MANAGED = (1 << 2), // managed storage mode
37
+ HAZARD = (1 << 3), // enables Automatic Hazard Tracking for the resources allocated on the pool
38
+ SCALAR = (1 << 4), // used to import CPU scalar values to GPU and use them in MPS Stream
39
+ };
40
+ // debug verbosity flags
41
+ enum DebugVerbosity : uint32_t {
42
+ SILENT = 0,
43
+ PROFILING = (1 << 0), // print generic profiling data for total system memory usage
44
+ ALLOCATIONS = (1 << 1), // print buffer allocations
45
+ RECYCLES = (1 << 2), // print buffer recycling
46
+ RELEASES = (1 << 3), // print buffer releases
47
+ LARGE_ONLY = (1 << 4), // only log large buffer pool transactions
48
+ };
49
+
50
+ struct HeapBlock;
51
+
52
+ struct BufferBlock {
53
+ id<MTLBuffer> buffer;
54
+ void* cpu_ptr = nullptr; // stores the pointer to CPU mapping of a Shared MTLBuffer
55
+ size_t size; // size after alignment
56
+ size_t requested_size; // requested size (before alignment)
57
+ // buffer shape is used for retrieving base of views in cached graphs
58
+ std::vector<int64_t> shape;
59
+ bool in_use = false;
60
+ HeapBlock* heap;
61
+ id_t buf_id;
62
+ // counter to candidate least recently used buffers for garbage collection
63
+ uint32_t gc_count = 0;
64
+ uint32_t use_count = 0;
65
+ // counter to assign unique ids to buffer blocks
66
+ static uint64_t buffer_counter;
67
+ // Metal events used to sync GPU/CPU operations on the shared-storage buffers
68
+ MPSEventPtr event;
69
+
70
+ BufferBlock(size_t Size, size_t RequestedSize = 0, const id<MTLBuffer> Buffer = nullptr,
71
+ HeapBlock* Heap = nullptr) :
72
+ buffer(Buffer), size(Size), requested_size(RequestedSize),
73
+ heap(Heap), buf_id(Buffer ? ++buffer_counter : 0) { }
74
+
75
+ static bool Comparator(const BufferBlock* a, const BufferBlock* b) {
76
+ return (a->size != b->size) ? a->size < b->size : (uintptr_t)a->buffer < (uintptr_t)b->buffer;
77
+ }
78
+ static size_t alignUp(size_t Size, size_t Alignment) {
79
+ assert(((Alignment - 1) & Alignment) == 0);
80
+ return ((Size + Alignment - 1) & ~(Alignment - 1));
81
+ }
82
+ uint32_t retainCount() const { return [buffer retainCount]; }
83
+ };
84
+ typedef bool (*BufferComparison)(const BufferBlock*, const BufferBlock*);
85
+
86
+ struct BufferPool;
87
+ struct AllocParams {
88
+ AllocParams(size_t Alloc_Size, size_t Requested_Size, BufferPool* Pool) :
89
+ search_key(Alloc_Size), pool(Pool), requested_size(Requested_Size) { }
90
+ size_t size() const { return search_key.size; }
91
+
92
+ BufferBlock search_key;
93
+ BufferPool* pool;
94
+ BufferBlock* buffer_block = nullptr;
95
+ size_t requested_size;
96
+ // true if we exceed the low watermark limit. In this case
97
+ // we apply strategies to relieve the pressure before allocation.
98
+ bool has_memory_pressure = false;
99
+ // true if we're allocating on a unified memory device
100
+ bool has_unified_memory = true;
101
+ };
102
+
103
+ struct HeapBlock {
104
+ id<MTLHeap> heap;
105
+ struct { size_t total, available; } size;
106
+ BufferPool* pool;
107
+ unsigned int n_buffers = 0;
108
+ id_t heap_id;
109
+ // indicates if we split this heap to sub-allocate 'several' buffers (otherwise single buffer)
110
+ bool is_split;
111
+ // counter to assign unique ids to heap blocks
112
+ static uint64_t heap_counter;
113
+
114
+ HeapBlock(size_t Size, const id<MTLHeap> Heap = nullptr, BufferPool *Pool = nullptr) :
115
+ heap(Heap), size({.total = Size, .available = Size}), pool(Pool),
116
+ heap_id(Heap ? ++heap_counter : 0), is_split(true) { }
117
+
118
+ static MTLResourceOptions getOptions(uint32_t usage) {
119
+ // TODO: check the caching performance of write-combined mode
120
+ MTLResourceOptions options = MTLResourceCPUCacheModeDefaultCache;
121
+
122
+ if (usage & UsageFlags::MANAGED)
123
+ options |= MTLResourceStorageModeManaged;
124
+ else if (usage & UsageFlags::SHARED)
125
+ options |= MTLResourceStorageModeShared;
126
+ else
127
+ options |= MTLResourceStorageModePrivate;
128
+
129
+ options |= (usage & UsageFlags::HAZARD) ? MTLResourceHazardTrackingModeTracked : MTLResourceHazardTrackingModeUntracked;
130
+
131
+ return options;
132
+ }
133
+
134
+ static HeapBlock* createHeapBlock(AllocParams& params, id<MTLDevice> device, uint32_t usage) {
135
+ HeapBlock *heapBlock = nullptr;
136
+ bool is_split = true;
137
+ const size_t size = params.size();
138
+ MTLHeapDescriptor *d = [MTLHeapDescriptor new];
139
+ if (d) {
140
+ const size_t kXLargeHeap = params.has_unified_memory ? kXLargeHeapU : kXLargeHeapD;
141
+ if (size <= kMaxSmallAlloc) {
142
+ d.size = kSmallHeap;
143
+ } else if (size < kMinLargeAlloc) {
144
+ d.size = kLargeHeap;
145
+ } else if (size < kXLargeHeap / 2 && !params.has_memory_pressure) {
146
+ d.size = kXLargeHeap;
147
+ } else {
148
+ d.size = kRoundLarge * ((size + kRoundLarge - 1) / kRoundLarge);
149
+ is_split = false;
150
+ }
151
+ d.storageMode = (usage & UsageFlags::SHARED) ? MTLStorageModeShared : MTLStorageModePrivate;
152
+ d.cpuCacheMode = MTLCPUCacheModeDefaultCache;
153
+ // this automatically handles Metal buffer access synchronizations at the
154
+ // cost of slightly lower performance.
155
+ d.hazardTrackingMode = (usage & UsageFlags::HAZARD) ? MTLHazardTrackingModeTracked : MTLHazardTrackingModeUntracked;
156
+ d.resourceOptions = getOptions(usage);
157
+ d.type = MTLHeapTypeAutomatic;
158
+ id<MTLHeap> heap = [device newHeapWithDescriptor: d];
159
+ if (heap) {
160
+ [heap setPurgeableState:MTLPurgeableStateNonVolatile];
161
+ const size_t heap_size = heapAvailableSize(heap);
162
+ heapBlock = new HeapBlock(heap_size, heap, params.pool);
163
+ if (heapBlock) {
164
+ heapBlock->is_split = is_split;
165
+ }
166
+ }
167
+ [d release];
168
+ }
169
+ return heapBlock;
170
+ }
171
+ static bool Comparator(const HeapBlock* a, const HeapBlock* b) {
172
+ return (a->size.available != b->size.available) ? a->size.available < b->size.available :
173
+ (uintptr_t)a->heap < (uintptr_t)b->heap;
174
+ }
175
+ static NSUInteger heapAvailableSize(id<MTLHeap> heap, size_t Alignment = vm_page_size) {
176
+ return [heap maxAvailableSizeWithAlignment:Alignment];
177
+ }
178
+ NSUInteger Size() {
179
+ return [heap size];
180
+ }
181
+ id<MTLBuffer> newMTLBuffer(size_t length, uint32_t usage) {
182
+ id<MTLBuffer> buf = [heap newBufferWithLength:length options:getOptions(usage)];
183
+ if (buf) {
184
+ updateAvailableSize();
185
+ n_buffers++;
186
+ }
187
+ return buf;
188
+ }
189
+ // returns the retainCount before releasing the buffer
190
+ uint32_t releaseMTLBuffer(id<MTLBuffer>& buffer) {
191
+ const uint32_t retainCount = [buffer retainCount];
192
+ [buffer release];
193
+ buffer = nil;
194
+ updateAvailableSize();
195
+ n_buffers--;
196
+ return retainCount;
197
+ }
198
+ // returns the retainCount before releasing the heap
199
+ uint32_t releaseMTLHeap() {
200
+ const uint32_t retainCount = [heap retainCount];
201
+ TORCH_INTERNAL_ASSERT(!n_buffers); // assert if heap isn't empty
202
+ [heap setPurgeableState:MTLPurgeableStateEmpty];
203
+ [heap release];
204
+ heap = nil;
205
+ size.available = 0;
206
+ return retainCount;
207
+ }
208
+ uint32_t retainCount() const { return [heap retainCount]; }
209
+ void updateAvailableSize() { size.available = heapAvailableSize(heap); }
210
+ };
211
+ typedef bool (*HeapComparison)(const HeapBlock*, const HeapBlock*);
212
+
213
+ struct BufferPool {
214
+ enum class Kind {
215
+ PRIVATE_SMALL,
216
+ PRIVATE_LARGE,
217
+ SHARED_SMALL,
218
+ SHARED_LARGE,
219
+ SCALAR,
220
+ };
221
+
222
+ BufferPool(const id<MTLDevice> Device, uint32_t Usage) :
223
+ device(Device), usage(Usage),
224
+ heaps(HeapBlock::Comparator), available_buffers(BufferBlock::Comparator) { }
225
+
226
+ const id<MTLDevice> device;
227
+ // usage flags to customize the pool for various purposes (see UsageFlags enum)
228
+ const uint32_t usage;
229
+ // total number of buffers in the pool
230
+ uint32_t n_buffers = 0;
231
+ // total allocations size on this pool
232
+ size_t allocated_size = 0;
233
+ // total memory available in the pool
234
+ size_t available_size = 0;
235
+ // list of heaps ordered by their "available" (not total) memory size
236
+ std::set<HeapBlock*, HeapComparison> heaps;
237
+ // list of only "available" buffers in the pool (i.e., buffers not in-use)
238
+ std::set<BufferBlock*, BufferComparison> available_buffers;
239
+ // list of buffers that are in a state of "limbo" where they've already been freed
240
+ // from PyTorch-side, but were not returned to pool due to still being
241
+ // in-use by command buffers with retainCount > 1. In this state, the buffer is
242
+ // neither ready to be recycled, nor could be returned to pool as available.
243
+ // These buffers will be returned to pool once the command buffer's
244
+ // completionHandler callbacks are called.
245
+ std::unordered_set<BufferBlock*> buffers_pending_free;
246
+ // list of heaps pending size update
247
+ std::unordered_set<HeapBlock*> heaps_pending_update;
248
+ };
249
+
250
+ class MPSHeapAllocatorImpl {
251
+ public:
252
+ explicit MPSHeapAllocatorImpl() :
253
+ m_device(at::mps::MPSDevice::getInstance()->device()),
254
+ m_max_buffer_size([m_device maxBufferLength]),
255
+ m_stream(getDefaultMPSStream()),
256
+ m_event_pool(getMPSEventPool()) {
257
+ init_allocator();
258
+ }
259
+ ~MPSHeapAllocatorImpl() {
260
+ emptyCache();
261
+ }
262
+ // interface exposed to at::Allocator
263
+ id<MTLBuffer> malloc(size_t size, uint32_t usage);
264
+ // frees a buffer and returns it into buffer pool
265
+ void free(void* ptr);
266
+ // releases all the cached buffers and their associated heaps
267
+ void emptyCache();
268
+ // free inactive buffers that are pending to be freed
269
+ void freeInactiveBuffers();
270
+ // returns true if buffer was allocated from the shared pool
271
+ bool isSharedBuffer(const void* ptr);
272
+ // get the requested unaligned size of an MTLBuffer
273
+ ssize_t getUnalignedBufferSize(const void* ptr);
274
+ // set the shape of a base tensor from a view tensor
275
+ void setBufferShape(const void* ptr, const IntArrayRef& shape);
276
+ // retrieve the shape of a base tensor from a view tensor
277
+ IntArrayRef getBufferShape(const void* ptr);
278
+ // get the unique ID of the buffer
279
+ id_t getBufferId(const void* ptr);
280
+ // allocate a buffer from a specialized pool to import CPU scalars into GPU
281
+ id<MTLBuffer> allocScalarBufferWithValue(void* value, size_t size);
282
+ // returns a CPU-mapping of the input buffer and its retainCount,
283
+ // if only it has Shared storage-mode and allocated on MPSAllocator
284
+ std::pair<const void*, uint32_t> getSharedBufferPtr(const void* buffer);
285
+ // records events for a list of MTLBuffers (list is used to lock the mutex once)
286
+ // returns true if records any event (given if passed buffers exist and are shared-storage)
287
+ bool recordEvents(c10::ArrayRef<const void*> buffers);
288
+ // waits for the event to signal the completion of GPU execution
289
+ // on the passed shared buffers (list is used to lock the mutex once)
290
+ // returns true if actually waited on any event
291
+ bool waitForEvents(c10::ArrayRef<const void*> buffers);
292
+ // this indicates how far (in Megabytes) the current total allocations are from the
293
+ // low watermark limit which is used to detect if we're under memory pressure
294
+ // This returns zero if we've reached the low watermark limit
295
+ ssize_t getLowWatermarkValue();
296
+ // (see m_low_watermark_ratio for description)
297
+ void setLowWatermarkRatio(double ratio);
298
+ // (see m_high_watermark_ratio for description)
299
+ void setHighWatermarkRatio(double ratio);
300
+ // (see m_low_watermark_limit for description)
301
+ size_t getLowWatermarkLimit() const { return m_low_watermark_limit; }
302
+ // (see m_max_total_allowed_size for description)
303
+ size_t getHighWatermarkLimit() const { return m_max_total_allowed_size; }
304
+ // (see m_total_allocated_memory for description)
305
+ size_t getTotalAllocatedMemory() const { return m_total_allocated_memory; }
306
+ // (see m_current_allocated_memory for description)
307
+ size_t getCurrentAllocatedMemory() const { return m_current_allocated_memory; }
308
+ // total GPU memory allocated in the process by Metal driver; including
309
+ // implicit allocations from MPS/MPSGraph frameworks and MPSHeapAllocatorImpl.
310
+ size_t getDriverAllocatedMemory() const { return current_allocated_size(); }
311
+ // (see enum DebugVerbosity for description)
312
+ uint32_t getDebugVerbosity() const { return m_debug_verbosity; }
313
+ // returns the device that we allocate from
314
+ inline id<MTLDevice> Device() const { return m_device; }
315
+
316
+ // TODO: make a common function to do size unit conversions in PyTorch.
317
+ inline std::string format_size(uint64_t size) const;
318
+
319
+ private:
320
+ // (see m_high_watermark_ratio for description)
321
+ constexpr static double default_high_watermark_ratio = 1.7;
322
+ // we set the allowed upper bound to twice the size of recommendedMaxWorkingSetSize.
323
+ constexpr static double default_high_watermark_upper_bound = 2.0;
324
+ // (see m_low_watermark_ratio for description)
325
+ // on unified memory, we could allocate beyond the recommendedMaxWorkingSetSize
326
+ constexpr static double default_low_watermark_ratio_unified = 1.4;
327
+ constexpr static double default_low_watermark_ratio_discrete = 1.0;
328
+
329
+ const id<MTLDevice> m_device;
330
+ std::recursive_mutex m_mutex;
331
+ // allocated buffers by device pointer
332
+ ska::flat_hash_map<const void*, BufferBlock*> m_allocated_buffers;
333
+ // using a container for pools to simplify iterating them
334
+ ska::flat_hash_map<BufferPool::Kind, std::unique_ptr<BufferPool>> m_pools;
335
+ // total memory allocated by HeapAllocator (including blocks in pools)
336
+ size_t m_total_allocated_memory = 0;
337
+ // currently active memory allocations in use (i.e., blocks not in pools)
338
+ size_t m_current_allocated_memory = 0;
339
+ // max buffer size allowed by Metal
340
+ size_t m_max_buffer_size = 0;
341
+ // maximum total size allowed to be allocated
342
+ size_t m_max_total_allowed_size = 0;
343
+ // high watermark ratio is a hard limit for the total allowed allocations
344
+ // 0. : disables high watermark limit (may cause system failure if system-wide OOM occurs)
345
+ // 1. : recommended maximum allocation size (i.e., device.recommendedMaxWorkingSetSize)
346
+ // >1.: allows limits beyond the device.recommendedMaxWorkingSetSize
347
+ // e.g., value 0.95 means we allocate up to 95% of recommended maximum
348
+ // allocation size; beyond that, the allocations would fail with OOM error.
349
+ double m_high_watermark_ratio;
350
+ // low watermark ratio is a soft limit to attempt limiting memory allocations up to the lower watermark
351
+ // level by garbage collection or committing command buffers more frequently (a.k.a, adaptive commit).
352
+ // Value between 0 to m_high_watermark_ratio (setting 0.0 disables adaptive commit and garbage collection)
353
+ // e.g., value 0.9 means we 'attempt' to limit allocations up to 90% of recommended maximum
354
+ // allocation size.
355
+ double m_low_watermark_ratio;
356
+ // low watermark size limit (in Bytes) at the time we initialize the allocator
357
+ size_t m_low_watermark_limit;
358
+ // use "PYTORCH_DEBUG_MPS_ALLOCATOR" env-var to set debug verbosity
359
+ uint32_t m_debug_verbosity;
360
+ // default MPS stream
361
+ MPSStream* m_stream;
362
+ // we hold a reference to MPSEventPool so it could get destroyed after MPSAllocator
363
+ std::shared_ptr<MPSEventPool> m_event_pool;
364
+
365
+ void init_allocator();
366
+ void init_buffer_pools();
367
+ HeapBlock* get_free_heap(AllocParams& params);
368
+ bool get_free_buffer(AllocParams& params);
369
+ BufferBlock* get_allocated_buffer_block(const void* ptr);
370
+ BufferBlock* alloc_buffer_block(size_t size, uint32_t usage);
371
+ bool alloc_buffer(AllocParams& params);
372
+ void free_buffer(BufferBlock* buffer_block);
373
+ // returns true if the container heap is also released
374
+ bool release_buffer(BufferBlock* buffer_block, bool remove_empty_heap = true);
375
+ void release_buffers(BufferPool& pool);
376
+ bool release_available_cached_buffers(AllocParams& params);
377
+ bool release_cached_buffers();
378
+ // free unused cached blocks to reclaim GPU memory if memory pressure is high
379
+ void garbage_collect_cached_buffers(AllocParams& params);
380
+ // returns the suitable buffer pool type for the usage or
381
+ // requested/allocated sizes
382
+ BufferPool& get_pool(size_t requested_size, size_t aligned_size, uint32_t usage);
383
+ // returns the aligned allocation size that is optimized
384
+ // for the buffers to get reused frequently
385
+ size_t get_allocation_size(size_t size, uint32_t usage) const;
386
+ // maximum size of device memory available for allocation in current process
387
+ // Note: the recommendedMaxWorkingSetSize is typically 75% of the total system memory.
388
+ size_t max_device_size() const { return [m_device recommendedMaxWorkingSetSize]; }
389
+ // there are implicit allocations from MPS backend, so we need to query the 'device' for
390
+ // total allocated size instead of manually tracking in MPSAllocator
391
+ size_t current_allocated_size() const { return [m_device currentAllocatedSize]; }
392
+
393
+ bool trigger_memory_callbacks(BufferBlock* buffer_block, IMpsAllocatorCallback::EventType event) const {
394
+ for (const auto& name : MPSAllocatorCallbacksRegistry()->Keys()) {
395
+ MPSAllocatorCallbacksRegistry()->Create(name)->executeMPSAllocatorCallback(buffer_block ? buffer_block->buffer : nullptr, event);
396
+ }
397
+ return true;
398
+ }
399
+ };
400
+
401
+ } // namespace at::mps::HeapAllocator
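Worked example (illustrative): the rounding used by BufferBlock::alignUp above, reproduced as a standalone helper with a few spot checks; the values are arbitrary.

// Illustrative only: alignUp rounds a size up to a power-of-two alignment with
// the classic (size + align - 1) & ~(align - 1) trick, mirroring BufferBlock::alignUp.
#include <cassert>
#include <cstddef>

static size_t align_up(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

void demo_align_up() {
  assert(align_up(1, 256) == 256);
  assert(align_up(256, 256) == 256);
  assert(align_up(1000, 256) == 1024);  // next multiple of 256
  assert(align_up(4097, 4096) == 8192); // page-size style rounding
}
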
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/mps/MPSAllocatorInterface.h ADDED
@@ -0,0 +1,61 @@
1
+ // Copyright © 2023 Apple Inc.
2
+
3
+ #pragma once
4
+
5
+ #include <c10/core/Allocator.h>
6
+ #include <c10/util/Registry.h>
7
+ #include <ATen/core/ATen_fwd.h>
8
+
9
+ #define MB(x) (x * 1048576UL)
10
+
11
+ namespace at::mps {
12
+
13
+ // this is a public interface to access MPSAllocator.
14
+ // Do not declare methods that would depend on MPS or Metal frameworks.
15
+ class IMPSAllocator : public c10::Allocator {
16
+ public:
17
+ // see the comments in MPSAllocator.h for the description of these methods.
18
+ virtual void emptyCache() const = 0;
19
+ virtual void freeInactiveBuffers() const = 0;
20
+ virtual ssize_t getUnalignedBufferSize(const void* ptr) const = 0;
21
+ virtual IntArrayRef getBufferShape(const void* ptr) const = 0;
22
+ virtual id_t getBufferId(const void* ptr) const = 0;
23
+ virtual void setBufferShape(const void* ptr, const IntArrayRef& shape) const = 0;
24
+ virtual bool isSharedBuffer(const void* ptr) const = 0;
25
+ virtual bool isSharedStorageSupported() const = 0;
26
+ virtual c10::DataPtr allocScalarBufferWithValue(void* value, size_t size) const = 0;
27
+ virtual std::string formatSize(size_t size) const = 0;
28
+ virtual void setLowWatermarkRatio(double ratio) const = 0;
29
+ virtual void setHighWatermarkRatio(double ratio) const = 0;
30
+ virtual ssize_t getLowWatermarkValue() const = 0;
31
+ virtual size_t getLowWatermarkLimit() const = 0;
32
+ virtual size_t getHighWatermarkLimit() const = 0;
33
+ virtual size_t getTotalAllocatedMemory() const = 0;
34
+ virtual size_t getCurrentAllocatedMemory() const = 0;
35
+ virtual size_t getDriverAllocatedMemory() const = 0;
36
+ virtual std::pair<const void*, uint32_t> getSharedBufferPtr(const void* ptr) const = 0;
37
+ virtual bool recordEvents(c10::ArrayRef<const void*> buffers) const = 0;
38
+ virtual bool waitForEvents(c10::ArrayRef<const void*> buffers) const = 0;
39
+ };
40
+
41
+ class IMpsAllocatorCallback {
42
+ public:
43
+ enum class EventType {
44
+ ALLOCATED, // buffer got allocated to be used immediately
45
+ RECYCLED, // buffer pulled from free list to be reused
46
+ FREED, // buffer put to free list for future recycling
47
+ RELEASED, // buffer memory released
48
+ ALLOCATION_FAILED // buffer allocation failed
49
+ };
50
+ virtual ~IMpsAllocatorCallback() = default;
51
+ virtual void executeMPSAllocatorCallback(void* ptr, EventType event) = 0;
52
+ };
53
+
54
+ // MPS allocator will execute every registered callback when a block of memory is freed.
55
+ C10_DECLARE_REGISTRY(MPSAllocatorCallbacksRegistry, IMpsAllocatorCallback);
56
+ #define REGISTER_MPS_ALLOCATOR_CALLBACK(name, ...) \
57
+ C10_REGISTER_CLASS(MPSAllocatorCallbacksRegistry, name, __VA_ARGS__);
58
+
59
+ IMPSAllocator* getIMPSAllocator(bool sharedAllocator = false);
60
+
61
+ } // namespace at::mps
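Usage sketch (illustrative): hooking the callback registry declared above. The class name and registry key below are made up; the assumption is that REGISTER_MPS_ALLOCATOR_CALLBACK accepts any default-constructible IMpsAllocatorCallback subclass.

// Illustrative only: log every MPS allocator event through the registry above.
#include <ATen/mps/MPSAllocatorInterface.h>
#include <iostream>

namespace {

class LoggingAllocatorCallback : public at::mps::IMpsAllocatorCallback {
 public:
  void executeMPSAllocatorCallback(void* ptr, EventType event) override {
    // EventType enumerates ALLOCATED / RECYCLED / FREED / RELEASED / ALLOCATION_FAILED.
    std::cerr << "mps buffer " << ptr << " event " << static_cast<int>(event) << "\n";
  }
};

// Registered callbacks are invoked by the MPS allocator when buffers change state.
REGISTER_MPS_ALLOCATOR_CALLBACK(logging_callback, LoggingAllocatorCallback);

} // namespace
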
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/mps/MPSDevice.h ADDED
@@ -0,0 +1,84 @@
1
+ // Copyright © 2022 Apple Inc.
2
+
3
+ #pragma once
4
+ #include <c10/core/Allocator.h>
5
+ #include <c10/macros/Macros.h>
6
+ #include <c10/util/Exception.h>
7
+
8
+
9
+ #ifdef __OBJC__
10
+ #include <Foundation/Foundation.h>
11
+ #include <Metal/Metal.h>
12
+ #include <MetalPerformanceShaders/MetalPerformanceShaders.h>
13
+ typedef id<MTLDevice> MTLDevice_t;
14
+ typedef id<MTLLibrary> MTLLibrary_t;
15
+ typedef id<MTLComputePipelineState> MTLComputePipelineState_t;
16
+ typedef id<MTLLibrary> MTLLibrary_t;
17
+ #else
18
+ typedef void* MTLDevice;
19
+ typedef void* MTLDevice_t;
20
+ typedef void* MTLLibrary_t;
21
+ typedef void* MTLComputePipelineState_t;
22
+ typedef void* MTLLibrary_t;
23
+ #endif
24
+
25
+ using namespace std;
26
+
27
+ namespace at::mps {
28
+
29
+ // Helper enum to check if a MPSGraph op is supported in a given macOS version
30
+ enum class MacOSVersion : uint32_t {
31
+ MACOS_VER_13_0_PLUS = 0,
32
+ MACOS_VER_13_1_PLUS,
33
+ MACOS_VER_13_2_PLUS,
34
+ MACOS_VER_13_3_PLUS,
35
+ };
36
+
37
+ //-----------------------------------------------------------------
38
+ // MPSDevice
39
+ //
40
+ // MPSDevice is a singleton class that returns the default device
41
+ //-----------------------------------------------------------------
42
+
43
+ class TORCH_API MPSDevice {
44
+ public:
45
+ /**
46
+ * MPSDevice should not be cloneable.
47
+ */
48
+ MPSDevice(MPSDevice& other) = delete;
49
+ /**
50
+ * MPSDevice should not be assignable.
51
+ */
52
+ void operator=(const MPSDevice&) = delete;
53
+ /**
54
+ * Gets single instance of the Device.
55
+ */
56
+ static MPSDevice* getInstance();
57
+ /**
58
+ * Returns the single device.
59
+ */
60
+ MTLDevice_t device() {
61
+ return _mtl_device;
62
+ }
63
+ /**
64
+ * Returns whether running on Ventura or newer
65
+ */
66
+ bool isMacOS13Plus(MacOSVersion version) const;
67
+
68
+ MTLComputePipelineState_t metalIndexingPSO(const std::string &kernel);
69
+ MTLLibrary_t getMetalIndexingLibrary();
70
+
71
+ ~MPSDevice();
72
+
73
+ private:
74
+ static MPSDevice* _device;
75
+ MTLDevice_t _mtl_device;
76
+ MTLLibrary_t _mtl_indexing_library;
77
+ MPSDevice();
78
+ };
79
+
80
+ TORCH_API bool is_available();
81
+ TORCH_API bool is_macos_13_or_newer(MacOSVersion version = MacOSVersion::MACOS_VER_13_0_PLUS);
82
+ TORCH_API at::Allocator* GetMPSAllocator(bool useSharedAllocator = false);
83
+
84
+ } // namespace at::mps
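Usage sketch (illustrative): gating a code path on MPS availability and one of the macOS 13.x feature tiers enumerated above, using only the free functions this header declares; the wrapper function is hypothetical.

// Illustrative only: runtime availability / OS-version checks via MPSDevice.h.
#include <ATen/mps/MPSDevice.h>

bool demo_can_use_macos13_ops() {
  if (!at::mps::is_available()) {
    return false; // no Metal device / MPS support in this process
  }
  // MACOS_VER_13_0_PLUS is the default argument; pass a later tier as needed.
  return at::mps::is_macos_13_or_newer(at::mps::MacOSVersion::MACOS_VER_13_2_PLUS);
}
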
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/mps/MPSEvent.h ADDED
@@ -0,0 +1,100 @@
1
+ // Copyright © 2023 Apple Inc.
2
+
3
+ #pragma once
4
+
5
+ #include <ATen/mps/MPSStream.h>
6
+ #include <ctime>
7
+ #include <stack>
8
+
9
+ namespace at::mps {
10
+
11
+ // NOTE: don't create instances of this class directly.
12
+ // Use MPSEventPool to acquire instances of MPSEvent.
13
+ class MPSEvent {
14
+ public:
15
+ explicit MPSEvent(id_t ID, MPSStream* stream, bool enable_timing);
16
+ ~MPSEvent();
17
+
18
+ // records an event on the stream
19
+ void record(bool needsLock, bool syncEvent = false);
20
+ // makes all future work submitted to the stream wait for this event.
21
+ bool wait(bool needsLock, bool syncEvent = false);
22
+ // schedules a notifyListener callback for the event.
23
+ bool notify(bool needsLock, MTLSharedEventNotificationBlock block);
24
+ // checks if events are already signaled.
25
+ bool query() const;
26
+ // blocks the CPU thread until all the GPU work that were scheduled
27
+ // prior to recording this event are completed.
28
+ bool synchronize();
29
+ // resets this event with new parameters in case it gets reused from the event pool
30
+ void reset(MPSStream* stream, bool enable_timing);
31
+ // returns the unique ID of the event instance
32
+ id_t getID() const { return m_id; }
33
+ // returns the completion timestamp of the event
34
+ uint64_t getCompletionTime() const { return m_completion_time; }
35
+ // if already recorded, waits for cpu_sync_cv to be signaled
36
+ void waitForCpuSync();
37
+
38
+ private:
39
+ id_t m_id;
40
+ // enables measuring the completion time of the notifyListener of this event
41
+ bool m_enable_timing;
42
+ uint64_t m_signalCounter = 0;
43
+ MPSStream* m_stream = nullptr;
44
+ MTLSharedEvent_t m_event = nullptr;
45
+ MTLSharedEventListener* m_listener = nullptr;
46
+ // used to sync the events created on this Stream with CPU
47
+ std::mutex m_cpu_sync_mutex{};
48
+ std::condition_variable m_cpu_sync_cv{};
49
+ // CondVar predicate to sync the events created on this Stream with CPU
50
+ bool m_cpu_sync_completed = false;
51
+ // used to compute elapsed time
52
+ uint64_t m_completion_time = 0;
53
+
54
+ void recordLocked(bool syncEvent);
55
+ bool waitLocked(bool syncEvent);
56
+ bool notifyLocked(MTLSharedEventNotificationBlock block);
57
+ void notifyCpuSync();
58
+ static uint64_t getTime() {
59
+ return clock_gettime_nsec_np(CLOCK_MONOTONIC_RAW);
60
+ }
61
+ };
62
+
63
+ typedef std::unique_ptr<MPSEvent, std::function<void(MPSEvent*)>> MPSEventPtr;
64
+
65
+ class MPSEventPool {
66
+ public:
67
+ explicit MPSEventPool(MPSStream* default_stream);
68
+ ~MPSEventPool();
69
+
70
+ MPSEventPtr acquireEvent(bool enable_timing, MPSStream* stream);
71
+ void emptyCache();
72
+
73
+ // these are mainly used for MPSHooks and torch.mps.Event() bindings
74
+ id_t acquireEvent(bool enable_timing);
75
+ void releaseEvent(id_t event_id);
76
+ void recordEvent(id_t event_id, bool syncEvent);
77
+ void waitForEvent(id_t event_id, bool syncEvent);
78
+ void synchronizeEvent(id_t event_id);
79
+ bool queryEvent(id_t event_id);
80
+ // returns elapsed time between two recorded events in milliseconds
81
+ double elapsedTime(id_t start_event_id, id_t end_event_id);
82
+
83
+ private:
84
+ MPSStream* m_default_stream = nullptr;
85
+ std::recursive_mutex m_mutex;
86
+ std::stack<std::unique_ptr<MPSEvent>> m_pool{};
87
+ // dictionary to associate event IDs with event objects
88
+ // used to retain in-use events out of the pool
89
+ // for torch.mps.Event() bindings.
90
+ std::unordered_map<id_t, MPSEventPtr> m_in_use_events{};
91
+ uint64_t m_event_counter = 0;
92
+ std::function<void(MPSEvent*)> m_default_deleter;
93
+
94
+ MPSEvent* getInUseEvent(id_t event_id, bool locked = true);
95
+ };
96
+
97
+ // shared_ptr is used to get MPSEventPool destroyed after dependent instances
98
+ std::shared_ptr<MPSEventPool> getMPSEventPool();
99
+
100
+ } // namespace at::mps
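Usage sketch (illustrative, based only on the declarations above): timing a region of GPU work with a pair of pooled events. It assumes this header is reachable from the including translation unit and omits the actual work submission; the function is hypothetical.

// Illustrative only: measure elapsed time between two recorded MPS events.
#include <ATen/mps/MPSEvent.h>

double demo_time_region() {
  auto pool = at::mps::getMPSEventPool();
  auto start = pool->acquireEvent(/*enable_timing=*/true);
  auto stop  = pool->acquireEvent(/*enable_timing=*/true);

  pool->recordEvent(start, /*syncEvent=*/true);
  // ... enqueue MPS work here ...
  pool->recordEvent(stop, /*syncEvent=*/true);
  pool->synchronizeEvent(stop); // block until the GPU work before 'stop' completes

  double ms = pool->elapsedTime(start, stop);
  pool->releaseEvent(start);
  pool->releaseEvent(stop);
  return ms;
}
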
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/mps/MPSGeneratorImpl.h ADDED
@@ -0,0 +1,52 @@
1
+ // Copyright © 2022 Apple Inc.
2
+
3
+ #pragma once
4
+
5
+ #include <ATen/core/Generator.h>
6
+ #include <ATen/core/PhiloxRNGEngine.h>
7
+ #include <c10/core/GeneratorImpl.h>
8
+ #include <c10/util/Optional.h>
9
+
10
+ namespace at {
11
+ namespace mps::detail {
12
+
13
+ static const uint32_t PHILOX_STATE_N = 7;
14
+ struct rng_data_pod {
15
+ std::array<uint32_t, PHILOX_STATE_N> state{1};
16
+ uint64_t seed = default_rng_seed_val;
17
+ };
18
+
19
+ TORCH_API const Generator& getDefaultMPSGenerator();
20
+ TORCH_API Generator createMPSGenerator(uint64_t seed_val = default_rng_seed_val);
21
+
22
+ } // namespace mps::detail
23
+
24
+ struct TORCH_API MPSGeneratorImpl : public c10::GeneratorImpl {
25
+ // Constructors
26
+ MPSGeneratorImpl(uint64_t seed_in = default_rng_seed_val);
27
+ ~MPSGeneratorImpl() override = default;
28
+
29
+ // MPSGeneratorImpl methods
30
+ std::shared_ptr<MPSGeneratorImpl> clone() const;
31
+ void set_current_seed(uint64_t seed) override;
32
+ void set_offset(uint64_t offset) override;
33
+ uint64_t get_offset() const override;
34
+ uint64_t current_seed() const override;
35
+ uint64_t seed() override;
36
+ void set_state(const c10::TensorImpl& new_state) override;
37
+ c10::intrusive_ptr<c10::TensorImpl> get_state() const override;
38
+ void update_philox_counters();
39
+
40
+ void set_engine(at::Philox4_32 engine) { engine_ = engine; };
41
+ at::Philox4_32 engine() { return engine_; };
42
+ uint32_t* state_data() { return data_.state.data(); }
43
+ static DeviceType device_type() { return DeviceType::MPS; };
44
+
45
+ private:
46
+ mps::detail::rng_data_pod data_;
47
+ at::Philox4_32 engine_;
48
+
49
+ MPSGeneratorImpl* clone_impl() const override;
50
+ };
51
+
52
+ } // namespace at
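A minimal sketch of how the factory declared above might be exercised through the generic at::Generator handle; set_current_seed and current_seed are the standard Generator API, and an MPS-enabled build is assumed.

// Hedged sketch: create and reseed an MPS generator via createMPSGenerator.
#include <ATen/mps/MPSGeneratorImpl.h>
#include <cstdint>

void generatorExample() {
  // createMPSGenerator wraps an MPSGeneratorImpl in an at::Generator handle.
  at::Generator gen = at::mps::detail::createMPSGenerator(/*seed_val=*/42);
  gen.set_current_seed(123);                  // reseed the underlying Philox engine
  const uint64_t seed = gen.current_seed();   // 123
  (void)seed;
}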
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/mps/MPSGuardImpl.h ADDED
@@ -0,0 +1,174 @@
1
+ // Copyright © 2022 Apple Inc.
2
+
3
+ #pragma once
4
+ #include <c10/core/impl/DeviceGuardImplInterface.h>
5
+ #include <c10/macros/Macros.h>
6
+ #include <c10/util/Exception.h>
7
+ #include <ATen/Context.h>
8
+ #include <ATen/mps/MPSStream.h>
9
+ #include <ATen/mps/MPSEvent.h>
10
+
11
+ #ifdef __OBJC__
12
+ #include <Foundation/Foundation.h>
13
+ #include <Metal/Metal.h>
14
+ #include <MetalPerformanceShaders/MetalPerformanceShaders.h>
15
+ #endif
16
+
17
+ #include <ATen/Tensor.h>
18
+ #include <c10/core/MemoryFormat.h>
19
+ #include <c10/core/Storage.h>
20
+ #include <c10/core/TensorImpl.h>
21
+ #include <sys/_types/_size_t.h>
22
+ #include <memory>
23
+ #include <c10/core/UndefinedTensorImpl.h>
24
+ #include <c10/util/intrusive_ptr.h>
25
+
26
+
27
+ namespace at::mps {
28
+
29
+ typedef MPSEvent* mpsEvent_t;
30
+
31
+ // TODO: Move the MPSGuardImpl to inherit from NoOpDeviceGuardImpl
32
+ // https://github.com/pytorch/pytorch/issues/77170
33
+ struct TORCH_API MPSGuardImpl final : public c10::impl::DeviceGuardImplInterface {
34
+ static constexpr c10::DeviceType static_type = c10::DeviceType::MPS;
35
+
36
+ // constructor
37
+ MPSGuardImpl() {}
38
+ explicit MPSGuardImpl(c10::DeviceType t) {
39
+ TORCH_INTERNAL_ASSERT(t == c10::DeviceType::MPS);
40
+ }
41
+
42
+ // returns the type
43
+ c10::DeviceType type() const override {
44
+ return c10::DeviceType::MPS;
45
+ }
46
+
47
+ Device exchangeDevice(Device d) const override {
48
+ return Device(c10::DeviceType::MPS, 0);
49
+ }
50
+
51
+ Device getDevice() const override {
52
+ return Device(c10::DeviceType::MPS, 0);
53
+ }
54
+
55
+ c10::optional<Device> uncheckedGetDevice() const noexcept {
56
+ return Device(c10::DeviceType::MPS, 0);
57
+ }
58
+
59
+ void setDevice(Device d) const override {
60
+ TORCH_INTERNAL_ASSERT(d.is_mps());
61
+ }
62
+
63
+ void uncheckedSetDevice(Device d) const noexcept override {
64
+ // TODO: Currently setting only device 0
65
+ }
66
+
67
+ Stream getStream(Device d) const noexcept override {
68
+ return Stream(Stream::DEFAULT, Device(c10::DeviceType::MPS, 0));
69
+ }
70
+
71
+ Stream getDefaultStream(Device d) const override {
72
+ return Stream(Stream::DEFAULT, Device(c10::DeviceType::MPS, 0));
73
+ }
74
+
75
+ // NB: These do NOT set the current device
76
+ Stream exchangeStream(Stream s) const noexcept override {
77
+ return Stream(Stream::DEFAULT, Device(c10::DeviceType::MPS, 0));
78
+ }
79
+ DeviceIndex deviceCount() const noexcept override {
80
+ if (at::hasMPS()) {
81
+ //TODO: extend it for multi-device case
82
+ return 1;
83
+ } else {
84
+ return 0;
85
+ }
86
+ }
87
+
88
+ // Event-related functions
89
+ void createEvent(
90
+ mpsEvent_t* event,
91
+ const EventFlag flag) const;
92
+
93
+ void destroyEvent(
94
+ void* event,
95
+ const DeviceIndex device_index) const noexcept override;
96
+
97
+ void record(
98
+ void** event,
99
+ const Stream& stream,
100
+ const DeviceIndex device_index,
101
+ const EventFlag flag) const override;
102
+
103
+ void block(
104
+ void* event,
105
+ const Stream& stream) const override;
106
+
107
+ bool queryEvent(void* event) const override;
108
+
109
+ };
110
+
111
+ /// A variant of OptionalDeviceGuard that is specialized for MPS.
112
+ struct OptionalMPSGuard {
113
+ explicit OptionalMPSGuard() : guard_() {}
114
+
115
+ explicit OptionalMPSGuard(c10::optional<Device> device_opt)
116
+ : guard_(device_opt) {}
117
+
118
+ /// Set the current MPS device to the passed device index, if it is not
119
+ /// nullopt
120
+ explicit OptionalMPSGuard(c10::optional<DeviceIndex> device_index_opt)
121
+ : guard_(device_index_opt) {}
122
+
123
+ // Copy is not allowed
124
+ OptionalMPSGuard(const OptionalMPSGuard&) = delete;
125
+ OptionalMPSGuard& operator=(const OptionalMPSGuard&) = delete;
126
+ OptionalMPSGuard(OptionalMPSGuard&& other) = delete;
127
+ OptionalMPSGuard& operator=(OptionalMPSGuard&& other) = delete;
128
+
129
+ /// Sets the MPS device to the given device, initializing the guard if it
130
+ /// is not already initialized. Errors if the given device is not a MPS
131
+ /// device.
132
+ void set_device(Device device) {
133
+ guard_.set_device(device);
134
+ }
135
+
136
+ /// Sets the MPS device to the given device, initializing the guard if it is
137
+ /// not already initialized. Errors if the given device is not a MPS device.
138
+ void reset_device(Device device) {
139
+ guard_.reset_device(device);
140
+ }
141
+
142
+ /// Sets the MPS device to the given device index, initializing the guard if
143
+ /// it is not already initialized.
144
+ void set_index(DeviceIndex device_index) {
145
+ guard_.set_index(device_index);
146
+ }
147
+
148
+ /// Returns the device that was set immediately prior to initialization of the
149
+ /// guard, or nullopt if the guard is uninitialized.
150
+ c10::optional<Device> original_device() const {
151
+ return guard_.original_device();
152
+ }
153
+
154
+ /// Returns the most recent device that was set using this device guard,
155
+ /// either from construction, or via set_device, if the guard is initialized,
156
+ /// or nullopt if the guard is uninitialized.
157
+ c10::optional<Device> current_device() const {
158
+ return guard_.current_device();
159
+ }
160
+
161
+ /// Restore the original MPS device, resetting this guard to uninitialized
162
+ /// state.
163
+ void reset() {
164
+ guard_.reset();
165
+ }
166
+
167
+ private:
168
+ c10::impl::InlineOptionalDeviceGuard<MPSGuardImpl> guard_;
169
+ };
170
+
171
+
172
+ C10_REGISTER_GUARD_IMPL(MPS, MPSGuardImpl);
173
+
174
+ } // namespace at::mps
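For orientation, a short sketch (assuming an MPS-enabled build and a tensor already on the MPS device) of the RAII pattern OptionalMPSGuard is meant for:

// Hedged sketch: scope-bound device selection with OptionalMPSGuard.
#include <ATen/mps/MPSGuardImpl.h>
#include <ATen/Tensor.h>

void guardedWork(const at::Tensor& t) {
  // Initializes the guard with the tensor's device; MPSGuardImpl only
  // accepts MPS devices, so a non-MPS tensor here would trip the assertion.
  at::mps::OptionalMPSGuard guard(t.device());
  // ... dispatch MPS work for `t` here ...
}  // guard resets when it goes out of scope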
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/mps/MPSHooks.h ADDED
@@ -0,0 +1,51 @@
1
+ // Copyright © 2022 Apple Inc.
2
+
3
+ #pragma once
4
+
5
+ #include <ATen/detail/MPSHooksInterface.h>
6
+ #include <ATen/Generator.h>
7
+ #include <ATen/mps/MPSEvent.h>
8
+ #include <c10/util/Optional.h>
9
+
10
+ namespace at::mps {
11
+
12
+ // The real implementation of MPSHooksInterface
13
+ struct MPSHooks : public at::MPSHooksInterface {
14
+ MPSHooks(at::MPSHooksArgs) {}
15
+ void initMPS() const override;
16
+
17
+ // MPSDevice interface
18
+ bool hasMPS() const override;
19
+ bool isOnMacOS13orNewer(unsigned minor) const override;
20
+
21
+ // MPSGeneratorImpl interface
22
+ const Generator& getDefaultMPSGenerator() const override;
23
+
24
+ // MPSStream interface
25
+ void deviceSynchronize() const override;
26
+ void commitStream() const override;
27
+ void* getCommandBuffer() const override;
28
+ void* getDispatchQueue() const override;
29
+
30
+ // MPSAllocator interface
31
+ Allocator* getMPSDeviceAllocator() const override;
32
+ void emptyCache() const override;
33
+ size_t getCurrentAllocatedMemory() const override;
34
+ size_t getDriverAllocatedMemory() const override;
35
+ void setMemoryFraction(double ratio) const override;
36
+
37
+ // MPSProfiler interface
38
+ void profilerStartTrace(const std::string& mode, bool waitUntilCompleted) const override;
39
+ void profilerStopTrace() const override;
40
+
41
+ // MPSEvent interface
42
+ uint32_t acquireEvent(bool enable_timing) const override;
43
+ void releaseEvent(uint32_t event_id) const override;
44
+ void recordEvent(uint32_t event_id) const override;
45
+ void waitForEvent(uint32_t event_id) const override;
46
+ void synchronizeEvent(uint32_t event_id) const override;
47
+ bool queryEvent(uint32_t event_id) const override;
48
+ double elapsedTimeOfEvents(uint32_t start_event_id, uint32_t end_event_id) const override;
49
+ };
50
+
51
+ } // namespace at::mps
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/mps/MPSProfiler.h ADDED
@@ -0,0 +1,393 @@
1
+ // Copyright © 2022 Apple Inc.
2
+
3
+ #pragma once
4
+
5
+ #include <ATen/Tensor.h>
6
+ #include <ATen/mps/MPSStream.h>
7
+ #include <ATen/mps/MPSAllocatorInterface.h>
8
+
9
+ #include <os/signpost.h>
10
+ #include <os/log.h>
11
+
12
+ #include <sstream>
13
+ #include <string>
14
+ #include <atomic>
15
+ #include <unordered_map>
16
+ #include <utility>
17
+ #include <ctime>
18
+
19
+ namespace at::mps {
20
+
21
+ namespace Profiler {
22
+
23
+ struct BaseInfo {
24
+ // profiling info types
25
+ enum class Type {
26
+ GRAPH,
27
+ KERNEL,
28
+ COPY,
29
+ CPU_FALLBACK,
30
+ };
31
+
32
+ BaseInfo(Type infoType, uint64_t Id, const uintptr_t Handle) :
33
+ type(infoType), profileId(Id), handle(Handle) { }
34
+ virtual ~BaseInfo() = default;
35
+
36
+ // type of profiling info
37
+ Type type;
38
+ // unique profile ID for execution instances of operations or copies
39
+ uint64_t profileId;
40
+ // ID generated by os_signpost
41
+ // since it's possible to use event and interval-based signposts at the
42
+ // same time, we need separate IDs for each.
43
+ os_signpost_id_t eventSignpostId = 0, intervalSignpostId = 0;
44
+ // accumulated GPU time in ms (obtained from CompletionHandler's "GPUEndTime - GPUStartTime")
45
+ std::atomic<double> totalGpuTime{0.0};
46
+ // accumulated Scheduling time in ms (obtained from CompletionHandler's "KernelEndTime - KernelStartTime")
47
+ std::atomic<double> totalSchedulingTime{0.0};
48
+ // indicates if the operation or copy execution has completed
49
+ std::atomic_bool completed{false};
50
+ // handle used to identify the profile info's instance (usually the pointer)
51
+ const uintptr_t handle;
52
+
53
+ virtual const std::string toString(double gpuTime = 0, double schedulingTime = 0) const;
54
+ // builds a string for a tensor (format: Device:ScalarType[tensor.sizes()])
55
+ static std::string buildTensorString(const Tensor& tensor, bool includeBufferId = false) {
56
+ if (tensor.defined()) {
57
+ std::stringstream tensorStr;
58
+ auto deviceType = tensor.device().type();
59
+ tensorStr << c10::DeviceTypeName(deviceType);
60
+ // see comments for INCLUDE_BUFFER_ID
61
+ if (includeBufferId && deviceType == at::kMPS) {
62
+ id<MTLBuffer> buffer = __builtin_bit_cast(id<MTLBuffer>, tensor.storage().data());
63
+ tensorStr << "(buf#" << (getIMPSAllocator()->getBufferId(buffer))
64
+ << ":" << buffer.retainCount << ")";
65
+ }
66
+ tensorStr << ":"
67
+ << tensor.scalar_type() << tensor.sizes();
68
+ return tensorStr.str();
69
+ } else {
70
+ return "undefined";
71
+ }
72
+ }
73
+ static uint64_t getTime() {
74
+ return clock_gettime_nsec_np(CLOCK_MONOTONIC_RAW);
75
+ }
76
+ };
77
+
78
+ struct OperationInfo : BaseInfo {
79
+ OperationInfo(const void* Handle, bool IsGraph, uint64_t Id, const std::string& StrKey) :
80
+ BaseInfo(IsGraph ? Type::GRAPH : Type::KERNEL, Id, uintptr_t(Handle)), strKey(StrKey) { }
81
+
82
+ uint64_t runCount = 0;
83
+ std::string strKey;
84
+
85
+ const std::string toString(double gpuTime = 0, double schedulingTime = 0) const override;
86
+
87
+ // builds a string for a kernel
88
+ static std::string buildKernelString(const std::string& kernelName,
89
+ const TensorList& tensors,
90
+ bool includeBufferId = false) {
91
+ std::stringstream kernelStr;
92
+ kernelStr << kernelName;
93
+ for (const Tensor& tensor: tensors) {
94
+ kernelStr << ":" << BaseInfo::buildTensorString(tensor, includeBufferId);
95
+ }
96
+ return kernelStr.str();
97
+ }
98
+ };
99
+
100
+ struct CpuFbInfo : BaseInfo {
101
+ CpuFbInfo(uint64_t Id, const std::string& OpName) :
102
+ BaseInfo(Type::CPU_FALLBACK, Id, 0), opName(OpName) { }
103
+
104
+ uint64_t runCount = 0;
105
+ // the current and total overhead of copies in bytes required to convert the Op's
106
+ // input tensors from MPS to CPU and then output from CPU back to MPS
107
+ size_t currentCopyOverhead = 0;
108
+ size_t totalCopyOverhead = 0;
109
+ std::string opName;
110
+ std::string strKey;
111
+ uint64_t startTime = 0;
112
+
113
+ const std::string toString(double gpuTime = 0, double schedulingTime = 0) const override;
114
+
115
+ void updateCopyOverhead(const TensorList& tensors) {
116
+ currentCopyOverhead = 0;
117
+ for (const Tensor& tensor: tensors) {
118
+ if (tensor.defined()) {
119
+ currentCopyOverhead += tensor.nbytes();
120
+ }
121
+ }
122
+ totalCopyOverhead += currentCopyOverhead;
123
+ }
124
+ };
125
+
126
+ struct CopyInfo : BaseInfo {
127
+ enum class Kind {
128
+ MPS_TO_MPS,
129
+ MPS_TO_CPU,
130
+ CPU_TO_MPS,
131
+ };
132
+
133
+ CopyInfo(const void* Handle, size_t Length, uint64_t Id, bool IsNonBlocking, bool UsesBlitter) :
134
+ BaseInfo(Type::COPY, Id, uintptr_t(Handle)), kind(Kind::MPS_TO_MPS),
135
+ length(Length), isNonBlocking(IsNonBlocking), usesBlitter(UsesBlitter) { }
136
+
137
+ Kind kind;
138
+ size_t length;
139
+ bool isNonBlocking;
140
+ bool usesBlitter;
141
+ std::string srcStrKey;
142
+ std::string dstStrKey;
143
+ // for copies that don't use blitters, we measure CPU time
144
+ uint64_t startTime = 0;
145
+
146
+ const std::string toString(double gpuTime = 0, double schedulingTime = 0) const override;
147
+
148
+ static std::string buildTensorString(const void* buffer, const OptionalTensorRef tensor, bool includeBufferId = false);
149
+
150
+ static bool isStorageOnMPS(const void* buffer, const OptionalTensorRef tensor) {
151
+ if (tensor.has_value()) {
152
+ return tensor->device().type() == at::kMPS;
153
+ }
154
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(buffer);
155
+ // getUnalignedBufferSize() returns -1 if input buffer is not on MPS device
156
+ return getIMPSAllocator()->getUnalignedBufferSize(buffer) >= 0;
157
+ }
158
+
159
+ static Kind getCopyKind(const void* srcBuffer, const void* dstBuffer,
160
+ const OptionalTensorRef srcTensor, const OptionalTensorRef dstTensor) {
161
+ const bool isSrcOnMPS = isStorageOnMPS(srcBuffer, srcTensor);
162
+ const bool isDstOnMPS = isStorageOnMPS(dstBuffer, dstTensor);
163
+ TORCH_INTERNAL_ASSERT_DEBUG_ONLY(isSrcOnMPS || isDstOnMPS);
164
+ if (isSrcOnMPS && !isDstOnMPS) {
165
+ return Kind::MPS_TO_CPU;
166
+ } else if (!isSrcOnMPS && isDstOnMPS) {
167
+ return Kind::CPU_TO_MPS;
168
+ }
169
+ return Kind::MPS_TO_MPS;
170
+ }
171
+ };
172
+
173
+ struct CopyStat : CopyInfo {
174
+ explicit CopyStat(std::string CopyKindStr) :
175
+ CopyInfo(nullptr, 0, 0, false, false), kindStr(std::move(CopyKindStr)) {}
176
+ // total number of copies
177
+ size_t totalCount = 0;
178
+ // number of Scalar copies (i.e., less than sizeof(int64))
179
+ size_t scalarsCount = 0;
180
+ // number of blocking copies (i.e., require syncing to GPU)
181
+ size_t blockingCount = 0;
182
+ // number of copies that used memcpy(), instead of Metal Blit Encoder
183
+ size_t memcpyCount = 0;
184
+ // accumulated GPU time in ms for the scalar copies
185
+ std::atomic<double> scalarsGpuTime{0.0};
186
+ // copy kind in string type
187
+ std::string kindStr;
188
+ };
189
+
190
+ class MPSProfiler {
191
+ public:
192
+ // lower 16 bits used for profiler options
193
+ enum ProfileOptions : uint32_t {
194
+ OPTIONS_NONE = 0,
195
+ // ALL_* means, all signpost types (RUN_OPERATION|BLIT_COPY|CPU_FALLBACK, etc.)
196
+ // (used for convenience to not compute bit flags by OR-ing manually)
197
+ // trace all signpost types using events
198
+ ALL_SIGNPOST_EVENTS = (1 << 0),
199
+ // trace all signpost types using intervals
200
+ ALL_SIGNPOST_INTERVALS = (1 << 1),
201
+ // always wait for command buffer to finish executing after each commit
202
+ WAIT_UNTIL_COMPLETED = (1 << 2),
203
+ // for interval-based signposts, include the scheduling portion of
204
+ // Graph/Kernel/Copy executions as well.
205
+ // if this flag is disabled, only the "GPU run time" is included in the interval,
206
+ // and not the scheduling time.
207
+ INCLUDE_SCHEDULE_INTERVAL = (1 << 3),
208
+
209
+ // use these if you need to trace signposts types individually (rarely required)
210
+ // trace signpost using intervals
211
+ USE_INTERVALS = (1 << 4),
212
+ // trace signpost by emitting events
213
+ USE_EVENTS = (1 << 5),
214
+ // used for sanity check (Change this when new option added)
215
+ OPTIONS_COUNT = (USE_EVENTS << 1) - 1,
216
+ };
217
+
218
+ // when adding new types, #define the type string in MPSProfiler.mm as well.
219
+ // upper 16 bits used for event types
220
+ enum SignpostTypes : uint32_t {
221
+ SIGNPOST_NONE = 0,
222
+ // trace signposts for PyTorch operation executions
223
+ RUN_OPERATION = (1 << 16),
224
+ // trace signposts for blitter copies
225
+ BLIT_COPY = (1 << 17),
226
+ // trace signposts for ops that fall back on CPU
227
+ CPU_FALLBACK = (1 << 18),
228
+ // used for sanity check (Change this when new type added)
229
+ SIGNPOST_COUNT = (CPU_FALLBACK << 1) - 1,
230
+ };
231
+
232
+ enum LogOptions : uint32_t {
233
+ LOG_NONE = 0,
234
+
235
+ // Info logging options during execution
236
+ // -------------------------------------
237
+ // prints operation info (id/key/run_count) during execution
238
+ OPERATION_INFO = (1 << 0),
239
+ // prints copy info (src/dst tensors/buffers, size, etc.) during execution
240
+ COPY_INFO = (1 << 1),
241
+ // prints CPU Fallback info (id/runCount/opName/copyOverhead) during execution
242
+ CPU_FALLBACK_INFO = (1 << 2),
243
+
244
+ // Profiling Statistics logging options when process terminates
245
+ // ------------------------------------------------------------
246
+ // prints all stats (OPERATION_STATS, COPY_STATS, CPU_FALLBACK_STATS) before process terminates
247
+ // this is convenient to not combine following stats bit flags manually
248
+ ALL_STATS = (1 << 3),
249
+ // prints operation stats (GPU times, run count, etc.) before process terminates
250
+ OPERATION_STATS = (1 << 4),
251
+ // prints copies stats (GPU times, copy kinds, sizes, etc.) before process terminates
252
+ COPY_STATS = (1 << 5),
253
+ // prints CPU Fallback stats (CPU times, run times, size of MPS<->CPU copies
254
+ // for tensors, etc.) before process terminates
255
+ CPU_FALLBACK_STATS = (1 << 6),
256
+
257
+ // Metadata format options when logging the info
258
+ // ---------------------------------------------
259
+ // if enabled, includes GPU run time in metadata (i.e., GPUEndTime-GPUStartTime
260
+ // from Metal Command Buffers) (e.g., [GPU=0.324 ms])
261
+ INCLUDE_GPU_TIME = (1 << 7),
262
+ // if enabled, includes GPU scheduling time in metadata separately
263
+ // (i.e., KernelEndTime-KernelStartTime from Metal Command Buffers)
264
+ // e.g., [GPU=0.324 ms, KRNL=0.036 ms]
265
+ INCLUDE_KERNEL_TIME = (1 << 8),
266
+ // if enabled, includes the unique buffer ID in metadata for the storage
267
+ // of a tensor that was allocated on MPSAllocator. This is useful (along with
268
+ // the EV "PYTORCH_DEBUG_MPS_ALLOCATOR") to identify buffers that are involved
269
+ // with various operations.
270
+ INCLUDE_BUFFER_ID = (1 << 9),
271
+
272
+ // used for sanity check (Change this when new option added)
273
+ LOG_COUNT = (INCLUDE_BUFFER_ID << 1) - 1,
274
+ };
275
+
276
+ explicit MPSProfiler();
277
+ ~MPSProfiler();
278
+
279
+ // the handle is either "MPSGraph*" or "id<MTLComputePipelineState>" for Metal Kernels
280
+ // the beginProfile*() functions return a profileId which is unique per graph/kernel/copy
281
+ uint64_t beginProfileKernel(const void* handle, const std::string& strKey, bool isGraph);
282
+ uint64_t beginProfileKernel(const void* handle, const std::string& kernelName, const TensorList& tensors);
283
+ uint64_t beginProfileCopy(const void* srcBuffer, const void* dstBuffer,
284
+ const OptionalTensorRef srcTensor,
285
+ const OptionalTensorRef dstTensor,
286
+ size_t length, bool isNonBlocking, bool usesBlitter = true);
287
+ uint64_t beginProfileCPUFallback(const std::string& opName, const TensorList& tensors);
288
+ void beginProfileGPUInterval(const void* handle);
289
+
290
+ void endProfileCopy(uint64_t profileId, SyncType syncType);
291
+ void endProfileKernel(const void* handle, SyncType syncType = SyncType::NONE);
292
+ void endProfileCPUFallback(const std::string& opName);
293
+
294
+ // these are used to hook into Python bindings for torch.mps.profiler module.
295
+ // this enables generating OS Signpost traces from MPSProfiler on-demand
296
+ // during runtime (instead of environment variables).
297
+ // The "mode" could be either "interval", "event", or both "interval,event"
298
+ // for interval-based and/or event-based signpost tracing.
299
+ void StartTrace(const string& mode, bool waitUntilCompleted);
300
+ void StopTrace();
301
+
302
+ // convenience functions to indicate whether signpost tracing or
303
+ // logging are enabled for the SignpostTypes
304
+ bool isOperationProfilingEnabled() const {
305
+ return (m_signpost_types & SignpostTypes::RUN_OPERATION) ||
306
+ (m_log_options & (LogOptions::OPERATION_INFO | LogOptions::OPERATION_STATS));
307
+ }
308
+ bool isCopyProfilingEnabled() const {
309
+ return (m_signpost_types & SignpostTypes::BLIT_COPY) ||
310
+ (m_log_options & (LogOptions::COPY_INFO | LogOptions::COPY_STATS));
311
+ }
312
+ bool isCPUFallbackProfilingEnabled() const {
313
+ return (m_signpost_types & SignpostTypes::CPU_FALLBACK) ||
314
+ (m_log_options & (LogOptions::CPU_FALLBACK_INFO | LogOptions::CPU_FALLBACK_STATS));
315
+ }
316
+ bool isSignpostTracingEnabled() const {
317
+ return (m_signpost_types != SignpostTypes::SIGNPOST_NONE);
318
+ }
319
+
320
+ private:
321
+ // indicates what type of signpost types are enabled and traced by MPS profiler.
322
+ uint32_t m_signpost_types = 0;
323
+ uint32_t m_profile_options = 0;
324
+ uint32_t m_log_options = 0;
325
+ uint64_t m_kernel_counter = 0;
326
+ uint64_t m_graph_counter = 0;
327
+ uint64_t m_cpu_fb_counter = 0;
328
+ uint64_t m_copy_counter = 0;
329
+ // technically, it's possible to trace both events and intervals at the same time
330
+ // so we use separate os_log categories for them
331
+ os_log_t m_os_log_events;
332
+ os_log_t m_os_log_intervals;
333
+ // stats logging could run either from destructor or signal handler
334
+ // so this is used to check if logging has already started.
335
+ std::atomic_bool hasLoggedStats{false};
336
+ // indicates there are pending completionHandler callbacks that haven't been called yet.
337
+ std::atomic_bool hasPendingCompletionHandlers{false};
338
+ // used to capture sigint signal to log profiling stats
339
+ static struct sigaction currentSigint, previousSigint;
340
+
341
+ // We use the following lists for two reasons:
342
+ // 1- for interval-based signposts the "begin" point won't be in same function
343
+ // as the "end" point where we need to be able to retrieve signpost's info
344
+ // 2- if Operations info need to be logged when process ends using LogOptions::OPERATION_INFO.
345
+
346
+ // the pointer key for this map is either "MPSGraph*" or "id<MTLComputePipelineState>" for Metal Kernels
347
+ // this list is retained and could be logged along with aggregate profiling numbers when the process ends.
348
+ std::unordered_map<uintptr_t, std::unique_ptr<OperationInfo>> m_op_info_list{};
349
+ // the string key for this map is the op name that we fall back to execute on CPU
350
+ // this list is retained and could be logged along with aggregate profiling numbers when the process ends.
351
+ std::unordered_map<std::string, std::unique_ptr<CpuFbInfo>> m_cpu_fb_info_list{};
352
+ // this list contains the info for copies, and its key is the unique profileId
353
+ // which is generated from m_copy_counter
354
+ // The copyInfo list is not retained.
355
+ std::unordered_map<uint64_t, std::unique_ptr<CopyInfo>> m_copy_info_list{};
356
+ // a short list that contains copy stats
357
+ std::unordered_map<CopyInfo::Kind, std::unique_ptr<CopyStat>> m_copy_stat_list{};
358
+
359
+ void initialize();
360
+ void beginProfileExecution(BaseInfo& info, bool cpuExecution = false);
361
+ void endProfileExecution(BaseInfo& info, os_signpost_id_t event_signpost_id,
362
+ os_signpost_id_t interval_signpost_id,
363
+ double gpuTime, double schedulingTime);
364
+ void addProfilerScheduledHandler(BaseInfo& info);
365
+ void addProfilerCompletedHandler(BaseInfo& info, SyncType syncType);
366
+ void emitSignpostEvent(SignpostTypes signpost_type, os_signpost_id_t signpost_id,
367
+ const std::string& msg) const;
368
+ void beginSignpostInterval(SignpostTypes signpost_type, os_signpost_id_t signpost_id,
369
+ const std::string& msg) const;
370
+ void endSignpostInterval(SignpostTypes signpost_type, os_signpost_id_t signpost_id) const;
371
+
372
+ void updateCopyStats(const CopyInfo& copyInfo, double gpuTime, double schedulingTime);
373
+ // returns true if logging the profiling info "during the execution" is enabled
374
+ bool isProfileInfoLoggingEnabled(BaseInfo::Type infoType, bool isExecutionEnded);
375
+ // logs all the profiling stats that are enabled
376
+ void logProfilingStats();
377
+ // logs kernel profiling stats when the process ends.
378
+ void logOperationsProfilingStats(std::FILE* f) const;
379
+ // logs CPU Fallback profiling stats when the process ends.
380
+ void logCPUFallbackProfilingStats(std::FILE* f) const;
381
+ // logs copy profiling stats when the process ends.
382
+ void logCopyProfilingStats(std::FILE* f) const;
383
+
384
+ os_signpost_id_t generateSignpostId(os_signpost_type_t signpostType, const void* ptr = nullptr);
385
+ static SignpostTypes getSignpostType(BaseInfo::Type infoType);
386
+ static void handleIntSignal(int signal);
387
+ };
388
+
389
+ } // namespace Profiler
390
+
391
+ Profiler::MPSProfiler& getMPSProfiler();
392
+
393
+ } // namespace at::mps
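To make the bit layout above concrete, here is a small standalone illustration (not part of the header) of how the lower 16 bits (ProfileOptions) and the upper 16 bits (SignpostTypes) of a single 32-bit configuration value stay disjoint; the literals mirror the enum values declared above.

// Standalone demo of the 16/16 bit split used by MPSProfiler's option enums.
#include <cstdint>
#include <cstdio>

int main() {
  constexpr uint32_t ALL_SIGNPOST_INTERVALS = (1u << 1);   // ProfileOptions (low 16 bits)
  constexpr uint32_t WAIT_UNTIL_COMPLETED   = (1u << 2);   // ProfileOptions (low 16 bits)
  constexpr uint32_t RUN_OPERATION          = (1u << 16);  // SignpostTypes (high 16 bits)
  constexpr uint32_t BLIT_COPY              = (1u << 17);  // SignpostTypes (high 16 bits)

  const uint32_t config = ALL_SIGNPOST_INTERVALS | WAIT_UNTIL_COMPLETED |
                          RUN_OPERATION | BLIT_COPY;
  // prints: options=0x0006 signposts=0x0003
  std::printf("options=0x%04x signposts=0x%04x\n",
              static_cast<unsigned>(config & 0xFFFFu),
              static_cast<unsigned>((config >> 16) & 0xFFFFu));
  return 0;
}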
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/mps/MPSStream.h ADDED
@@ -0,0 +1,133 @@
1
+ // Copyright © 2022 Apple Inc.
2
+
3
+ #pragma once
4
+
5
+ #include <cstdint>
6
+ #include <utility>
7
+
8
+ #include <c10/core/DeviceGuard.h>
9
+ #include <c10/util/Exception.h>
10
+ #include <c10/core/Stream.h>
11
+ #include <ATen/mps/MPSDevice.h>
12
+
13
+ #ifdef __OBJC__
14
+ #include <Foundation/Foundation.h>
15
+ #include <Metal/Metal.h>
16
+ #include <MetalPerformanceShaders/MetalPerformanceShaders.h>
17
+ #include <MetalPerformanceShadersGraph/MetalPerformanceShadersGraph.h>
18
+ typedef id<MTLCommandQueue> MTLCommandQueue_t;
19
+ typedef id<MTLCommandBuffer> MTLCommandBuffer_t;
20
+ typedef id<MTLComputeCommandEncoder> MTLComputeCommandEncoder_t;
21
+ typedef id<MTLSharedEvent> MTLSharedEvent_t;
22
+ typedef id<MTLDevice> MTLDevice_t;
23
+ #else
24
+ typedef void* MTLCommandQueue_t;
25
+ typedef void* MTLCommandQueue;
26
+ typedef void* MTLCommandBuffer_t;
27
+ typedef void* MTLCommandBuffer;
28
+ typedef void* MTLComputeCommandEncoder_t;
29
+ typedef void* MTLSharedEvent_t;
30
+ typedef void* dispatch_queue_t;
31
+ typedef void* MTLDevice_t;
32
+ #define nil NULL;
33
+ #endif
34
+
35
+
36
+ namespace at::mps {
37
+
38
+ //-----------------------------------------------------------------
39
+ // MPSStream
40
+ //-----------------------------------------------------------------
41
+
42
+ enum class SyncType {
43
+ NONE, // no commit to command buffer
44
+ COMMIT, // commit and flush the command buffer
45
+ COMMIT_AND_WAIT, // flush and wait for command buffer execution to finish
46
+ COMMIT_AND_CONTINUE,// commit and continue with a new underlying command buffer
47
+ COMMIT_ADAPTIVE, // commit adaptively based on available memory
48
+ };
49
+
50
+ class TORCH_API MPSStream
51
+ {
52
+ public:
53
+ enum Unchecked { UNCHECKED };
54
+
55
+ /// Construct a MPSStream from a Stream. This construction is checked,
56
+ /// and will raise an error if the Stream is not, in fact, a MPS stream.
57
+ explicit MPSStream(Stream stream);
58
+
59
+ ~MPSStream();
60
+ MTLCommandQueue_t commandQueue() const { return _commandQueue; };
61
+ dispatch_queue_t queue() const { return _serialQueue; }
62
+
63
+ MPSCommandBuffer* commandBuffer();
64
+ MTLComputeCommandEncoder_t commandEncoder();
65
+ void endKernelCoalescing();
66
+ void synchronize(SyncType syncType);
67
+ void fill(id<MTLBuffer> buffer, uint8_t value, size_t length, size_t offset, SyncType syncType = SyncType::NONE);
68
+ void copy(id<MTLBuffer> srcBuffer, id<MTLBuffer> dstBuffer,
69
+ size_t length, size_t srcOffset, size_t dstOffset,
70
+ uint64_t profileId, SyncType syncType = SyncType::NONE);
71
+ void copy_and_sync(id<MTLBuffer> srcBuffer, id<MTLBuffer> dstBuffer,
72
+ size_t length, size_t srcOffset, size_t dstOffset,
73
+ bool non_blocking, uint64_t profileId);
74
+ void executeMPSGraph(MPSGraph* mpsGraph, NSDictionary* feeds, NSDictionary* results, SyncType syncType = SyncType::NONE);
75
+ void addCompletedHandler(MTLCommandBufferHandler block);
76
+
77
+ /// Get the MPS device index that this stream is associated with.
78
+ c10::DeviceIndex device_index() const { return _stream.device_index(); }
79
+
80
+ MTLCommandQueue_t stream() const { return _commandQueue; };
81
+
82
+ MTLDevice_t device() const { return [_commandQueue device];}
83
+
84
+ /// Explicit conversion to Stream.
85
+ Stream unwrap() const { return _stream; }
86
+
87
+ private:
88
+ Stream _stream;
89
+ MTLCommandQueue_t _commandQueue = nil;
90
+ MPSCommandBuffer* _commandBuffer = nil;
91
+ MPSCommandBuffer* _prevCommandBuffer = nil;
92
+ MTLComputeCommandEncoder_t _commandEncoder = nil;
93
+ MPSGraphExecutionDescriptor *_executionDescriptor = nil;
94
+ MPSGraphCompilationDescriptor *_compilationDescriptor = nil;
95
+ dispatch_queue_t _serialQueue = nullptr;
96
+ // CommitAndContinue is enabled by default
97
+ bool _enableCommitAndContinue = true;
98
+
99
+ // use synchronize() to access any of these commit functions outside MPSStream
100
+ void commit();
101
+ void commitAndWait();
102
+ void commitAndContinue();
103
+ void flush();
104
+ };
105
+
106
+ /**
107
+ * Get the current MPS stream
108
+ */
109
+ TORCH_API MPSStream* getCurrentMPSStream();
110
+
111
+ /**
112
+ * Get the default MPS stream
113
+ */
114
+ TORCH_API MPSStream* getDefaultMPSStream();
115
+
116
+ //-----------------------------------------------------------------
117
+ // MPSStreamImpl
118
+ //-----------------------------------------------------------------
119
+
120
+ class TORCH_API MPSStreamImpl
121
+ {
122
+ public:
123
+ /**
124
+ * Gets single instance of the MPSStream.
125
+ */
126
+ static MPSStream* getInstance();
127
+
128
+ private:
129
+ static MPSStream* _stream;
130
+ MPSStreamImpl();
131
+ };
132
+
133
+ } // namespace at::mps
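As a brief, hedged sketch of the stream API declared above (assuming an ATen MPS build), a typical "flush and wait" looks like this:

// Hedged sketch: commit pending work on the current MPS stream and block
// until the GPU has finished executing it.
#include <ATen/mps/MPSStream.h>

void flushAndWait() {
  at::mps::MPSStream* stream = at::mps::getCurrentMPSStream();
  stream->synchronize(at::mps::SyncType::COMMIT_AND_WAIT);
}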
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/CuFFTUtils.h ADDED
@@ -0,0 +1,73 @@
1
+ #pragma once
2
+
3
+ #include <ATen/Config.h>
4
+
5
+ #include <string>
6
+ #include <stdexcept>
7
+ #include <sstream>
8
+ #include <cufft.h>
9
+ #include <cufftXt.h>
10
+
11
+ namespace at { namespace native {
12
+
13
+ // This means that max dim is 3 + 2 = 5 with batch dimension and possible
14
+ // complex dimension
15
+ constexpr int max_rank = 3;
16
+
17
+ static inline std::string _cudaGetErrorEnum(cufftResult error)
18
+ {
19
+ switch (error)
20
+ {
21
+ case CUFFT_SUCCESS:
22
+ return "CUFFT_SUCCESS";
23
+ case CUFFT_INVALID_PLAN:
24
+ return "CUFFT_INVALID_PLAN";
25
+ case CUFFT_ALLOC_FAILED:
26
+ return "CUFFT_ALLOC_FAILED";
27
+ case CUFFT_INVALID_TYPE:
28
+ return "CUFFT_INVALID_TYPE";
29
+ case CUFFT_INVALID_VALUE:
30
+ return "CUFFT_INVALID_VALUE";
31
+ case CUFFT_INTERNAL_ERROR:
32
+ return "CUFFT_INTERNAL_ERROR";
33
+ case CUFFT_EXEC_FAILED:
34
+ return "CUFFT_EXEC_FAILED";
35
+ case CUFFT_SETUP_FAILED:
36
+ return "CUFFT_SETUP_FAILED";
37
+ case CUFFT_INVALID_SIZE:
38
+ return "CUFFT_INVALID_SIZE";
39
+ case CUFFT_UNALIGNED_DATA:
40
+ return "CUFFT_UNALIGNED_DATA";
41
+ case CUFFT_INCOMPLETE_PARAMETER_LIST:
42
+ return "CUFFT_INCOMPLETE_PARAMETER_LIST";
43
+ case CUFFT_INVALID_DEVICE:
44
+ return "CUFFT_INVALID_DEVICE";
45
+ case CUFFT_PARSE_ERROR:
46
+ return "CUFFT_PARSE_ERROR";
47
+ case CUFFT_NO_WORKSPACE:
48
+ return "CUFFT_NO_WORKSPACE";
49
+ case CUFFT_NOT_IMPLEMENTED:
50
+ return "CUFFT_NOT_IMPLEMENTED";
51
+ #if !defined(USE_ROCM)
52
+ case CUFFT_LICENSE_ERROR:
53
+ return "CUFFT_LICENSE_ERROR";
54
+ #endif
55
+ case CUFFT_NOT_SUPPORTED:
56
+ return "CUFFT_NOT_SUPPORTED";
57
+ default:
58
+ std::ostringstream ss;
59
+ ss << "unknown error " << error;
60
+ return ss.str();
61
+ }
62
+ }
63
+
64
+ static inline void CUFFT_CHECK(cufftResult error)
65
+ {
66
+ if (error != CUFFT_SUCCESS) {
67
+ std::ostringstream ss;
68
+ ss << "cuFFT error: " << _cudaGetErrorEnum(error);
69
+ AT_ERROR(ss.str());
70
+ }
71
+ }
72
+
73
+ }} // at::native
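A small, hedged sketch of how the CUFFT_CHECK helper above is typically used to wrap cuFFT calls; it assumes a CUDA build, and cufftPlan1d/cufftDestroy are standard cuFFT entry points.

// Hedged sketch: any non-CUFFT_SUCCESS result is turned into a readable
// error string by _cudaGetErrorEnum and raised via AT_ERROR.
#include <ATen/native/cuda/CuFFTUtils.h>

void planAndDestroy() {
  cufftHandle plan;
  at::native::CUFFT_CHECK(cufftPlan1d(&plan, /*nx=*/1024, CUFFT_C2C, /*batch=*/1));
  at::native::CUFFT_CHECK(cufftDestroy(plan));
}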
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/ForeachMinMaxFunctors.cuh ADDED
@@ -0,0 +1,22 @@
1
+ #pragma once
2
+
3
+ #include <ATen/NumericUtils.h>
4
+
5
+ namespace at::native {
6
+
7
+ // std:: does not have clamp functors
8
+ template <typename T>
9
+ struct minimum {
10
+ __device__ T operator()(const T& a, const T& b) const {
11
+ return (_isnan(a) || a < b) ? a : b;
12
+ }
13
+ };
14
+
15
+ template <typename T>
16
+ struct maximum {
17
+ __device__ T operator()(const T& a, const T& b) const {
18
+ return (_isnan(a) || a > b) ? a : b;
19
+ }
20
+ };
21
+
22
+ } // namespace at::native
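The functors above differ from std::min/std::max in that a NaN in the first argument always wins. The following host-side sketch (plain C++, using std::isnan in place of ATen's _isnan, which also covers integral types) illustrates the same comparison rule:

// Host-side illustration of the NaN-propagating minimum used by the device functor.
#include <cassert>
#include <cmath>
#include <limits>

template <typename T>
T nan_propagating_min(T a, T b) {
  return (std::isnan(a) || a < b) ? a : b;
}

int main() {
  const float nan = std::numeric_limits<float>::quiet_NaN();
  assert(nan_propagating_min(2.0f, 3.0f) == 2.0f);
  assert(std::isnan(nan_propagating_min(nan, 3.0f)));  // NaN propagates
  return 0;
}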
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/MultiTensorApply.cuh ADDED
@@ -0,0 +1,379 @@
1
+ #pragma once
2
+ #include <ATen/core/Tensor.h>
3
+ #include <ATen/cuda/CUDAContext.h>
4
+ #include <c10/cuda/CUDAGuard.h>
5
+ #include <ATen/native/cuda/Loops.cuh>
6
+ #include <ATen/native/cuda/MemoryAccess.cuh>
7
+ #include <vector>
8
+
9
+ namespace at::native {
10
+
11
+ namespace {
12
+
13
+ static constexpr int64_t kILP = 4;
14
+ static constexpr int64_t kChunkSize = 65536;
15
+ static constexpr int64_t kBlockSize = 512;
16
+
17
+ // TODO(crcrpar): Add `n>5` for `low prec params & their higher prec copy`
18
+ // TensorListMetadata has to be < 4KB - the limit for kernel launch argument
19
+ static constexpr int depth_to_max_tensors[5] = {110, 64, 48, 36, 30};
20
+ static constexpr int depth_to_max_blocks[5] = {320, 320, 320, 320, 320};
21
+ static constexpr int depth_to_max_tensors_scalarlist[5] = {96, 64, 48, 36, 30};
22
+ static constexpr int depth_to_max_tensors_scalarlist_of_complex_double[2] = {
23
+ 72,
24
+ 60};
25
+
26
+ template <typename T>
27
+ __device__ __forceinline__ bool is_aligned(T* p) {
28
+ return ((uint64_t)p) % (kILP * sizeof(T)) == 0;
29
+ }
30
+
31
+ template <typename T>
32
+ __device__ __forceinline__ void load_store(
33
+ T* dst,
34
+ T* src,
35
+ int64_t dst_offset,
36
+ int64_t src_offset) {
37
+ using LT = at::native::memory::aligned_vector<T, kILP>;
38
+ ((LT*)dst)[dst_offset] = ((LT*)src)[src_offset];
39
+ }
40
+
41
+ template <int n>
42
+ struct TensorListMetadata {
43
+ const void* addresses[n][depth_to_max_tensors[n - 1]];
44
+ int64_t numel_for_tensor[depth_to_max_tensors[n - 1]];
45
+ unsigned char block_to_tensor[depth_to_max_blocks[n - 1]];
46
+ int block_to_chunk[depth_to_max_blocks[n - 1]];
47
+ int start_tensor_this_launch;
48
+ };
49
+
50
+ template <typename scalar_vals_t, int n>
51
+ struct TensorListScalarListMetadata {
52
+ const void* addresses[n][depth_to_max_tensors_scalarlist[n - 1]];
53
+ int64_t numel_for_tensor[depth_to_max_tensors_scalarlist[n - 1]];
54
+ scalar_vals_t scalar_vals[depth_to_max_tensors_scalarlist[n - 1]];
55
+ unsigned char block_to_tensor[depth_to_max_blocks[n - 1]];
56
+ int block_to_chunk[depth_to_max_blocks[n - 1]];
57
+ };
58
+
59
+ // note(mkozuki): `n` of 1&2 violate the limit of cuda kernel argument size of
60
+ // 4kb with `c10::complex<double>`
61
+ template <>
62
+ struct TensorListScalarListMetadata<c10::complex<double>, 1> {
63
+ const void* addresses[1]
64
+ [depth_to_max_tensors_scalarlist_of_complex_double[0]];
65
+ int64_t
66
+ numel_for_tensor[depth_to_max_tensors_scalarlist_of_complex_double[0]];
67
+ c10::complex<double>
68
+ scalar_vals[depth_to_max_tensors_scalarlist_of_complex_double[0]];
69
+ unsigned char block_to_tensor[depth_to_max_blocks[1 - 1]];
70
+ int block_to_chunk[depth_to_max_blocks[1 - 1]];
71
+ };
72
+
73
+ template <>
74
+ struct TensorListScalarListMetadata<c10::complex<double>, 2> {
75
+ const void* addresses[2]
76
+ [depth_to_max_tensors_scalarlist_of_complex_double[1]];
77
+ int64_t
78
+ numel_for_tensor[depth_to_max_tensors_scalarlist_of_complex_double[1]];
79
+ c10::complex<double>
80
+ scalar_vals[depth_to_max_tensors_scalarlist_of_complex_double[1]];
81
+ unsigned char block_to_tensor[depth_to_max_blocks[2 - 1]];
82
+ int block_to_chunk[depth_to_max_blocks[2 - 1]];
83
+ };
84
+
85
+ // NOTE(crcrpar): This is a conservative resolution to handle `state_steps`
86
+ // each element of which is a 1-element `at::Tensor` representing the number of
87
+ // `step`s called so far.
88
+ template <int n>
89
+ struct FusedOptimizerTensorListMetadata {
90
+ const void* addresses[n][depth_to_max_tensors[n - 1]];
91
+ int64_t numel_for_tensor[depth_to_max_tensors[n - 1]];
92
+ const void* state_steps_addresses[depth_to_max_tensors_scalarlist[n - 1]];
93
+ unsigned char block_to_tensor[depth_to_max_blocks[n - 1]];
94
+ int block_to_chunk[depth_to_max_blocks[n - 1]];
95
+ int start_tensor_this_launch;
96
+ };
97
+
98
+ template <typename T, typename U, typename... ArgTypes>
99
+ C10_LAUNCH_BOUNDS_1(kBlockSize)
100
+ __global__ void multi_tensor_apply_kernel(
101
+ T tensorListMeta,
102
+ U callable,
103
+ ArgTypes... args) {
104
+ // Hand the chunk information to the user-supplied functor to process however
105
+ // it likes.
106
+ callable(kChunkSize, tensorListMeta, args...);
107
+ }
108
+
109
+ } // namespace
110
+
111
+ // multi_tensor_apply enables horizontal fusion across lists of tensors.
112
+ // For example, whereas you once had a for-loop of a + b = c, where a, b,
113
+ // and c are individual tensors in lists as, bs, and cs, you can now with
114
+ // fewer kernel launches compute as + bs = cs.
115
+ //
116
+ // You can also imagine bs to be a scalar list vs a tensor list.
117
+ //
118
+ // The function below takes in tensor lists, scalars, and a callable and
119
+ // chunks up the computation to launch as few kernels as possible by iterating
120
+ // through every "chunk" in every tensor (thus the nested for loops). In the
121
+ // simplest case, everything gets bundled into just one kernel launch, but
122
+ // due to blocksize constraints, we may need to launch multiple kernels.
123
+ // Each kernel launch is defined by one tensorListMeta construct, which we
124
+ // use to track and reset the necessary metadata for each launch.
125
+ template <int depth, typename scalar_T, typename T, typename... ArgTypes>
126
+ void multi_tensor_apply(
127
+ std::vector<std::vector<at::Tensor>>& tensor_lists,
128
+ at::ArrayRef<Scalar> scalars,
129
+ T callable,
130
+ ArgTypes... args) {
131
+ TORCH_CHECK(
132
+ tensor_lists.size() == depth,
133
+ "Number of tensor lists has to match the depth.");
134
+ const size_t n_tensors = tensor_lists[0].size();
135
+ using scalar_vals_t = typename T::opmath_t;
136
+ TensorListScalarListMetadata<scalar_vals_t, depth> tensorListMeta;
137
+
138
+ int loc_block_info = 0;
139
+ int loc_tensor_info = 0;
140
+ for (size_t t = 0; t < n_tensors; t++) {
141
+ // short-circuit to avoid adding empty tensors to tensorListMeta
142
+ if (tensor_lists[0][t].numel() == 0) {
143
+ continue;
144
+ }
145
+ tensorListMeta.scalar_vals[loc_tensor_info] = scalars[t].to<scalar_T>();
146
+ tensorListMeta.numel_for_tensor[loc_tensor_info] =
147
+ tensor_lists[0][t].numel();
148
+ for (int d = 0; d < depth; d++) {
149
+ tensorListMeta.addresses[d][loc_tensor_info] =
150
+ tensor_lists[d][t].const_data_ptr();
151
+ }
152
+ loc_tensor_info++;
153
+
154
+ // now we enter [chunking territory].
155
+ // we will launch a kernel when EITHER the blocks get filled up OR
156
+ // the tensors get filled up. There will always be at least one block
157
+ // per tensor since the zero-sized ones will not enter the loop, so
158
+ // the nested forloop within represents iterating through the chunks
159
+ // of a single tensor.
160
+ const auto numel = tensor_lists[0][t].numel();
161
+ const auto chunks = numel / kChunkSize + (numel % kChunkSize != 0);
162
+ for (auto chunk = 0; chunk < chunks; chunk++) {
163
+ tensorListMeta.block_to_tensor[loc_block_info] = loc_tensor_info - 1;
164
+ tensorListMeta.block_to_chunk[loc_block_info] = chunk;
165
+ loc_block_info++;
166
+
167
+ // a tensor is not considered full unless all its chunks have been
168
+ // processed
169
+ const bool tensors_full =
170
+ (loc_tensor_info == depth_to_max_tensors_scalarlist[depth - 1] &&
171
+ chunk == chunks - 1);
172
+ const bool blocks_full =
173
+ (loc_block_info == depth_to_max_blocks[depth - 1]);
174
+
175
+ if (tensors_full || blocks_full) {
176
+ multi_tensor_apply_kernel<<<
177
+ loc_block_info,
178
+ kBlockSize,
179
+ 0,
180
+ at::cuda::getCurrentCUDAStream()>>>(
181
+ tensorListMeta, callable, args...);
182
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
183
+
184
+ // Reset.
185
+ loc_block_info = 0;
186
+ // all chunks have already been handled in the kernel
187
+ if (chunk == chunks - 1) {
188
+ loc_tensor_info = 0;
189
+ } else { // blocks were full and tensor chunks remain
190
+ tensorListMeta.numel_for_tensor[0] =
191
+ tensorListMeta.numel_for_tensor[loc_tensor_info - 1];
192
+ tensorListMeta.scalar_vals[0] =
193
+ tensorListMeta.scalar_vals[loc_tensor_info - 1];
194
+ for (int d = 0; d < depth; d++) {
195
+ tensorListMeta.addresses[d][0] =
196
+ tensorListMeta.addresses[d][loc_tensor_info - 1];
197
+ }
198
+ loc_tensor_info = 1;
199
+ }
200
+ }
201
+ }
202
+ }
203
+
204
+ // note: [finishing what we started]
205
+ // if there's remaining work to be done but the tensors/blocks aren't full,
206
+ // yet we are at the end, submit the kernel to do the work!
207
+ if (loc_block_info != 0) {
208
+ multi_tensor_apply_kernel<<<
209
+ loc_block_info,
210
+ kBlockSize,
211
+ 0,
212
+ at::cuda::getCurrentCUDAStream()>>>(tensorListMeta, callable, args...);
213
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
214
+ }
215
+ }
216
+
217
+ template <int depth, typename T, typename... ArgTypes>
218
+ void multi_tensor_apply(
219
+ std::vector<std::vector<at::Tensor>>& tensor_lists,
220
+ T callable,
221
+ ArgTypes... args) {
222
+ TORCH_CHECK(
223
+ tensor_lists.size() == depth,
224
+ "Number of tensor lists has to match the depth.");
225
+ const size_t n_tensors = tensor_lists[0].size();
226
+ TensorListMetadata<depth> tensorListMeta;
227
+ tensorListMeta.start_tensor_this_launch = 0;
228
+
229
+ int loc_block_info = 0;
230
+ int loc_tensor_info = 0;
231
+ for (size_t t = 0; t < n_tensors; t++) {
232
+ // short-circuit to avoid adding empty tensors to tensorListMeta
233
+ if (tensor_lists[0][t].numel() == 0) {
234
+ continue;
235
+ }
236
+ tensorListMeta.numel_for_tensor[loc_tensor_info] =
237
+ tensor_lists[0][t].numel();
238
+ for (int d = 0; d < depth; d++) {
239
+ tensorListMeta.addresses[d][loc_tensor_info] =
240
+ tensor_lists[d][t].const_data_ptr();
241
+ }
242
+ loc_tensor_info++;
243
+
244
+ // see note: [chunking territory].
245
+ const auto numel = tensor_lists[0][t].numel();
246
+ const auto chunks = numel / kChunkSize + (numel % kChunkSize != 0);
247
+ for (auto chunk = 0; chunk < chunks; chunk++) {
248
+ tensorListMeta.block_to_tensor[loc_block_info] = loc_tensor_info - 1;
249
+ tensorListMeta.block_to_chunk[loc_block_info] = chunk;
250
+ loc_block_info++;
251
+
252
+ const bool tensors_full =
253
+ (loc_tensor_info == depth_to_max_tensors[depth - 1] &&
254
+ chunk == chunks - 1);
255
+ const bool blocks_full =
256
+ (loc_block_info == depth_to_max_blocks[depth - 1]);
257
+
258
+ if (tensors_full || blocks_full) {
259
+ multi_tensor_apply_kernel<<<
260
+ loc_block_info,
261
+ kBlockSize,
262
+ 0,
263
+ at::cuda::getCurrentCUDAStream()>>>(
264
+ tensorListMeta, callable, args...);
265
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
266
+
267
+ // Reset.
268
+ loc_block_info = 0;
269
+ if (chunk == chunks - 1) {
270
+ loc_tensor_info = 0;
271
+ tensorListMeta.start_tensor_this_launch = t + 1;
272
+ } else {
273
+ tensorListMeta.numel_for_tensor[0] =
274
+ tensorListMeta.numel_for_tensor[loc_tensor_info - 1];
275
+ for (int d = 0; d < depth; d++) {
276
+ tensorListMeta.addresses[d][0] =
277
+ tensorListMeta.addresses[d][loc_tensor_info - 1];
278
+ }
279
+ loc_tensor_info = 1;
280
+ tensorListMeta.start_tensor_this_launch = t;
281
+ }
282
+ }
283
+ }
284
+ }
285
+
286
+ // see note: [finishing what we started]
287
+ if (loc_block_info != 0) {
288
+ multi_tensor_apply_kernel<<<
289
+ loc_block_info,
290
+ kBlockSize,
291
+ 0,
292
+ at::cuda::getCurrentCUDAStream()>>>(tensorListMeta, callable, args...);
293
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
294
+ }
295
+ }
296
+
297
+ template <int depth, typename T, typename... ArgTypes>
298
+ void multi_tensor_apply_for_fused_optimizer(
299
+ std::vector<std::vector<at::Tensor>>& tensor_lists,
300
+ at::TensorList state_steps,
301
+ T callable,
302
+ ArgTypes... args) {
303
+ TORCH_CHECK(
304
+ tensor_lists.size() == depth,
305
+ "Number of tensor lists has to match the depth");
306
+ const auto num_tensors = tensor_lists[0].size();
307
+ FusedOptimizerTensorListMetadata<depth> tensorListMeta;
308
+
309
+ int loc_block_info = 0;
310
+ int loc_tensor_info = 0;
311
+ for (const auto& tensor_index : c10::irange(num_tensors)) {
312
+ // short-circuit to avoid adding empty tensors to tensorListMeta
313
+ if (tensor_lists[0][tensor_index].numel() == 0) {
314
+ continue;
315
+ }
316
+ tensorListMeta.state_steps_addresses[loc_tensor_info] =
317
+ state_steps[tensor_index].const_data_ptr();
318
+ tensorListMeta.numel_for_tensor[loc_tensor_info] =
319
+ tensor_lists[0][tensor_index].numel();
320
+ for (const auto& d : c10::irange(depth)) {
321
+ tensorListMeta.addresses[d][loc_tensor_info] =
322
+ tensor_lists[d][tensor_index].const_data_ptr();
323
+ }
324
+ loc_tensor_info++;
325
+
326
+ // see above note: [chunking territory]
327
+ const auto numel = tensor_lists[0][tensor_index].numel();
328
+ const auto chunks = numel / kChunkSize + (numel % kChunkSize != 0);
329
+ TORCH_CHECK(chunks > -1);
330
+ for (const auto& chunk : c10::irange(chunks)) {
331
+ tensorListMeta.block_to_tensor[loc_block_info] = loc_tensor_info - 1;
332
+ tensorListMeta.block_to_chunk[loc_block_info] = chunk;
333
+ loc_block_info++;
334
+
335
+ const auto tensor_full =
336
+ (loc_tensor_info == depth_to_max_tensors[depth - 1] &&
337
+ chunk == chunks - 1);
338
+ const auto blocks_full = loc_block_info == depth_to_max_blocks[depth - 1];
339
+
340
+ if (tensor_full || blocks_full) {
341
+ multi_tensor_apply_kernel<<<
342
+ loc_block_info,
343
+ kBlockSize,
344
+ 0,
345
+ at::cuda::getCurrentCUDAStream()>>>(
346
+ tensorListMeta, callable, args...);
347
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
348
+
349
+ // Reset.
350
+ loc_block_info = 0;
351
+ if (chunk == chunks - 1) {
352
+ loc_tensor_info = 0;
353
+ } else {
354
+ tensorListMeta.numel_for_tensor[0] =
355
+ tensorListMeta.numel_for_tensor[loc_tensor_info - 1];
356
+ tensorListMeta.state_steps_addresses[0] =
357
+ tensorListMeta.state_steps_addresses[loc_tensor_info - 1];
358
+ for (const auto& d : c10::irange(depth)) {
359
+ tensorListMeta.addresses[d][0] =
360
+ tensorListMeta.addresses[d][loc_tensor_info - 1];
361
+ }
362
+ loc_tensor_info = 1;
363
+ }
364
+ }
365
+ }
366
+ }
367
+
368
+ // see above note: [finishing what we've started]
369
+ if (loc_block_info != 0) {
370
+ multi_tensor_apply_kernel<<<
371
+ loc_block_info,
372
+ kBlockSize,
373
+ 0,
374
+ at::cuda::getCurrentCUDAStream()>>>(tensorListMeta, callable, args...);
375
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
376
+ }
377
+ }
378
+
379
+ } // namespace at::native
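The chunk count used throughout the loops above, numel / kChunkSize + (numel % kChunkSize != 0), is a ceiling division. A standalone check of a few sizes:

// Standalone demo of the ceiling-division chunk count used by multi_tensor_apply.
#include <cstdint>
#include <cstdio>

int main() {
  constexpr int64_t kChunkSize = 65536;
  for (int64_t numel : {1, 65536, 65537, 200000}) {
    const int64_t chunks = numel / kChunkSize + (numel % kChunkSize != 0);
    // prints 1 -> 1, 65536 -> 1, 65537 -> 2, 200000 -> 4
    std::printf("numel=%lld -> chunks=%lld\n",
                static_cast<long long>(numel), static_cast<long long>(chunks));
  }
  return 0;
}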
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/ROCmLoops.cuh ADDED
@@ -0,0 +1,364 @@
1
+ #pragma once
2
+
3
+ // This file provides two functions to help write GPU elementwise kernels:
4
+ //
5
+ // gpu_kernel(TensorIterator iter, <lambda>)
6
+ // gpu_kernel_with_scalars(TensorIterator iter, <lambda>)
7
+ //
8
+ // The gpu_kernel_with_scalars generates specializations that support a
9
+ // single scalar CPU argument, such as from `cuda_tensor + 5`. The CPU scalar
10
+ // is lifted to a kernel parameter instead of copying to device memory.
11
+ // This should be used in conjunction with TensorIterator::allow_cpu_scalars_,
12
+ // which is the default for TensorIterator::binary_op. Otherwise, all inputs
13
+ // and the output must be on the GPU.
14
+ //
15
+ // For example, to write a reciprocal kernel for GPU float Tensors:
16
+ //
17
+ // gpu_kernel(iter, []GPU_LAMBDA(float a) {
18
+ // return 1.0f / a;
19
+ // });
20
+ //
21
+ // To write a multiplication kernel for GPU float Tensors where one argument
22
+ // may be a CPU scalar:
23
+ //
24
+ // gpu_kernel_with_scalars(iter, []GPU_LAMBDA(float a, float b) {
25
+ // return a * b;
26
+ // });
27
+ //
28
+ // See BinaryOpsKernel.cu for the complete implementation
29
+ //
30
+
31
+ #include <type_traits>
32
+
33
+ #include <ATen/cuda/CUDAContext.h>
34
+ #include <ATen/core/Array.h>
35
+ #include <ATen/cuda/detail/OffsetCalculator.cuh>
36
+ #include <ATen/detail/FunctionTraits.h>
37
+ #include <ATen/native/TensorIterator.h>
38
+ #include <c10/macros/Macros.h>
39
+ #include <c10/core/ScalarType.h>
40
+ #include <c10/core/DynamicCast.h>
41
+
42
+
43
+ #ifdef __NVCC__
44
+ #define ASSERT_HOST_DEVICE_LAMBDA(type) \
45
+ static_assert(__nv_is_extended_host_device_lambda_closure_type(type), \
46
+ #type " must be a __host__ __device__ lambda")
47
+ #else
48
+ #define ASSERT_HOST_DEVICE_LAMBDA(type)
49
+ #endif
50
+
51
+ static constexpr int launch_size_1d = 512;
52
+ static constexpr int launch_size_nd = 128;
53
+ static constexpr int launch_bound2 = 4;
54
+
55
+
56
+ namespace at { namespace native {
57
+
58
+ // See [NOTE: Complex Operator Unification]
59
+ // std::complex and thrust::complex don't work with some !needs_dynamic_casting optimizations.
60
+ // They always currently map to !needs_dynamic_casting even though we sometimes rely on the ability
61
+ // to reinterpret_cast between these representations.
62
+ // In order to separate these concerns, we have a check for non-c10 complex separately.
63
+ template<typename func_t, int nargs=function_traits<func_t>::arity>
64
+ struct uses_non_c10_complex {
65
+ constexpr static bool check() {
66
+ using traits = function_traits<func_t>;
67
+ using type = typename traits::template arg<nargs - 1>::type;
68
+ constexpr bool non_c10_complex =
69
+ std::is_same<std::complex<float>, type>::value
70
+ || std::is_same<std::complex<double>, type>::value
71
+ || std::is_same<thrust::complex<float>, type>::value
72
+ || std::is_same<thrust::complex<double>, type>::value;
73
+
74
+ if constexpr (non_c10_complex) {
75
+ return true;
76
+ } else {
77
+ return uses_non_c10_complex<func_t, nargs - 1>::check();
78
+ }
79
+ }
80
+ };
81
+
82
+ template<typename func_t>
83
+ struct uses_non_c10_complex<func_t, 0> {
84
+ constexpr static bool check() {
85
+ using traits = function_traits<func_t>;
86
+ using type = typename traits::result_type;
87
+ constexpr bool non_c10_complex =
88
+ std::is_same<std::complex<float>, type>::value
89
+ || std::is_same<std::complex<double>, type>::value
90
+ || std::is_same<thrust::complex<float>, type>::value
91
+ || std::is_same<thrust::complex<double>, type>::value;
92
+
93
+ return non_c10_complex;
94
+ }
95
+ };
96
+
97
+ // NOTE: @zasdfgbnm is currently working on rewriting the gpu loops.
98
+ // Some of the old code has been moved to namespace legacy, and
99
+ // new code will be put into namespace modern. These two namespaces
100
+ // will coexist for a while until the rewrite is done. Once the rewrite
101
+ // is done, we will remove the legacy and modern namespaces and everything
102
+ // will be in at::native directly.
103
+ namespace legacy {
104
+
105
+ template<int nt, int vt, typename func_t>
106
+ C10_LAUNCH_BOUNDS_2(nt, launch_bound2)
107
+ __global__ void elementwise_kernel(int N, func_t f) {
108
+ int tid = threadIdx.x;
109
+ int nv = nt * vt;
110
+ int idx = nv * blockIdx.x + tid;
111
+ #pragma unroll
112
+ for (int i = 0; i < vt; i++) {
113
+ if (idx < N) {
114
+ f(idx);
115
+ idx += nt;
116
+ }
117
+ }
118
+ }
119
+
120
+ template<int nt, int vt, typename func_t>
121
+ static void launch_kernel(int64_t N, const func_t& f) {
122
+ TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits<int32_t>::max());
123
+ if (N == 0) {
124
+ return;
125
+ }
126
+ dim3 block(nt);
127
+ dim3 grid((N + block.x * vt - 1) / (block.x * vt));
128
+ auto stream = at::cuda::getCurrentCUDAStream();
129
+ elementwise_kernel<nt, vt, func_t><<<grid, block, 0, stream>>>(N, f);
130
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
131
+ }
132
+
133
+ template <typename traits, typename func_t, typename index_t, size_t... INDEX>
134
+ C10_HOST_DEVICE typename traits::result_type
135
+ invoke_impl(const func_t &f, char *const C10_RESTRICT data[], const index_t strides[], int i,
136
+ std::index_sequence<INDEX...>) {
137
+ return f(c10::load<typename traits::template arg<INDEX>::type>(data[INDEX] + i * strides[INDEX])...);
138
+ }
139
+
140
+ template <typename func_t, typename index_t, typename traits = function_traits<func_t>>
141
+ C10_HOST_DEVICE typename traits::result_type
142
+ invoke(const func_t &f, char *const C10_RESTRICT data[], const index_t strides[], int i) {
143
+ using Indices = std::make_index_sequence<traits::arity>;
144
+ return invoke_impl<traits>(f, data, strides, i, Indices{});
145
+ }
146
+
147
+ template <typename traits, typename func_t, typename index_t, size_t... I>
148
+ C10_HOST_DEVICE typename traits::result_type
149
+ invoke_impl(const func_t &f, char *const C10_RESTRICT data[], const index_t strides[], const ScalarType dtypes[], int i,
150
+ std::index_sequence<I...>) {
151
+ return f(c10::fetch_and_cast<typename traits::template arg<I>::type>(dtypes[I], data[I] + i * strides[I])...);
152
+ }
153
+
154
+ template <typename func_t, typename index_t, typename traits = function_traits<func_t>>
155
+ C10_HOST_DEVICE typename traits::result_type
156
+ invoke(const func_t &f, char *const C10_RESTRICT data[], const index_t strides[], const ScalarType dtypes[], int i) {
157
+ using Indices = std::make_index_sequence<traits::arity>;
158
+ return invoke_impl<traits>(f, data, strides, dtypes, i, Indices{});
159
+ }
160
+
161
+ } // namespace legacy
162
+
163
+ // See the note for namespace legacy above.
164
+ namespace modern {
165
+
166
+ namespace detail {
167
+
168
+ template <typename func_t, typename array_t, std::size_t... I>
169
+ __device__ inline constexpr decltype(auto) invoke_with_array_impl(func_t f, array_t t, std::index_sequence<I...>)
170
+ {
171
+ return f(t[I]...);
172
+ }
173
+ template <typename func_t, typename array_t>
174
+ __device__ inline constexpr decltype(auto) invoke_with_array(func_t f, array_t a) {
175
+ constexpr auto arity = function_traits<func_t>::arity;
176
+ return invoke_with_array_impl(f, a, std::make_index_sequence<arity>{});
177
+ }
178
+
179
+ namespace arg_type {
180
+
181
+ // We need a way to compute the argument type of a function. A
182
+ // nullary function does not really have an argument type;
183
+ // in that case we still need to return a valid type, but we don't
184
+ // really care which type it is.
185
+
186
+ struct dont_care {};
187
+
188
+ template <typename func_t, std::size_t arity>
189
+ struct arg_type_helper {
190
+ using type = typename function_traits<func_t>::template arg<0>::type;
191
+ };
192
+
193
+ template <typename func_t>
194
+ struct arg_type_helper<func_t, 0> {
195
+ using type = dont_care;
196
+ };
197
+
198
+ template <typename func_t>
199
+ using type = typename arg_type_helper<func_t, function_traits<func_t>::arity>::type;
200
+
201
+ } // namespace arg_type
202
+
203
+ template<typename func_t, int remaining=function_traits<func_t>::arity-1>
204
+ struct has_same_arg_types {
205
+ using traits = function_traits<func_t>;
206
+ static constexpr bool value = std::is_same<
207
+ typename traits::template arg<remaining>::type,
208
+ typename traits::template arg<remaining-1>::type
209
+ >::value && has_same_arg_types<func_t, remaining-1>::value;
210
+ };
211
+
212
+ template<typename func_t>
213
+ struct has_same_arg_types<func_t, 0> {
214
+ static constexpr bool value = true;
215
+ };
216
+
217
+ template<typename func_t>
218
+ struct has_same_arg_types<func_t, -1> {
219
+ static constexpr bool value = true;
220
+ };
221
+
222
+ } // namespace detail
223
+
224
+ template<typename func_t, typename array_t>
225
+ C10_LAUNCH_BOUNDS_1(num_threads())
226
+ __global__ void elementwise_kernel(int N, func_t f, array_t data) {
227
+ // Assumption:
228
+ // 1. all arguments of `f` have the same type, which could be different from the return type of `f`
229
+ // 2. all tensors are contiguous, that is: stride == sizeof(type) for all tensors
230
+
231
+ using traits = function_traits<func_t>;
232
+ using return_t = typename traits::result_type;
233
+ using arg_t = detail::arg_type::type<func_t>;
234
+ constexpr int arity = traits::arity;
235
+
236
+ // We need to create an array to hold all the arguments; for a nullary `f`, this means an array of size 0.
237
+ // Unfortunately the compiler doesn't allow us to create a zero-sized array, so for this case we create
238
+ // an array of size 1 and just don't use it.
239
+ constexpr int nargs = traits::arity == 0 ? 1 : traits::arity;
240
+
241
+ int tid = threadIdx.x;
242
+ int idx = block_work_size() * blockIdx.x + tid;
243
+
244
+ // compute base pointers
245
+ return_t *result_base = reinterpret_cast<return_t *>(data[0]) + idx;
246
+ arg_t *args_base[nargs];
247
+ #pragma unroll
248
+ for (int i = 0; i < arity; i++) {
249
+ args_base[i] = reinterpret_cast<arg_t *>(data[i + 1]) + idx;
250
+ }
251
+
252
+ // fetch data
253
+ return_t results[thread_work_size()];
254
+ arg_t args[thread_work_size()][nargs];
255
+ #pragma unroll
256
+ for (int i = 0; i < thread_work_size(); i++) {
257
+ if (idx + num_threads() * i < N) {
258
+ #pragma unroll
259
+ for (int j = 0; j < arity; j++) {
260
+ args[i][j] = c10::load(args_base[j] + i * num_threads());
261
+ }
262
+ }
263
+ }
264
+
265
+ // compute
266
+ #pragma unroll
267
+ for (int i = 0; i < thread_work_size(); i++) {
268
+ if (idx + num_threads() * i < N) {
269
+ results[i] = detail::invoke_with_array<func_t, arg_t[nargs]>(f, args[i]);
270
+ }
271
+ }
272
+
273
+ // store data
274
+ #pragma unroll
275
+ for (int i = 0; i < thread_work_size(); i++) {
276
+ if (idx + num_threads() * i < N) {
277
+ *(result_base + i * num_threads()) = results[i];
278
+ }
279
+ }
280
+ }
281
+
282
+ // TODO (@zasdfgbnm): this function assumes trivial 1d and no dynamic casting
283
+ template<typename func_t, typename array_t, std::enable_if_t<detail::has_same_arg_types<func_t>::value, int> = 0>
284
+ static void launch_kernel(int64_t N, const func_t& f, array_t data) {
285
+ TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits<int32_t>::max());
286
+ if (N == 0) {
287
+ return;
288
+ }
289
+ int64_t grid = (N + block_work_size() - 1) / block_work_size();
290
+ auto stream = at::cuda::getCurrentCUDAStream();
291
+ elementwise_kernel<func_t, array_t><<<grid, num_threads(), 0, stream>>>(N, f, data);
292
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
293
+ }
294
+
295
+ template<typename func_t, typename array_t, std::enable_if_t<!detail::has_same_arg_types<func_t>::value, int> = 0>
296
+ static void launch_kernel(int64_t N, const func_t& f, array_t data) {}
297
+
298
+ } // namespace modern
299
+
300
+
301
+ template <typename func_t>
302
+ void gpu_kernel_impl(TensorIteratorBase& iter, const func_t& f) {
303
+ using traits = function_traits<func_t>;
304
+ using arg0_t = typename traits::result_type;
305
+ constexpr int ntensors = traits::arity + 1;
306
+
307
+ TORCH_INTERNAL_ASSERT(iter.can_use_32bit_indexing());
308
+ TORCH_INTERNAL_ASSERT(iter.ntensors() == traits::arity + 1);
309
+ bool non_c10_complex = uses_non_c10_complex<func_t>::check();
310
+
311
+ at::detail::Array<char*, ntensors> data;
312
+ for (int i = 0; i < ntensors; i++) {
313
+ data[i] = (char*)iter.data_ptr(i);
314
+ }
315
+
316
+ at::detail::Array<ScalarType, ntensors> dtypes;
317
+ for (int i = 0; i < ntensors; i++) {
318
+ dtypes[i] = iter.dtype(i);
319
+ }
320
+
321
+ int64_t numel = iter.numel();
322
+ if (iter.is_trivial_1d()) {
323
+ auto inner_strides = iter.get_inner_strides();
324
+ at::detail::Array<int, ntensors> strides;
325
+ for (int i = 0; i < ntensors; i++) {
326
+ strides[i] = inner_strides[i];
327
+ }
328
+
329
+ // TODO: can non_c10_complex go through the other path? Need to verify.
330
+ if (needs_dynamic_casting<func_t>::check(iter) || non_c10_complex) {
331
+ legacy::launch_kernel<launch_size_1d, 1>(numel, [=]GPU_LAMBDA(int idx) {
332
+ void* out = data[0] + strides[0] * idx;
333
+ arg0_t result = legacy::invoke(f, &data.data[1], &strides.data[1], &dtypes.data[1], idx);
334
+ c10::cast_and_store<arg0_t>(dtypes[0], out, result);
335
+ });
336
+ } else if (iter.has_contiguous_first_dim() && modern::detail::has_same_arg_types<func_t>::value) {
337
+ modern::launch_kernel(numel, f, data);
338
+ } else {
339
+ legacy::launch_kernel<launch_size_1d, 1>(numel, [=]GPU_LAMBDA(int idx) {
340
+ arg0_t* out = (arg0_t*)(data[0] + strides[0] * idx);
341
+ *out = legacy::invoke(f, &data.data[1], &strides.data[1], idx);
342
+ });
343
+ }
344
+ } else {
345
+ auto offset_calc = ::make_offset_calculator<traits::arity + 1>(iter);
346
+ // TODO: can non_c10_complex go through the other path? Need to verify.
347
+ if (needs_dynamic_casting<func_t>::check(iter) || non_c10_complex) {
348
+ legacy::launch_kernel<launch_size_nd, launch_bound2>(numel, [=]GPU_LAMBDA(int idx) {
349
+ auto offsets = offset_calc.get(idx);
350
+ void* out = data[0] + offsets[0];
351
+ arg0_t result = legacy::invoke(f, &data.data[1], &offsets.data[1], &dtypes.data[1], 1);
352
+ c10::cast_and_store<arg0_t>(dtypes[0], out, result);
353
+ });
354
+ } else {
355
+ legacy::launch_kernel<launch_size_nd, launch_bound2>(numel, [=]GPU_LAMBDA(int idx) {
356
+ auto offsets = offset_calc.get(idx);
357
+ arg0_t* out = (arg0_t*)(data[0] + offsets[0]);
358
+ *out = legacy::invoke(f, &data.data[1], &offsets.data[1], 1);
359
+ });
360
+ }
361
+ }
362
+ }
363
+
364
+ }} // namespace at::native
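Editor's note: both `legacy::elementwise_kernel` and `modern::elementwise_kernel` above tile the flat index space so that each block covers `nt * vt` (respectively `block_work_size()`) elements, and each thread walks its tile with a stride of `nt` / `num_threads()`. The standalone CUDA C++ sketch below reproduces only that tiling pattern, with made-up constants `NT`/`VT` and a hypothetical `tile_kernel`; it is an illustration of the indexing, not PyTorch's launcher (build with `nvcc --extended-lambda`).

```cpp
// Minimal sketch of the block-tiling/index pattern used above.
// NT and VT are illustrative stand-ins for nt/vt (and num_threads()/thread_work_size()).
#include <cstdio>
#include <cuda_runtime.h>

constexpr int NT = 128;  // threads per block
constexpr int VT = 4;    // elements handled by each thread

template <typename func_t>
__global__ void tile_kernel(int N, func_t f) {
  int idx = NT * VT * blockIdx.x + threadIdx.x;  // first element owned by this thread
  #pragma unroll
  for (int i = 0; i < VT; i++) {
    if (idx < N) {
      f(idx);
      idx += NT;  // stride by the block width inside the block's tile
    }
  }
}

int main() {
  const int N = 1000;
  float* x = nullptr;
  cudaMallocManaged(&x, N * sizeof(float));
  for (int i = 0; i < N; i++) x[i] = float(i);

  // Same grid-size computation as launch_kernel: one block per NT*VT elements.
  int grid = (N + NT * VT - 1) / (NT * VT);
  tile_kernel<<<grid, NT>>>(N, [=] __device__(int i) { x[i] *= 2.0f; });
  cudaDeviceSynchronize();

  printf("x[999] = %g\n", x[999]);  // expect 1998
  cudaFree(x);
  return 0;
}
```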
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/ReduceOps.h ADDED
@@ -0,0 +1,20 @@
1
+
2
+ namespace at {
3
+ struct TensorIterator;
4
+ }
5
+
6
+ namespace c10 {
7
+ class Scalar;
8
+ }
9
+
10
+ namespace at { namespace native {
11
+
12
+ void norm_launch_kernel(TensorIterator &iter, double val);
13
+ void min_launch_kernel(TensorIterator &iter);
14
+ void max_launch_kernel(TensorIterator &iter);
15
+ void aminmax_launch_kernel(TensorIterator &iter);
16
+ void min_all_launch_kernel(TensorIterator &iter);
17
+ void max_all_launch_kernel(TensorIterator &iter);
18
+ void aminmax_allreduce_launch_kernel(TensorIterator &iter);
19
+
20
+ }} // namespace at::native
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/TensorModeKernel.h ADDED
@@ -0,0 +1,19 @@
1
+ #pragma once
2
+ #include <cstdint>
3
+
4
+ namespace at {
5
+ class TensorBase;
6
+ }
7
+
8
+ namespace at {
9
+ namespace native {
10
+
11
+ void launch_fused_mode_kernel(
12
+ const TensorBase &values, const TensorBase &indices,
13
+ const TensorBase &self, int64_t slice_size, int64_t slices);
14
+
15
+ void launch_apply_mode_kernel(
16
+ const TensorBase &values, const TensorBase &indices,
17
+ const TensorBase &self, int64_t dim, int64_t ndim);
18
+
19
+ }} // namespace at::native
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/TensorTopK.h ADDED
@@ -0,0 +1,14 @@
1
+ #pragma once
2
+ #include <cstdint>
3
+
4
+ namespace at {
5
+ class TensorBase;
6
+ }
7
+
8
+ namespace at {
9
+ namespace native {
10
+ void launch_gather_topk_kernel(
11
+ const TensorBase& self,
12
+ int64_t k, int64_t dim, bool largest,
13
+ const TensorBase& values, const TensorBase& indices);
14
+ }}
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/UniqueCub.cuh ADDED
@@ -0,0 +1,16 @@
1
+ #include <ATen/core/Tensor.h>
2
+
3
+ namespace at {
4
+ namespace native {
5
+ namespace internal {
6
+
7
+ template <typename scalar_t>
8
+ std::tuple<Tensor, Tensor, Tensor> unique_cuda_template(
9
+ const Tensor& self,
10
+ const bool consecutive,
11
+ const bool return_inverse,
12
+ const bool return_counts);
13
+
14
+ } // namespace internal
15
+ } // namespace native
16
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/block_reduce.cuh ADDED
@@ -0,0 +1,105 @@
1
+ #pragma once
2
+
3
+ #include <thrust/tuple.h>
4
+
5
+ #include <ATen/native/SharedReduceOps.h>
6
+ #include <ATen/cuda/DeviceUtils.cuh>
7
+
8
+ namespace at {
9
+ namespace native {
10
+ namespace cuda_utils {
11
+
12
+ constexpr int kCUDABlockReduceNumThreads = 512;
13
+ // Algorithmic limitation: BlockReduce does two WarpReduce calls, each
14
+ // of which reduces C10_WARP_SIZE elements. So, at most
15
+ // C10_WARP_SIZE**2 elements can be reduced at a time.
16
+ // NOTE: This is >= the max block size on current hardware anyway (1024).
17
+ constexpr int kCUDABlockReduceMaxThreads = C10_WARP_SIZE * C10_WARP_SIZE;
18
+
19
+ // Sums `val` across all threads in a warp.
20
+ //
21
+ // Assumptions:
22
+ // - The size of each block should be a multiple of `C10_WARP_SIZE`
23
+ template <typename T>
24
+ __inline__ __device__ T WarpReduceSum(T val) {
25
+ #pragma unroll
26
+ for (int offset = (C10_WARP_SIZE >> 1); offset > 0; offset >>= 1) {
27
+ val += WARP_SHFL_DOWN(val, offset);
28
+ }
29
+ return val;
30
+ }
31
+
32
+ struct Block1D {
33
+ static __forceinline__ __device__ int Tid() { return threadIdx.x; }
34
+
35
+ static __forceinline__ __device__ int Warps() {
36
+ return blockDim.x / C10_WARP_SIZE;
37
+ }
38
+ };
39
+
40
+ struct Block2D {
41
+ static __forceinline__ __device__ int Tid() {
42
+ return threadIdx.x + threadIdx.y * blockDim.x;
43
+ }
44
+
45
+ static __forceinline__ __device__ int Warps() {
46
+ return blockDim.x * blockDim.y / C10_WARP_SIZE;
47
+ }
48
+ };
49
+
50
+ // Sums `val` across all threads in a block.
51
+ //
52
+ // Warning: the return value is only valid for thread 0.
53
+ // Assumptions:
54
+ // - The size of each block should be a multiple of `C10_WARP_SIZE`
55
+ // - `shared` should be a pointer to shared memory with size of, at least,
56
+ // `sizeof(T) * number_of_warps`
57
+ template <typename T, typename B = Block1D>
58
+ __inline__ __device__ T BlockReduceSum(T val, T* shared) {
59
+ const int tid = B::Tid();
60
+ const int lid = tid % C10_WARP_SIZE;
61
+ const int wid = tid / C10_WARP_SIZE;
62
+ val = WarpReduceSum(val);
63
+ __syncthreads(); // prevent races when BlockReduces are called in a row.
64
+ if (lid == 0) {
65
+ shared[wid] = val;
66
+ }
67
+ __syncthreads();
68
+ val = (tid < B::Warps()) ? shared[lid] : T(0);
69
+ if (wid == 0) {
70
+ val = WarpReduceSum(val);
71
+ }
72
+ return val;
73
+ }
74
+
75
+ template <typename T, class ReduceOp>
76
+ __inline__ __device__ T WarpReduce(T val, const ReduceOp& op) {
77
+ #pragma unroll
78
+ for (int offset = (C10_WARP_SIZE >> 1); offset > 0; offset >>= 1) {
79
+ val = op.combine(val, op.warp_shfl_down(val, offset));
80
+ }
81
+ return val;
82
+ }
83
+
84
+ template <typename T, class ReduceOp, typename B = Block1D>
85
+ __inline__ __device__ T
86
+ BlockReduce(T val, const ReduceOp& op, const T& identity_element, T* shared) {
87
+ const int tid = B::Tid();
88
+ const int lid = tid % C10_WARP_SIZE;
89
+ const int wid = tid / C10_WARP_SIZE;
90
+ val = WarpReduce(val, op);
91
+ __syncthreads(); // prevent races when BlockReduces are called in a row.
92
+ if (lid == 0) {
93
+ shared[wid] = val;
94
+ }
95
+ __syncthreads();
96
+ val = (tid < B::Warps()) ? shared[lid] : identity_element;
97
+ if (wid == 0) {
98
+ val = WarpReduce(val, op);
99
+ }
100
+ return val;
101
+ }
102
+
103
+ } // namespace cuda_utils
104
+ } // namespace native
105
+ } // namespace at
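Editor's note: as a hedged illustration of the reduction strategy in `WarpReduceSum`/`BlockReduceSum` above (warp-level shuffles, per-warp partials staged in shared memory, then a final reduction by warp 0), here is a self-contained CUDA C++ sketch. It hard-codes a warp size of 32, uses hypothetical names (`warp_reduce_sum`, `block_sum`), and combines per-block results with an `atomicAdd`; it is not the ATen code itself.

```cpp
// Standalone sketch of the two-level block reduction shown above.
// Assumes a warp size of 32 and a block size that is a multiple of 32.
#include <cstdio>
#include <cuda_runtime.h>

constexpr int kWarpSize = 32;

__inline__ __device__ float warp_reduce_sum(float val) {
  #pragma unroll
  for (int offset = kWarpSize / 2; offset > 0; offset >>= 1) {
    val += __shfl_down_sync(0xffffffff, val, offset);  // analogous to WARP_SHFL_DOWN
  }
  return val;
}

__global__ void block_sum(const float* in, float* out, int n) {
  __shared__ float shared[kWarpSize];  // one slot per warp (at most 32 warps per block)
  const int tid = threadIdx.x;
  const int lid = tid % kWarpSize;     // lane within the warp
  const int wid = tid / kWarpSize;     // warp within the block

  const int i = blockIdx.x * blockDim.x + tid;
  float val = (i < n) ? in[i] : 0.0f;

  val = warp_reduce_sum(val);          // 1) reduce within each warp
  if (lid == 0) shared[wid] = val;     // 2) lane 0 publishes its warp's partial sum
  __syncthreads();

  const int nwarps = blockDim.x / kWarpSize;
  val = (tid < nwarps) ? shared[lid] : 0.0f;
  if (wid == 0) val = warp_reduce_sum(val);  // 3) warp 0 reduces the partials
  if (tid == 0) atomicAdd(out, val);         // result is only valid in thread 0
}

int main() {
  const int n = 1 << 20;
  float *in = nullptr, *out = nullptr;
  cudaMallocManaged(&in, n * sizeof(float));
  cudaMallocManaged(&out, sizeof(float));
  for (int i = 0; i < n; i++) in[i] = 1.0f;
  *out = 0.0f;

  block_sum<<<(n + 255) / 256, 256>>>(in, out, n);
  cudaDeviceSynchronize();
  printf("sum = %g (expected %d)\n", *out, n);
  cudaFree(in);
  cudaFree(out);
  return 0;
}
```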
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/cuda/jit_utils.h ADDED
@@ -0,0 +1,215 @@
1
+ #pragma once
2
+
3
+ #include <string>
4
+ #include <sstream>
5
+ #include <unordered_map>
6
+ #include <vector>
7
+
8
+ #include <c10/util/irange.h>
9
+ #include <ATen/jit_macros.h>
10
+ #include <ATen/cuda/detail/LazyNVRTC.h>
11
+
12
+ namespace at { namespace cuda { namespace jit {
13
+
14
+ enum class BinaryFuncVariant {NoScalar, RhsScalar, LhsScalar};
15
+
16
+ struct NvrtcFunction {
17
+ CUmodule module = CUmodule();
18
+ CUfunction function = nullptr;
19
+ };
20
+
21
+ struct KernelDescriptor {
22
+ std::string name;
23
+ std::string f;
24
+ c10::ScalarType f_inputs_type;
25
+ c10::ScalarType result_type;
26
+ c10::SmallVector<c10::ScalarType> extra_args_types;
27
+ int nInputs, nOutputs;
28
+ };
29
+
30
+ // Helper function to return a SmallVector of ScalarTypes
31
+ // corresponding to the types of the arguments in the parameter pack.
32
+ template <typename... Args>
33
+ c10::SmallVector<at::ScalarType> get_extra_args_types() {
34
+ return {c10::CppTypeToScalarType<Args>::value ...};
35
+ }
36
+
37
+ template <
38
+ typename result_type,
39
+ typename f_inputs_type,
40
+ typename... ExtraArgs>
41
+ KernelDescriptor make_kernel_descriptor(
42
+ std::string name,
43
+ std::string f,
44
+ int nInputs,
45
+ int nOutputs) {
46
+ KernelDescriptor ret;
47
+ ret.name = std::move(name);
48
+ ret.f = std::move(f);
49
+ ret.f_inputs_type = c10::CppTypeToScalarType<f_inputs_type>::value;
50
+ ret.result_type = c10::CppTypeToScalarType<result_type>::value;
51
+ ret.extra_args_types = get_extra_args_types<ExtraArgs...>();
52
+ ret.nInputs = nInputs;
53
+ ret.nOutputs = nOutputs;
54
+ return ret;
55
+ }
56
+
57
+ inline int can_vectorize_up_to(size_t default_alignment, void *pointer) {
58
+ auto ip = reinterpret_cast<uintptr_t>(pointer);
59
+ if (ip % (4 * default_alignment) == 0) {
60
+ return 4;
61
+ }
62
+ if (ip % (2 * default_alignment) == 0) {
63
+ return 2;
64
+ }
65
+ return 1;
66
+ }
67
+
68
+ inline int can_vectorize_up_to(const KernelDescriptor &desc, c10::ArrayRef<char*> pointers) {
69
+ TORCH_INTERNAL_ASSERT(desc.nOutputs == 1);
70
+ TORCH_INTERNAL_ASSERT(static_cast<int64_t>(pointers.size()) == 1 + desc.nInputs);
71
+
72
+ // Deals with output
73
+ auto result_size = c10::scalarTypeToTypeMeta(desc.result_type).itemsize();
74
+ int result = can_vectorize_up_to(result_size, pointers[0]);
75
+
76
+ // Incorporates input(s)
77
+ auto input_size = c10::scalarTypeToTypeMeta(desc.f_inputs_type).itemsize();
78
+ for (auto i : c10::irange(1, pointers.size())) {
79
+ result = std::min(result, can_vectorize_up_to(input_size, pointers[i]));
80
+ }
81
+
82
+ return result;
83
+ }
84
+
85
+ std::string generate_code(
86
+ int nInputs,
87
+ int nOutputs,
88
+ const std::string& func,
89
+ const std::string& name,
90
+ const std::string& f_input_type,
91
+ const std::string& compute_type,
92
+ const std::string& result_type,
93
+ bool contiguous,
94
+ bool dynamic_casting,
95
+ BinaryFuncVariant scalar_pos,
96
+ c10::SmallVector<std::string>& extra_args_typenames,
97
+ bool vectorized=false,
98
+ int vec_size=0,
99
+ bool return_by_ref=false);
100
+
101
+ std::string generate_code(
102
+ const KernelDescriptor &desc,
103
+ bool contiguous,
104
+ bool dynamic_casting,
105
+ BinaryFuncVariant scalar_pos,
106
+ bool vectorized=false,
107
+ int vec_size=0,
108
+ bool return_by_ref=false);
109
+
110
+ std::string generate_reduction_code(
111
+ int nOutputs,
112
+ const std::string& func,
113
+ const std::string& name,
114
+ const int vt0,
115
+ const std::string& f_inputs_type,
116
+ const std::string& reduction_accum_type,
117
+ const std::string& result_type,
118
+ bool contiguous,
119
+ bool vectorized,
120
+ int vec_size,
121
+ int max_threads_codegen);
122
+
123
+ std::string generate_reduction_code(
124
+ const KernelDescriptor &desc,
125
+ const int vt0,
126
+ bool contiguous,
127
+ bool vectorized,
128
+ int vec_size,
129
+ int max_threads_codegen);
130
+
131
+ NvrtcFunction jit_pwise_function(
132
+ const std::string& code,
133
+ const std::string& kernel_name);
134
+
135
+ void launch_jitted_pwise_function(
136
+ NvrtcFunction function,
137
+ void* args[],
138
+ const dim3 nBlocks,
139
+ const dim3 kBlockSize,
140
+ const int smem=0);
141
+
142
+ template <typename T>
143
+ struct delayed_false : std::false_type {
144
+ };
145
+
146
+ // Defines type names
147
+ // NOTE: General case is instantiated only for invalid types.
148
+ // All the valid types have specialization using the TYPE_NAME_FN
149
+ // macro below.
150
+ template <typename T>
151
+ inline std::string typeName() {
152
+ // we can't use static_assert(false) directly as the
153
+ // program will be not compiled even if the template is not
154
+ // instantiated, so we use `delayed_false`
155
+ // to make sure compiler doesn't eagerly raise
156
+ // fail this assertion.
157
+ static_assert(delayed_false<T>::value, "invalid type for jiterator");
158
+ return "void";
159
+ }
160
+
161
+ #define TYPE_NAME_FN(ctype, name) \
162
+ template <> inline std::string typeName<ctype>(){ \
163
+ return std::string(#ctype); \
164
+ }
165
+
166
+ AT_FORALL_SCALAR_TYPES(TYPE_NAME_FN)
167
+ #undef TYPE_NAME_FN
168
+ // JIT uses std::complex directly, because nvRTC compile programs
169
+ // with -default-device, so there is no such issue like:
170
+ // "std::sin(complex) is __host__ only"
171
+ template <> inline std::string typeName<bool>(){
172
+ return "bool";
173
+ }
174
+ template <> inline std::string typeName<c10::complex<at::Half>>(){
175
+ return "std::complex<at::Half>";
176
+ }
177
+ template <> inline std::string typeName<c10::complex<float>>(){
178
+ return "std::complex<float>";
179
+ }
180
+ template <> inline std::string typeName<c10::complex<double>>(){
181
+ return "std::complex<double>";
182
+ }
183
+ template <> inline std::string typeName<at::Half>(){
184
+ return "at::Half";
185
+ }
186
+ template <> inline std::string typeName<at::BFloat16>(){
187
+ return "at::BFloat16";
188
+ }
189
+ template <> inline std::string typeName<at::Float8_e5m2>(){
190
+ return "at::Float8_e5m2";
191
+ }
192
+ template <> inline std::string typeName<at::Float8_e4m3fn>(){
193
+ return "at::Float8_e4m3fn";
194
+ }
195
+ template <> inline std::string typeName<at::Float8_e5m2fnuz>() {
196
+ return "at::Float8_e5m2fnuz";
197
+ }
198
+ template <> inline std::string typeName<at::Float8_e4m3fnuz>() {
199
+ return "at::Float8_e4m3fnuz";
200
+ }
201
+
202
+ #define TYPE_NAME_CASE(ctype, scalartype) \
203
+ case ScalarType::scalartype: return typeName<ctype>();
204
+ inline std::string typeName(ScalarType t) {
205
+ switch (t) {
206
+ AT_FORALL_SCALAR_TYPES_WITH_COMPLEX(TYPE_NAME_CASE)
207
+ default:
208
+ TORCH_CHECK(false, "invalid type for jiterator");
209
+ }
210
+ }
211
+ #undef TYPE_NAME_CASE
212
+
213
+ TORCH_CUDA_CPP_API void initializeCudaContext();
214
+
215
+ }}} // namespace at::cuda::jit
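Editor's note: the `can_vectorize_up_to` helpers above boil down to a pointer-alignment test: an address can be read as a vector of 4 (or 2) elements only if it is aligned to 4x (or 2x) the element size, and for a whole kernel the usable width is the minimum over the output and all inputs. Below is a host-only C++ sketch of that test, with hypothetical names and no ATen dependencies.

```cpp
// Host-side sketch of the alignment rule behind can_vectorize_up_to.
#include <cstdint>
#include <cstdio>

int vec_width(std::size_t elem_size, const void* pointer) {
  auto ip = reinterpret_cast<std::uintptr_t>(pointer);
  if (ip % (4 * elem_size) == 0) return 4;  // e.g. a float4-sized access is safe
  if (ip % (2 * elem_size) == 0) return 2;  // e.g. a float2-sized access is safe
  return 1;                                 // fall back to scalar loads
}

int main() {
  alignas(16) float buf[8] = {};
  printf("%d\n", vec_width(sizeof(float), buf));      // 16-byte aligned -> 4
  printf("%d\n", vec_width(sizeof(float), buf + 2));  // 8-byte aligned  -> 2
  printf("%d\n", vec_width(sizeof(float), buf + 1));  // 4-byte aligned  -> 1
  return 0;
}
```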
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/BinaryOps.h ADDED
@@ -0,0 +1,8 @@
1
+ #include <ATen/core/Tensor.h>
2
+
3
+ namespace at {
4
+ namespace native {
5
+ TORCH_API Tensor
6
+ quantized_add(Tensor qa, Tensor qb, double scale, int64_t zero_point);
7
+ }
8
+ } // namespace at
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/QuantizedOps.h ADDED
@@ -0,0 +1,258 @@
1
+ #pragma once
2
+ #include <ATen/core/Tensor.h>
3
+ #include <ATen/core/IListRef.h>
4
+ #include <ATen/Dispatch.h>
5
+ #include <ATen/TensorIterator.h>
6
+ #include <ATen/native/Activation.h>
7
+ #include <ATen/native/DispatchStub.h>
8
+
9
+ namespace at {
10
+ namespace native {
11
+
12
+ using qrelu_fn = void (*)(const at::Tensor& /*qx*/, at::Tensor& /*qy*/);
13
+ using qrelu_leaky_fn = void (*)(Tensor& /*out*/, const Tensor& /*qx*/,
14
+ const Scalar& /*negval_*/);
15
+ using qgelu_fn = void (*)(const at::Tensor& /*qx*/, at::Tensor& /*qy*/, GeluType /* approximate */);
16
+ using qsigmoid_fn = void (*)(const at::Tensor& /*qx*/, at::Tensor& /*qy*/, double output_scale, int64_t output_zero_point);
17
+ using qhardsigmoid_fn = void (*)(const at::Tensor& /*qx*/, at::Tensor& /*qy*/);
18
+ using qclamp_fn = void (*)(
19
+ const at::Tensor& /*qx*/,
20
+ const Scalar& min,
21
+ const Scalar& max,
22
+ at::Tensor& /*qy*/);
23
+ using qclamp_minmax_fn = void (*)(
24
+ const at::Tensor& /*qx*/,
25
+ const Scalar& /*min or max*/,
26
+ at::Tensor& /*qy*/);
27
+ using qthreshold_fn = void (*)(
28
+ const at::Tensor& /*qx*/,
29
+ const Scalar& threshold,
30
+ const Scalar& value,
31
+ at::Tensor& /*qy*/);
32
+ using qtanh_fn = void (*)(const at::Tensor& /*qx*/, at::Tensor& /*qy*/);
33
+ using qelu_fn = void(*)(
34
+ const at::Tensor& /*qx*/,
35
+ const Scalar& /*alpha*/,
36
+ const Scalar& /*scale*/,
37
+ const Scalar& /*input_scale*/,
38
+ at::Tensor& /*qy*/);
39
+ using qbinary_fn =
40
+ void (*)(Tensor& /*out*/, const Tensor& /*self*/, const Tensor& /*other*/);
41
+ using qadd_scalar_fn =
42
+ void (*)(Tensor& /*out*/, const Tensor& /*self*/, const Scalar& other /*other*/);
43
+ using qhardswish_fn = void (*)(const at::Tensor& /*qx*/, at::Tensor& /*qy*/);
44
+ using qdropout_fn = void(*)(
45
+ const at::Tensor& /*qx*/,
46
+ const Scalar& /*p*/,
47
+ bool training /*training*/,
48
+ at::Tensor& /*qy*/);
49
+ using qmaxpool_2d_fn = void (*)(
50
+ const Tensor& qx,
51
+ int64_t iC, // input/output channels
52
+ int64_t iH,
53
+ int64_t iW, // input sizes
54
+ int64_t oH,
55
+ int64_t oW, // output sizes
56
+ int64_t kH,
57
+ int64_t kW, // kernel size
58
+ int64_t sH,
59
+ int64_t sW, // strides
60
+ int64_t pH,
61
+ int64_t pW, // padding
62
+ int64_t dH,
63
+ int64_t dW, // dilation
64
+ Tensor& qy);
65
+ using qmaxpool_3d_fn = void (*)(
66
+ const Tensor& qx,
67
+ int64_t iC, // input/output channels
68
+ int64_t iT,
69
+ int64_t iH,
70
+ int64_t iW, // input sizes
71
+ int64_t oT,
72
+ int64_t oH,
73
+ int64_t oW, // output sizes
74
+ int64_t kT,
75
+ int64_t kH,
76
+ int64_t kW, // kernel size
77
+ int64_t sT,
78
+ int64_t sH,
79
+ int64_t sW, // strides
80
+ int64_t pT,
81
+ int64_t pH,
82
+ int64_t pW, // padding
83
+ int64_t dT,
84
+ int64_t dH,
85
+ int64_t dW, // dilation
86
+ Tensor& qy);
87
+ using qadaptive_avg_pool2d_fn = void (*)(
88
+ const Tensor& qx,
89
+ Tensor& qy,
90
+ int64_t sizeB,
91
+ int64_t sizeC,
92
+ int64_t isizeH,
93
+ int64_t isizeW,
94
+ int64_t osizeH,
95
+ int64_t osizeW,
96
+ int64_t istrideB,
97
+ int64_t istrideC,
98
+ int64_t istrideH,
99
+ int64_t istrideW);
100
+ using qadaptive_avg_pool3d_fn = void (*)(
101
+ const Tensor& qx,
102
+ Tensor& qy,
103
+ int64_t sizeB,
104
+ int64_t sizeC,
105
+ int64_t isizeD,
106
+ int64_t isizeH,
107
+ int64_t isizeW,
108
+ int64_t osizeD,
109
+ int64_t osizeH,
110
+ int64_t osizeW,
111
+ int64_t istrideB,
112
+ int64_t istrideC,
113
+ int64_t istrideD,
114
+ int64_t istrideH,
115
+ int64_t istrideW);
116
+ using qavg_pool2d_fn = void (*)(
117
+ const Tensor& qx,
118
+ Tensor& qy,
119
+ int64_t nBatch,
120
+ int64_t nInputPlane,
121
+ int64_t inputWidth,
122
+ int64_t inputHeight,
123
+ int64_t outputWidth,
124
+ int64_t outputHeight,
125
+ int kW,
126
+ int kH,
127
+ int dW,
128
+ int dH,
129
+ int padW,
130
+ int padH,
131
+ bool count_include_pad,
132
+ c10::optional<int64_t> divisor_override);
133
+
134
+ using qavg_pool3d_fn = void (*)(
135
+ const Tensor& qx,
136
+ Tensor& qy,
137
+ int64_t nBatch,
138
+ int64_t nInputPlane,
139
+ int64_t inputWidth,
140
+ int64_t inputHeight,
141
+ int64_t inputDepth,
142
+ int64_t outputWidth,
143
+ int64_t outputHeight,
144
+ int64_t outputDepth,
145
+ int kW,
146
+ int kH,
147
+ int kD,
148
+ int dW,
149
+ int dH,
150
+ int dD,
151
+ int padW,
152
+ int padH,
153
+ int padD,
154
+ bool count_include_pad,
155
+ c10::optional<int64_t> divisor_override);
156
+
157
+ using qupsample_bilinear2d_fn = void (*)(
158
+ Tensor& output,
159
+ const Tensor& input,
160
+ int64_t input_height,
161
+ int64_t input_width,
162
+ int64_t output_height,
163
+ int64_t output_width,
164
+ int64_t nbatch,
165
+ int64_t channels,
166
+ bool align_corners,
167
+ c10::optional<double> scales_h,
168
+ c10::optional<double> scales_w);
169
+
170
+ using qcat_nhwc_fn = Tensor (*)(
171
+ const MaterializedITensorListRef& qxs,
172
+ int64_t dim,
173
+ double scale,
174
+ int64_t zero_point);
175
+ using qtopk_fn = void(*)(Tensor&, Tensor&, const Tensor&, int64_t, int64_t, bool, bool);
176
+
177
+ using qbatch_norm_fn = void(*)(int64_t, int64_t, int64_t, int64_t, int64_t, const Tensor&, const Tensor&, const Tensor&, Tensor&);
178
+
179
+ using qnormalize_fn = void (*)(
180
+ const Tensor& /* X */,
181
+ const Tensor& /* gamma */,
182
+ const Tensor& /* beta */,
183
+ bool /* affine_per_channel */,
184
+ int /* num_channels */,
185
+ int /* num_groups */,
186
+ int64_t /* M */,
187
+ int64_t /* N */,
188
+ double /* eps */,
189
+ Tensor* /* Y */);
190
+
191
+ using qmean_inner_dim_fn = void (*)(
192
+ const Tensor& /* X */,
193
+ OptionalIntArrayRef /* opt_dim */,
194
+ bool /* keepdim */,
195
+ c10::optional<ScalarType> /* opt_dtype */,
196
+ Tensor& /* Y */);
197
+
198
+ using qstd_inner_dim_fn = void (*)(
199
+ const Tensor& /* X */,
200
+ OptionalIntArrayRef /* dim */,
201
+ const c10::optional<Scalar>& /* correction */,
202
+ bool /* keepdim */,
203
+ Tensor& /* Y */);
204
+
205
+ using qnormalize_nhwc_fn = void (*)(
206
+ const Tensor& /* X */,
207
+ const Tensor& /* gamma */,
208
+ const Tensor& /* beta */,
209
+ bool /* affine_per_channel */,
210
+ int /* num_channels */,
211
+ int /* num_groups */,
212
+ int64_t /* M */,
213
+ int64_t /* N */,
214
+ double /* eps */,
215
+ Tensor* /* Y */);
216
+
217
+ using qprelu_fn = void (*)(Tensor& /*out*/, const Tensor& /*qx*/,
218
+ const Tensor& /*qw*/);
219
+
220
+ DECLARE_DISPATCH(qadaptive_avg_pool2d_fn, qadaptive_avg_pool2d_nhwc_stub);
221
+ DECLARE_DISPATCH(qadaptive_avg_pool3d_fn, qadaptive_avg_pool3d_ndhwc_stub);
222
+ DECLARE_DISPATCH(qadd_scalar_fn, qadd_scalar_relu_stub);
223
+ DECLARE_DISPATCH(qadd_scalar_fn, qadd_scalar_stub);
224
+ DECLARE_DISPATCH(qavg_pool2d_fn, qavg_pool2d_nhwc_stub);
225
+ DECLARE_DISPATCH(qavg_pool3d_fn, qavg_pool3d_nhwc_stub);
226
+ DECLARE_DISPATCH(qbatch_norm_fn, qbatch_norm_relu_stub);
227
+ DECLARE_DISPATCH(qbatch_norm_fn, qbatch_norm_stub);
228
+ DECLARE_DISPATCH(qbinary_fn, qadd_relu_stub);
229
+ DECLARE_DISPATCH(qbinary_fn, qadd_stub);
230
+ DECLARE_DISPATCH(qbinary_fn, qmul_relu_stub);
231
+ DECLARE_DISPATCH(qbinary_fn, qmul_stub);
232
+ DECLARE_DISPATCH(qcat_nhwc_fn, qcat_nhwc_stub);
233
+ DECLARE_DISPATCH(qcat_nhwc_fn, qcat_relu_nhwc_stub);
234
+ DECLARE_DISPATCH(qclamp_fn, qclamp_stub);
235
+ DECLARE_DISPATCH(qclamp_minmax_fn, qclamp_min_stub);
236
+ DECLARE_DISPATCH(qclamp_minmax_fn, qclamp_max_stub);
237
+ DECLARE_DISPATCH(qelu_fn, qelu_stub);
238
+ DECLARE_DISPATCH(qhardsigmoid_fn, qhardsigmoid_stub);
239
+ DECLARE_DISPATCH(qhardswish_fn, qhardswish_stub);
240
+ DECLARE_DISPATCH(qdropout_fn, qdropout_stub);
241
+ DECLARE_DISPATCH(qmaxpool_2d_fn, qmaxpool_2d_nhwc_stub);
242
+ DECLARE_DISPATCH(qmaxpool_3d_fn, qmaxpool_3d_nthwc_stub);
243
+ DECLARE_DISPATCH(qnormalize_fn, quantized_normalize_stub);
244
+ DECLARE_DISPATCH(qnormalize_nhwc_fn, quantized_groupnorm_nhwc_stub);
245
+ DECLARE_DISPATCH(qrelu_fn, qrelu_stub);
246
+ DECLARE_DISPATCH(qrelu_leaky_fn, qrelu_leaky_stub);
247
+ DECLARE_DISPATCH(qgelu_fn, qgelu_stub);
248
+ DECLARE_DISPATCH(qsigmoid_fn, qsigmoid_stub);
249
+ DECLARE_DISPATCH(qtanh_fn, qtanh_stub);
250
+ DECLARE_DISPATCH(qthreshold_fn, qthreshold_stub);
251
+ DECLARE_DISPATCH(qtopk_fn, qtopk_stub);
252
+ DECLARE_DISPATCH(qupsample_bilinear2d_fn, qupsample_bilinear2d_nhwc_stub);
253
+ DECLARE_DISPATCH(qmean_inner_dim_fn, qmean_inner_dim_stub);
254
+ DECLARE_DISPATCH(qstd_inner_dim_fn, qstd_inner_dim_stub);
255
+ DECLARE_DISPATCH(qprelu_fn, qprelu_stub);
256
+
257
+ } // namespace native
258
+ } // namespace at
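Editor's note: the `DECLARE_DISPATCH` declarations above all follow the same shape: a `using ..._fn = ...` typedef pins down the kernel signature, and a stub later has a concrete implementation registered per backend, which callers invoke through the stub. The sketch below is a deliberately simplified, hypothetical analogue of that function-pointer-stub pattern (toy `Device` enum, toy `ReluStub`), not c10's actual `DispatchStub` machinery.

```cpp
// Toy analogue of the function-pointer "stub" pattern used by DECLARE_DISPATCH.
#include <cstdio>
#include <stdexcept>

enum class Device { CPU, CUDA, NumDevices };

// Plays the role of e.g. `using qrelu_fn = void (*)(const Tensor&, Tensor&);`
using relu_fn = void (*)(const float* in, float* out, int n);

struct ReluStub {
  relu_fn table[static_cast<int>(Device::NumDevices)] = {nullptr, nullptr};

  // Plays the role of registering one backend's kernel (REGISTER_DISPATCH-style).
  void register_impl(Device d, relu_fn fn) { table[static_cast<int>(d)] = fn; }

  // Callers go through the stub; it forwards to whatever was registered.
  void operator()(Device d, const float* in, float* out, int n) const {
    relu_fn fn = table[static_cast<int>(d)];
    if (fn == nullptr) throw std::runtime_error("no kernel registered for this device");
    fn(in, out, n);
  }
};

void relu_cpu(const float* in, float* out, int n) {
  for (int i = 0; i < n; i++) out[i] = in[i] > 0.0f ? in[i] : 0.0f;
}

int main() {
  ReluStub relu_stub;
  relu_stub.register_impl(Device::CPU, relu_cpu);

  const float in[4] = {-1.0f, 0.5f, -2.0f, 3.0f};
  float out[4];
  relu_stub(Device::CPU, in, out, 4);
  printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  // 0 0.5 0 3
  return 0;
}
```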
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/native/quantized/cpu/RuyUtils.h ADDED
@@ -0,0 +1,21 @@
1
+ #pragma once
2
+
3
+ #ifdef USE_RUY_QMATMUL
4
+
5
+ #include <ruy/ruy.h>
6
+
7
+ namespace at {
8
+ namespace native {
9
+ namespace ruy_utils {
10
+
11
+ ruy::Context* get_ruy_context();
12
+
13
+ void quantize_multiplier(double scale,
14
+ int* multiplier_fixedpoint,
15
+ int* multiplier_exponent);
16
+
17
+ } // namespace ruy_utils
18
+ } // namespace native
19
+ } // namespace at
20
+
21
+ #endif // USE_RUY_QMATMUL
env-llmeval/lib/python3.10/site-packages/torch/include/ATen/ops/_coalesced_ops.h ADDED
@@ -0,0 +1,50 @@
1
+ #pragma once
2
+
3
+ // @generated by torchgen/gen.py from Operator.h
4
+
5
+ #include <tuple>
6
+ #include <vector>
7
+
8
+ // Forward declarations of any types needed in the operator signatures.
9
+ // We can't directly include these classes because it will cause circular include dependencies.
10
+ // This file is included by TensorBody.h, which defines the Tensor class.
11
+ #include <ATen/core/ATen_fwd.h>
12
+
13
+ namespace at {
14
+ namespace _ops {
15
+
16
+
17
+ struct TORCH_API _coalesced_ {
18
+ using schema = at::Tensor & (at::Tensor &, bool);
19
+ using ptr_schema = schema*;
20
+ // See Note [static constexpr char* members for windows NVCC]
21
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_coalesced_")
22
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
23
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_coalesced_(Tensor(a!) self, bool coalesced) -> Tensor(a!)")
24
+ static at::Tensor & call(at::Tensor & self, bool coalesced);
25
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, bool coalesced);
26
+ };
27
+
28
+ struct TORCH_API _coalesced_out {
29
+ using schema = at::Tensor & (const at::Tensor &, bool, at::Tensor &);
30
+ using ptr_schema = schema*;
31
+ // See Note [static constexpr char* members for windows NVCC]
32
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_coalesced")
33
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
34
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_coalesced.out(Tensor self, bool coalesced, *, Tensor(a!) out) -> Tensor(a!)")
35
+ static at::Tensor & call(const at::Tensor & self, bool coalesced, at::Tensor & out);
36
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool coalesced, at::Tensor & out);
37
+ };
38
+
39
+ struct TORCH_API _coalesced {
40
+ using schema = at::Tensor (const at::Tensor &, bool);
41
+ using ptr_schema = schema*;
42
+ // See Note [static constexpr char* members for windows NVCC]
43
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_coalesced")
44
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
45
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_coalesced(Tensor self, bool coalesced) -> Tensor")
46
+ static at::Tensor call(const at::Tensor & self, bool coalesced);
47
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool coalesced);
48
+ };
49
+
50
+ }} // namespace at::_ops