Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- ckpts/universal/global_step60/zero/18.mlp.dense_4h_to_h.weight/fp32.pt +3 -0
- ckpts/universal/global_step60/zero/5.attention.query_key_value.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step60/zero/5.attention.query_key_value.weight/fp32.pt +3 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_asimd.c +27 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_asimddp.c +16 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_asimdfhm.c +19 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_asimdhp.c +15 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx.c +20 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx2.c +20 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_cnl.c +24 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_knl.c +25 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_knm.c +30 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_skx.c +26 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512f.c +20 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_f16c.c +22 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_fma3.c +22 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_fma4.c +13 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_neon.c +19 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_neon_fp16.c +11 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_neon_vfpv4.c +21 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_popcnt.c +32 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse.c +20 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse2.c +20 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse3.c +20 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse41.c +20 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse42.c +20 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_ssse3.c +20 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vsx.c +21 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vsx2.c +13 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vsx3.c +13 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vxe.c +25 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vxe2.c +21 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/extra_avx512dq_mask.c +16 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/extra_avx512f_reduce.c +41 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/extra_vsx3_half_double.c +12 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/extra_vsx4_mma.c +21 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/extra_vsx_asm.c +36 -0
- venv/lib/python3.10/site-packages/numpy/distutils/checks/test_flags.c +1 -0
- venv/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/absoft.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/compaq.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/environment.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/fujitsu.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/ibm.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/lahey.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/mips.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/nv.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/pathf95.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/pg.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/numpy/distutils/tests/__init__.py +0 -0
- venv/lib/python3.10/site-packages/numpy/distutils/tests/__pycache__/__init__.cpython-310.pyc +0 -0
ckpts/universal/global_step60/zero/18.mlp.dense_4h_to_h.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:acd4f001edbd93efd8db6bca1d771d11b5f257aea4f4d6a65fa2b86bdf8f5dcc
+size 33555533
ckpts/universal/global_step60/zero/5.attention.query_key_value.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fa2665fd710cad7c7a682e9e8249199fcf30148f827be171d32d1ddc7ceffddc
+size 50332828
ckpts/universal/global_step60/zero/5.attention.query_key_value.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de567c9dab4bfa853a808e5f81d47b6ffae31dd62941b161c5595701def600db
+size 50332749
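The three .pt entries above are Git LFS pointer files (version/oid/size), not the tensors themselves; the `global_step60/zero/<param>/{fp32,exp_avg}.pt` layout matches DeepSpeed-style universal checkpoint shards holding fp32 master weights and Adam first-moment state. A minimal inspection sketch, assuming the LFS objects have been pulled with `git lfs pull` and PyTorch is installed; the exact saved structure is not shown in this diff, so both a bare tensor and a wrapping dict are handled:

    # Hypothetical helper for poking at one shard; not part of this repo.
    import torch

    path = "ckpts/universal/global_step60/zero/18.mlp.dense_4h_to_h.weight/fp32.pt"
    obj = torch.load(path, map_location="cpu")

    if torch.is_tensor(obj):
        # Bare tensor: report shape and dtype of the fp32 master copy.
        print("tensor", tuple(obj.shape), obj.dtype)
    elif isinstance(obj, dict):
        # Some checkpoint writers wrap the tensor in a dict with metadata.
        print("dict with keys:", list(obj.keys()))
    else:
        print(type(obj))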
venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_asimd.c
ADDED
@@ -0,0 +1,27 @@
+#ifdef _MSC_VER
+    #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(int argc, char **argv)
+{
+    float *src = (float*)argv[argc-1];
+    float32x4_t v1 = vdupq_n_f32(src[0]), v2 = vdupq_n_f32(src[1]);
+    /* MAXMIN */
+    int ret = (int)vgetq_lane_f32(vmaxnmq_f32(v1, v2), 0);
+    ret += (int)vgetq_lane_f32(vminnmq_f32(v1, v2), 0);
+    /* ROUNDING */
+    ret += (int)vgetq_lane_f32(vrndq_f32(v1), 0);
+#ifdef __aarch64__
+    {
+        double *src2 = (double*)argv[argc-1];
+        float64x2_t vd1 = vdupq_n_f64(src2[0]), vd2 = vdupq_n_f64(src2[1]);
+        /* MAXMIN */
+        ret += (int)vgetq_lane_f64(vmaxnmq_f64(vd1, vd2), 0);
+        ret += (int)vgetq_lane_f64(vminnmq_f64(vd1, vd2), 0);
+        /* ROUNDING */
+        ret += (int)vgetq_lane_f64(vrndq_f64(vd1), 0);
+    }
+#endif
+    return ret;
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_asimddp.c
ADDED
@@ -0,0 +1,16 @@
+#ifdef _MSC_VER
+    #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(int argc, char **argv)
+{
+    unsigned char *src = (unsigned char*)argv[argc-1];
+    uint8x16_t v1 = vdupq_n_u8(src[0]), v2 = vdupq_n_u8(src[1]);
+    uint32x4_t va = vdupq_n_u32(3);
+    int ret = (int)vgetq_lane_u32(vdotq_u32(va, v1, v2), 0);
+#ifdef __aarch64__
+    ret += (int)vgetq_lane_u32(vdotq_laneq_u32(va, v1, v2, 0), 0);
+#endif
+    return ret;
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_asimdfhm.c
ADDED
@@ -0,0 +1,19 @@
+#ifdef _MSC_VER
+    #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(int argc, char **argv)
+{
+    float16_t *src = (float16_t*)argv[argc-1];
+    float *src2 = (float*)argv[argc-2];
+    float16x8_t vhp = vdupq_n_f16(src[0]);
+    float16x4_t vlhp = vdup_n_f16(src[1]);
+    float32x4_t vf = vdupq_n_f32(src2[0]);
+    float32x2_t vlf = vdup_n_f32(src2[1]);
+
+    int ret = (int)vget_lane_f32(vfmlal_low_f16(vlf, vlhp, vlhp), 0);
+    ret += (int)vgetq_lane_f32(vfmlslq_high_f16(vf, vhp, vhp), 0);
+
+    return ret;
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_asimdhp.c
ADDED
@@ -0,0 +1,15 @@
+#ifdef _MSC_VER
+    #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(int argc, char **argv)
+{
+    float16_t *src = (float16_t*)argv[argc-1];
+    float16x8_t vhp = vdupq_n_f16(src[0]);
+    float16x4_t vlhp = vdup_n_f16(src[1]);
+
+    int ret = (int)vgetq_lane_f16(vabdq_f16(vhp, vhp), 0);
+    ret += (int)vget_lane_f16(vabd_f16(vlhp, vlhp), 0);
+    return ret;
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx.c
ADDED
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __AVX__
+        #error "HOST/ARCH doesn't support AVX"
+    #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    __m256 a = _mm256_add_ps(_mm256_loadu_ps((const float*)argv[argc-1]), _mm256_loadu_ps((const float*)argv[1]));
+    return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a));
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx2.c
ADDED
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __AVX2__
+        #error "HOST/ARCH doesn't support AVX2"
+    #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    __m256i a = _mm256_abs_epi16(_mm256_loadu_si256((const __m256i*)argv[argc-1]));
+    return _mm_cvtsi128_si32(_mm256_castsi256_si128(a));
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_cnl.c
ADDED
@@ -0,0 +1,24 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #if !defined(__AVX512VBMI__) || !defined(__AVX512IFMA__)
+        #error "HOST/ARCH doesn't support CannonLake AVX512 features"
+    #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
+    /* IFMA */
+    a = _mm512_madd52hi_epu64(a, a, _mm512_setzero_si512());
+    /* VMBI */
+    a = _mm512_permutex2var_epi8(a, _mm512_setzero_si512(), a);
+    return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_knl.c
ADDED
@@ -0,0 +1,25 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #if !defined(__AVX512ER__) || !defined(__AVX512PF__)
+        #error "HOST/ARCH doesn't support Knights Landing AVX512 features"
+    #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    int base[128]={};
+    __m512d ad = _mm512_loadu_pd((const __m512d*)argv[argc-1]);
+    /* ER */
+    __m512i a = _mm512_castpd_si512(_mm512_exp2a23_pd(ad));
+    /* PF */
+    _mm512_mask_prefetch_i64scatter_pd(base, _mm512_cmpeq_epi64_mask(a, a), a, 1, _MM_HINT_T1);
+    return base[0];
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_knm.c
ADDED
@@ -0,0 +1,30 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #if !defined(__AVX5124FMAPS__) || !defined(__AVX5124VNNIW__) || !defined(__AVX512VPOPCNTDQ__)
+        #error "HOST/ARCH doesn't support Knights Mill AVX512 features"
+    #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    __m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
+    __m512 b = _mm512_loadu_ps((const __m512*)argv[argc-2]);
+
+    /* 4FMAPS */
+    b = _mm512_4fmadd_ps(b, b, b, b, b, NULL);
+    /* 4VNNIW */
+    a = _mm512_4dpwssd_epi32(a, a, a, a, a, NULL);
+    /* VPOPCNTDQ */
+    a = _mm512_popcnt_epi64(a);
+
+    a = _mm512_add_epi32(a, _mm512_castps_si512(b));
+    return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_skx.c
ADDED
@@ -0,0 +1,26 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #if !defined(__AVX512VL__) || !defined(__AVX512BW__) || !defined(__AVX512DQ__)
+        #error "HOST/ARCH doesn't support SkyLake AVX512 features"
+    #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    __m512i aa = _mm512_abs_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1]));
+    /* VL */
+    __m256i a = _mm256_abs_epi64(_mm512_extracti64x4_epi64(aa, 1));
+    /* DQ */
+    __m512i b = _mm512_broadcast_i32x8(a);
+    /* BW */
+    b = _mm512_abs_epi16(b);
+    return _mm_cvtsi128_si32(_mm512_castsi512_si128(b));
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512f.c
ADDED
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __AVX512F__
+        #error "HOST/ARCH doesn't support AVX512F"
+    #endif
+#endif
+
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    __m512i a = _mm512_abs_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1]));
+    return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_f16c.c
ADDED
@@ -0,0 +1,22 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __F16C__
+        #error "HOST/ARCH doesn't support F16C"
+    #endif
+#endif
+
+#include <emmintrin.h>
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    __m128 a = _mm_cvtph_ps(_mm_loadu_si128((const __m128i*)argv[argc-1]));
+    __m256 a8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*)argv[argc-2]));
+    return (int)(_mm_cvtss_f32(a) + _mm_cvtss_f32(_mm256_castps256_ps128(a8)));
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_fma3.c
ADDED
@@ -0,0 +1,22 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #if !defined(__FMA__) && !defined(__AVX2__)
+        #error "HOST/ARCH doesn't support FMA3"
+    #endif
+#endif
+
+#include <xmmintrin.h>
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    __m256 a = _mm256_loadu_ps((const float*)argv[argc-1]);
+    a = _mm256_fmadd_ps(a, a, a);
+    return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a));
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_fma4.c
ADDED
@@ -0,0 +1,13 @@
+#include <immintrin.h>
+#ifdef _MSC_VER
+    #include <ammintrin.h>
+#else
+    #include <x86intrin.h>
+#endif
+
+int main(int argc, char **argv)
+{
+    __m256 a = _mm256_loadu_ps((const float*)argv[argc-1]);
+    a = _mm256_macc_ps(a, a, a);
+    return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a));
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_neon.c
ADDED
@@ -0,0 +1,19 @@
+#ifdef _MSC_VER
+    #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(int argc, char **argv)
+{
+    // passing from untraced pointers to avoid optimizing out any constants
+    // so we can test against the linker.
+    float *src = (float*)argv[argc-1];
+    float32x4_t v1 = vdupq_n_f32(src[0]), v2 = vdupq_n_f32(src[1]);
+    int ret = (int)vgetq_lane_f32(vmulq_f32(v1, v2), 0);
+#ifdef __aarch64__
+    double *src2 = (double*)argv[argc-2];
+    float64x2_t vd1 = vdupq_n_f64(src2[0]), vd2 = vdupq_n_f64(src2[1]);
+    ret += (int)vgetq_lane_f64(vmulq_f64(vd1, vd2), 0);
+#endif
+    return ret;
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_neon_fp16.c
ADDED
@@ -0,0 +1,11 @@
+#ifdef _MSC_VER
+    #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(int argc, char **argv)
+{
+    short *src = (short*)argv[argc-1];
+    float32x4_t v_z4 = vcvt_f32_f16((float16x4_t)vld1_s16(src));
+    return (int)vgetq_lane_f32(v_z4, 0);
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_neon_vfpv4.c
ADDED
@@ -0,0 +1,21 @@
+#ifdef _MSC_VER
+    #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(int argc, char **argv)
+{
+    float *src = (float*)argv[argc-1];
+    float32x4_t v1 = vdupq_n_f32(src[0]);
+    float32x4_t v2 = vdupq_n_f32(src[1]);
+    float32x4_t v3 = vdupq_n_f32(src[2]);
+    int ret = (int)vgetq_lane_f32(vfmaq_f32(v1, v2, v3), 0);
+#ifdef __aarch64__
+    double *src2 = (double*)argv[argc-2];
+    float64x2_t vd1 = vdupq_n_f64(src2[0]);
+    float64x2_t vd2 = vdupq_n_f64(src2[1]);
+    float64x2_t vd3 = vdupq_n_f64(src2[2]);
+    ret += (int)vgetq_lane_f64(vfmaq_f64(vd1, vd2, vd3), 0);
+#endif
+    return ret;
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_popcnt.c
ADDED
@@ -0,0 +1,32 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #if !defined(__SSE4_2__) && !defined(__POPCNT__)
+        #error "HOST/ARCH doesn't support POPCNT"
+    #endif
+#endif
+
+#ifdef _MSC_VER
+    #include <nmmintrin.h>
+#else
+    #include <popcntintrin.h>
+#endif
+
+int main(int argc, char **argv)
+{
+    // To make sure popcnt instructions are generated
+    // and been tested against the assembler
+    unsigned long long a = *((unsigned long long*)argv[argc-1]);
+    unsigned int b = *((unsigned int*)argv[argc-2]);
+
+#if defined(_M_X64) || defined(__x86_64__)
+    a = _mm_popcnt_u64(a);
+#endif
+    b = _mm_popcnt_u32(b);
+    return (int)a + b;
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse.c
ADDED
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __SSE__
+        #error "HOST/ARCH doesn't support SSE"
+    #endif
+#endif
+
+#include <xmmintrin.h>
+
+int main(void)
+{
+    __m128 a = _mm_add_ps(_mm_setzero_ps(), _mm_setzero_ps());
+    return (int)_mm_cvtss_f32(a);
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse2.c
ADDED
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __SSE2__
+        #error "HOST/ARCH doesn't support SSE2"
+    #endif
+#endif
+
+#include <emmintrin.h>
+
+int main(void)
+{
+    __m128i a = _mm_add_epi16(_mm_setzero_si128(), _mm_setzero_si128());
+    return _mm_cvtsi128_si32(a);
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse3.c
ADDED
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __SSE3__
+        #error "HOST/ARCH doesn't support SSE3"
+    #endif
+#endif
+
+#include <pmmintrin.h>
+
+int main(void)
+{
+    __m128 a = _mm_hadd_ps(_mm_setzero_ps(), _mm_setzero_ps());
+    return (int)_mm_cvtss_f32(a);
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse41.c
ADDED
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __SSE4_1__
+        #error "HOST/ARCH doesn't support SSE41"
+    #endif
+#endif
+
+#include <smmintrin.h>
+
+int main(void)
+{
+    __m128 a = _mm_floor_ps(_mm_setzero_ps());
+    return (int)_mm_cvtss_f32(a);
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse42.c
ADDED
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __SSE4_2__
+        #error "HOST/ARCH doesn't support SSE42"
+    #endif
+#endif
+
+#include <smmintrin.h>
+
+int main(void)
+{
+    __m128 a = _mm_hadd_ps(_mm_setzero_ps(), _mm_setzero_ps());
+    return (int)_mm_cvtss_f32(a);
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_ssse3.c
ADDED
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __SSSE3__
+        #error "HOST/ARCH doesn't support SSSE3"
+    #endif
+#endif
+
+#include <tmmintrin.h>
+
+int main(void)
+{
+    __m128i a = _mm_hadd_epi16(_mm_setzero_si128(), _mm_setzero_si128());
+    return (int)_mm_cvtsi128_si32(a);
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vsx.c
ADDED
@@ -0,0 +1,21 @@
+#ifndef __VSX__
+    #error "VSX is not supported"
+#endif
+#include <altivec.h>
+
+#if (defined(__GNUC__) && !defined(vec_xl)) || (defined(__clang__) && !defined(__IBMC__))
+    #define vsx_ld vec_vsx_ld
+    #define vsx_st vec_vsx_st
+#else
+    #define vsx_ld vec_xl
+    #define vsx_st vec_xst
+#endif
+
+int main(void)
+{
+    unsigned int zout[4];
+    unsigned int z4[] = {0, 0, 0, 0};
+    __vector unsigned int v_z4 = vsx_ld(0, z4);
+    vsx_st(v_z4, 0, zout);
+    return zout[0];
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vsx2.c
ADDED
@@ -0,0 +1,13 @@
+#ifndef __VSX__
+    #error "VSX is not supported"
+#endif
+#include <altivec.h>
+
+typedef __vector unsigned long long v_uint64x2;
+
+int main(void)
+{
+    v_uint64x2 z2 = (v_uint64x2){0, 0};
+    z2 = (v_uint64x2)vec_cmpeq(z2, z2);
+    return (int)vec_extract(z2, 0);
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vsx3.c
ADDED
@@ -0,0 +1,13 @@
+#ifndef __VSX__
+    #error "VSX is not supported"
+#endif
+#include <altivec.h>
+
+typedef __vector unsigned int v_uint32x4;
+
+int main(void)
+{
+    v_uint32x4 z4 = (v_uint32x4){0, 0, 0, 0};
+    z4 = vec_absd(z4, z4);
+    return (int)vec_extract(z4, 0);
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vxe.c
ADDED
@@ -0,0 +1,25 @@
+#if (__VEC__ < 10302) || (__ARCH__ < 12)
+    #error VXE not supported
+#endif
+
+#include <vecintrin.h>
+int main(int argc, char **argv)
+{
+    __vector float x = vec_nabs(vec_xl(argc, (float*)argv));
+    __vector float y = vec_load_len((float*)argv, (unsigned int)argc);
+
+    x = vec_round(vec_ceil(x) + vec_floor(y));
+    __vector bool int m = vec_cmpge(x, y);
+    x = vec_sel(x, y, m);
+
+    // need to test the existence of intrin "vflls" since vec_doublee
+    // maps to the wrong intrin "vfll".
+    // see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100871
+#if defined(__GNUC__) && !defined(__clang__)
+    __vector long long i = vec_signed(__builtin_s390_vflls(x));
+#else
+    __vector long long i = vec_signed(vec_doublee(x));
+#endif
+
+    return (int)vec_extract(i, 0);
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vxe2.c
ADDED
@@ -0,0 +1,21 @@
+#if (__VEC__ < 10303) || (__ARCH__ < 13)
+    #error VXE2 not supported
+#endif
+
+#include <vecintrin.h>
+
+int main(int argc, char **argv)
+{
+    int val;
+    __vector signed short large = { 'a', 'b', 'c', 'a', 'g', 'h', 'g', 'o' };
+    __vector signed short search = { 'g', 'h', 'g', 'o' };
+    __vector unsigned char len = { 0 };
+    __vector unsigned char res = vec_search_string_cc(large, search, len, &val);
+    __vector float x = vec_xl(argc, (float*)argv);
+    __vector int i = vec_signed(x);
+
+    i = vec_srdb(vec_sldb(i, i, 2), i, 3);
+    val += (int)vec_extract(res, 1);
+    val += vec_extract(i, 0);
+    return val;
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/extra_avx512dq_mask.c
ADDED
@@ -0,0 +1,16 @@
+#include <immintrin.h>
+/**
+ * Test DQ mask operations due to:
+ *  - MSVC has supported it since vs2019 see,
+ *    https://developercommunity.visualstudio.com/content/problem/518298/missing-avx512bw-mask-intrinsics.html
+ *  - Clang >= v8.0
+ *  - GCC >= v7.1
+ */
+int main(void)
+{
+    __mmask8 m8 = _mm512_cmpeq_epi64_mask(_mm512_set1_epi64(1), _mm512_set1_epi64(1));
+    m8 = _kor_mask8(m8, m8);
+    m8 = _kxor_mask8(m8, m8);
+    m8 = _cvtu32_mask8(_cvtmask8_u32(m8));
+    return (int)_cvtmask8_u32(m8);
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/extra_avx512f_reduce.c
ADDED
@@ -0,0 +1,41 @@
+#include <immintrin.h>
+/**
+ * The following intrinsics don't have direct native support but compilers
+ * tend to emulate them.
+ * They're usually supported by gcc >= 7.1, clang >= 4 and icc >= 19
+ */
+int main(void)
+{
+    __m512 one_ps = _mm512_set1_ps(1.0f);
+    __m512d one_pd = _mm512_set1_pd(1.0);
+    __m512i one_i64 = _mm512_set1_epi64(1);
+    // add
+    float sum_ps = _mm512_reduce_add_ps(one_ps);
+    double sum_pd = _mm512_reduce_add_pd(one_pd);
+    int sum_int = (int)_mm512_reduce_add_epi64(one_i64);
+    sum_int += (int)_mm512_reduce_add_epi32(one_i64);
+    // mul
+    sum_ps += _mm512_reduce_mul_ps(one_ps);
+    sum_pd += _mm512_reduce_mul_pd(one_pd);
+    sum_int += (int)_mm512_reduce_mul_epi64(one_i64);
+    sum_int += (int)_mm512_reduce_mul_epi32(one_i64);
+    // min
+    sum_ps += _mm512_reduce_min_ps(one_ps);
+    sum_pd += _mm512_reduce_min_pd(one_pd);
+    sum_int += (int)_mm512_reduce_min_epi32(one_i64);
+    sum_int += (int)_mm512_reduce_min_epu32(one_i64);
+    sum_int += (int)_mm512_reduce_min_epi64(one_i64);
+    // max
+    sum_ps += _mm512_reduce_max_ps(one_ps);
+    sum_pd += _mm512_reduce_max_pd(one_pd);
+    sum_int += (int)_mm512_reduce_max_epi32(one_i64);
+    sum_int += (int)_mm512_reduce_max_epu32(one_i64);
+    sum_int += (int)_mm512_reduce_max_epi64(one_i64);
+    // and
+    sum_int += (int)_mm512_reduce_and_epi32(one_i64);
+    sum_int += (int)_mm512_reduce_and_epi64(one_i64);
+    // or
+    sum_int += (int)_mm512_reduce_or_epi32(one_i64);
+    sum_int += (int)_mm512_reduce_or_epi64(one_i64);
+    return (int)sum_ps + (int)sum_pd + sum_int;
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/extra_vsx3_half_double.c
ADDED
@@ -0,0 +1,12 @@
+/**
+ * Assembler may not fully support the following VSX3 scalar
+ * instructions, even though compilers report VSX3 support.
+ */
+int main(void)
+{
+    unsigned short bits = 0xFF;
+    double f;
+    __asm__ __volatile__("xscvhpdp %x0,%x1" : "=wa"(f) : "wa"(bits));
+    __asm__ __volatile__ ("xscvdphp %x0,%x1" : "=wa" (bits) : "wa" (f));
+    return bits;
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/extra_vsx4_mma.c
ADDED
@@ -0,0 +1,21 @@
+#ifndef __VSX__
+    #error "VSX is not supported"
+#endif
+#include <altivec.h>
+
+typedef __vector float fv4sf_t;
+typedef __vector unsigned char vec_t;
+
+int main(void)
+{
+    __vector_quad acc0;
+    float a[4] = {0,1,2,3};
+    float b[4] = {0,1,2,3};
+    vec_t *va = (vec_t *) a;
+    vec_t *vb = (vec_t *) b;
+    __builtin_mma_xvf32ger(&acc0, va[0], vb[0]);
+    fv4sf_t result[4];
+    __builtin_mma_disassemble_acc((void *)result, &acc0);
+    fv4sf_t c0 = result[0];
+    return (int)((float*)&c0)[0];
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/extra_vsx_asm.c
ADDED
@@ -0,0 +1,36 @@
+/**
+ * Testing ASM VSX register number fixer '%x<n>'
+ *
+ * Old versions of CLANG don't support %x<n> in the inline asm template,
+ * which fixes the register number when using any of the register constraints wa, wd, wf.
+ *
+ * xref:
+ * - https://bugs.llvm.org/show_bug.cgi?id=31837
+ * - https://gcc.gnu.org/onlinedocs/gcc/Machine-Constraints.html
+ */
+#ifndef __VSX__
+    #error "VSX is not supported"
+#endif
+#include <altivec.h>
+
+#if (defined(__GNUC__) && !defined(vec_xl)) || (defined(__clang__) && !defined(__IBMC__))
+    #define vsx_ld vec_vsx_ld
+    #define vsx_st vec_vsx_st
+#else
+    #define vsx_ld vec_xl
+    #define vsx_st vec_xst
+#endif
+
+int main(void)
+{
+    float z4[] = {0, 0, 0, 0};
+    signed int zout[] = {0, 0, 0, 0};
+
+    __vector float vz4 = vsx_ld(0, z4);
+    __vector signed int asm_ret = vsx_ld(0, zout);
+
+    __asm__ ("xvcvspsxws %x0,%x1" : "=wa" (vz4) : "wa" (asm_ret));
+
+    vsx_st(asm_ret, 0, zout);
+    return zout[0];
+}
venv/lib/python3.10/site-packages/numpy/distutils/checks/test_flags.c
ADDED
@@ -0,0 +1 @@
+int test_flags;
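The cpu_*.c and extra_*.c sources above are tiny compile-time probes: numpy.distutils tries to compile each one with the candidate flags for a feature and enables that feature only if the build succeeds. A minimal sketch of that idea, re-implemented from scratch rather than using NumPy's internal CCompilerOpt machinery; the compiler name and flag choices below are illustrative assumptions:

    # Illustrative probe runner, not part of numpy.distutils.
    import subprocess
    import tempfile
    from pathlib import Path

    CHECKS = Path("venv/lib/python3.10/site-packages/numpy/distutils/checks")

    def probe(source_name: str, flags: list[str]) -> bool:
        """Return True if the check file compiles with the given flags."""
        src = CHECKS / source_name
        with tempfile.TemporaryDirectory() as tmp:
            obj = Path(tmp) / "probe.o"
            result = subprocess.run(
                ["cc", *flags, "-c", str(src), "-o", str(obj)],
                capture_output=True,
            )
            return result.returncode == 0

    if __name__ == "__main__":
        # Each feature maps to one check file plus the flags that enable it.
        for name, flags in [("cpu_sse42.c", ["-msse4.2"]),
                            ("cpu_avx2.c", ["-mavx2"]),
                            ("cpu_avx512f.c", ["-mavx512f"])]:
            ok = probe(name, flags)
            print(name, "->", "toolchain supports it" if ok else "not supported")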
venv/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/absoft.cpython-310.pyc
ADDED
Binary file (4.42 kB)
venv/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/compaq.cpython-310.pyc
ADDED
Binary file (4.16 kB)
venv/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/environment.cpython-310.pyc
ADDED
Binary file (3.1 kB)
venv/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/fujitsu.cpython-310.pyc
ADDED
Binary file (1.87 kB)
venv/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/ibm.cpython-310.pyc
ADDED
Binary file (3.44 kB)
venv/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/lahey.cpython-310.pyc
ADDED
Binary file (1.85 kB)
venv/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/mips.cpython-310.pyc
ADDED
Binary file (2.2 kB)
venv/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/nv.cpython-310.pyc
ADDED
Binary file (2.24 kB)
venv/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/pathf95.cpython-310.pyc
ADDED
Binary file (1.4 kB)
venv/lib/python3.10/site-packages/numpy/distutils/fcompiler/__pycache__/pg.cpython-310.pyc
ADDED
Binary file (4.21 kB)
venv/lib/python3.10/site-packages/numpy/distutils/tests/__init__.py
ADDED
File without changes
venv/lib/python3.10/site-packages/numpy/distutils/tests/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (189 Bytes)