diff --git a/ckpts/universal/global_step40/zero/12.attention.query_key_value.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/12.attention.query_key_value.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5cd30ce679f8b83a2ef3ecdc025585769fc1029a
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/12.attention.query_key_value.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:58de09e8fbaf0f4d960197f4a498a7398ad067b07f332edfe717718ef01530a2
+size 50332828
diff --git a/ckpts/universal/global_step40/zero/12.attention.query_key_value.weight/fp32.pt b/ckpts/universal/global_step40/zero/12.attention.query_key_value.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a359d26a495530f60b800cbc14c79ac108825f78
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/12.attention.query_key_value.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a682f878ac4c5cfcf03a221f68168cd1c0f8c9dda07fcc28fba91e00a1329630
+size 50332749
diff --git a/ckpts/universal/global_step40/zero/5.attention.query_key_value.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/5.attention.query_key_value.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1e7eaa947df322519379e11e37b9d577b7894309
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/5.attention.query_key_value.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a4a982f870c12d4d6ad3c888be7a38add8529f91b462285165e3bfe72fabe2d4
+size 50332843
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_basic.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_basic.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..22f0cb4c76d48b05cbd35c75baf8daa9d46f2f2f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_basic.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_decomp_cossin.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_decomp_cossin.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..013b4ad06f6478e5bdc5341012eea6d72c4727e2
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_decomp_cossin.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_decomp_lu.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_decomp_lu.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..af46e21d19508cf4843f32b7ee4c69e8c94f8979
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_decomp_lu.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_decomp_qr.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_decomp_qr.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e03f11cf5a72ff4b6014d13409d016ff37077915
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_decomp_qr.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_decomp_qz.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_decomp_qz.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cbcb91e1e60360975cee6b3ca701ea5dc8430368
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_decomp_qz.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_interpolative_backend.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_interpolative_backend.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ba240d01da7cd5e663bc22894f85aa78eb7e4eba
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_interpolative_backend.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_matfuncs.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_matfuncs.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..61fbd418f6fdc9a8d81cc14f5dd27edd7dc1d90f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_matfuncs.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_matfuncs_inv_ssq.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_matfuncs_inv_ssq.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0c3644c30f580c12c30de91b44d14ac83cf36f43
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_matfuncs_inv_ssq.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_matfuncs_sqrtm.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_matfuncs_sqrtm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a200b5c2f40b4400f13f08a24b86380bf0f88adf
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_matfuncs_sqrtm.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_misc.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_misc.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..65431efb8502425f31c071f35dd86f0b32765ae9
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_misc.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_sketches.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_sketches.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d93ab4f008a402e98c2f1ecea4291642e7ef9e61
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_sketches.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_testutils.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_testutils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..08c834b5465142e29b5adf766467e6dc390f6d32
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/_testutils.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/basic.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/basic.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bb6321b1696fcbdb9b52fafea0ee1a8989e1027d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/basic.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/blas.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/blas.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3a3851787c5afaaa1d98794b07320ef8d1ec746f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/blas.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/decomp_lu.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/decomp_lu.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e8ef8a392a09a1c8d85e5400b482eddf68565c18
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/decomp_lu.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/decomp_qr.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/decomp_qr.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..83629c15ae7b55ccf62a4bec0a0f5a36f53933d1
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/decomp_qr.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/decomp_schur.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/decomp_schur.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..367252ab8374a4bdefa4bf6cdbdc0f14f5e00048
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/decomp_schur.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/decomp_svd.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/decomp_svd.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f8e5040815c19a5b0f1b11e429e250eae618c781
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/decomp_svd.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/interpolative.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/interpolative.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5d34ec8516794e002b0bcc5b1dea9845955e2132
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/interpolative.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/matfuncs.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/matfuncs.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..afdff2f269ce0f9b5e1951cc72265459c38c2f9f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/__pycache__/matfuncs.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/__init__.py b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9eeb02dcb7c8178822f67b98245f36417ea45c70
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_basic.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_basic.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9b9f4c8ea7f50992a05b4dd6aaa63f2d3ec9a0f6
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_basic.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_blas.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_blas.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..497e559e576dfb4634366c1f233d5bd4d8ba3a3f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_blas.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_cython_blas.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_cython_blas.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..89a00dab5ee86f0f005aa6ab930071ca939e65df
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_cython_blas.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_cython_lapack.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_cython_lapack.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d96ec92cdb16f579b656b5177dc33aca830caec6
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_cython_lapack.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_cythonized_array_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_cythonized_array_utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..84425f83d17eab18722f1691c2f7f1984e96b37c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_cythonized_array_utils.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_decomp.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_decomp.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0b93e1ef75bf5a0a6b38ffde9f3554d6535ca52c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_decomp.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_decomp_cholesky.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_decomp_cholesky.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e3f3792618614d9ba2749c2fd4b254162c65c809
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_decomp_cholesky.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_decomp_cossin.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_decomp_cossin.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0b4a2f1d3d3722543ae04e2f37b6900c66b2948a
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_decomp_cossin.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_decomp_ldl.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_decomp_ldl.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..afa641284924073ab7cb6671fabb353ca5a8a3a7
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_decomp_ldl.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_decomp_lu.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_decomp_lu.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d6bbf83fd953f8f7dd77c83a6b000fb3a7dac386
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_decomp_lu.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_decomp_polar.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_decomp_polar.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a81ac7a7d52ef9f53597cd3db4a064c42327708f
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_decomp_polar.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_decomp_update.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_decomp_update.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..894dc30df95bc8a6a3d2d3ee8a2f3f6b3c685c92
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_decomp_update.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_fblas.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_fblas.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..214c8ba5e27f6d3c08836fe57f9f94d98ea77ff2
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_fblas.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_interpolative.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_interpolative.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f16e7e08ba92e5f11d1608381b0d80808755644b
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_interpolative.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_lapack.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_lapack.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a42e2a7b308108b3bdf52017ccf0c37adcad33ca
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_lapack.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_matfuncs.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_matfuncs.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d357dc55ff1b9033e7e1809071ac229b510c509c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_matfuncs.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_matmul_toeplitz.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_matmul_toeplitz.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c5d11cd30c8eda9082f7f89d43bc6508b51d0a9e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_matmul_toeplitz.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_misc.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_misc.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3357193b3c1d0d68d86e3cb166e0f70acb73874c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_misc.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_procrustes.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_procrustes.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5161a6bdbedce4ef9901e13dc2fe80b0c03ae692
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_procrustes.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_sketches.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_sketches.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..73bbb1ac3e9bc5b1784e54c94fb3c3cd04f74a63
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_sketches.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_solve_toeplitz.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_solve_toeplitz.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..95454c23ae667df3275e47e253bcf86800c853a7
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_solve_toeplitz.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_solvers.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_solvers.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ba930daefe94d3a13a81263db70e9211accbc3fb
Binary files /dev/null and b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_solvers.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_special_matrices.cpython-310.pyc b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_special_matrices.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3cdfeb61f4a60cd23a2a1dcb4f1737dcae46f1a3
Binary files /dev/null and
b/venv/lib/python3.10/site-packages/scipy/linalg/tests/__pycache__/test_special_matrices.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_basic.py b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_basic.py new file mode 100644 index 0000000000000000000000000000000000000000..3dd316ff82dfa58ef120e1560275db8f92fe887c --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_basic.py @@ -0,0 +1,1817 @@ +import itertools +import warnings + +import numpy as np +from numpy import (arange, array, dot, zeros, identity, conjugate, transpose, + float32) +from numpy.random import random + +from numpy.testing import (assert_equal, assert_almost_equal, assert_, + assert_array_almost_equal, assert_allclose, + assert_array_equal, suppress_warnings) +import pytest +from pytest import raises as assert_raises + +from scipy.linalg import (solve, inv, det, lstsq, pinv, pinvh, norm, + solve_banded, solveh_banded, solve_triangular, + solve_circulant, circulant, LinAlgError, block_diag, + matrix_balance, qr, LinAlgWarning) + +from scipy.linalg._testutils import assert_no_overwrite +from scipy._lib._testutils import check_free_memory, IS_MUSL +from scipy.linalg.blas import HAS_ILP64 +from scipy._lib.deprecation import _NoValue + +REAL_DTYPES = (np.float32, np.float64, np.longdouble) +COMPLEX_DTYPES = (np.complex64, np.complex128, np.clongdouble) +DTYPES = REAL_DTYPES + COMPLEX_DTYPES + + +def _eps_cast(dtyp): + """Get the epsilon for dtype, possibly downcast to BLAS types.""" + dt = dtyp + if dt == np.longdouble: + dt = np.float64 + elif dt == np.clongdouble: + dt = np.complex128 + return np.finfo(dt).eps + + +class TestSolveBanded: + + def test_real(self): + a = array([[1.0, 20, 0, 0], + [-30, 4, 6, 0], + [2, 1, 20, 2], + [0, -1, 7, 14]]) + ab = array([[0.0, 20, 6, 2], + [1, 4, 20, 14], + [-30, 1, 7, 0], + [2, -1, 0, 0]]) + l, u = 2, 1 + b4 = array([10.0, 0.0, 2.0, 14.0]) + b4by1 = b4.reshape(-1, 1) + b4by2 = array([[2, 1], + [-30, 4], + [2, 3], + [1, 3]]) + b4by4 = array([[1, 0, 0, 0], + [0, 0, 0, 1], + [0, 1, 0, 0], + [0, 1, 0, 0]]) + for b in [b4, b4by1, b4by2, b4by4]: + x = solve_banded((l, u), ab, b) + assert_array_almost_equal(dot(a, x), b) + + def test_complex(self): + a = array([[1.0, 20, 0, 0], + [-30, 4, 6, 0], + [2j, 1, 20, 2j], + [0, -1, 7, 14]]) + ab = array([[0.0, 20, 6, 2j], + [1, 4, 20, 14], + [-30, 1, 7, 0], + [2j, -1, 0, 0]]) + l, u = 2, 1 + b4 = array([10.0, 0.0, 2.0, 14.0j]) + b4by1 = b4.reshape(-1, 1) + b4by2 = array([[2, 1], + [-30, 4], + [2, 3], + [1, 3]]) + b4by4 = array([[1, 0, 0, 0], + [0, 0, 0, 1j], + [0, 1, 0, 0], + [0, 1, 0, 0]]) + for b in [b4, b4by1, b4by2, b4by4]: + x = solve_banded((l, u), ab, b) + assert_array_almost_equal(dot(a, x), b) + + def test_tridiag_real(self): + ab = array([[0.0, 20, 6, 2], + [1, 4, 20, 14], + [-30, 1, 7, 0]]) + a = np.diag(ab[0, 1:], 1) + np.diag(ab[1, :], 0) + np.diag( + ab[2, :-1], -1) + b4 = array([10.0, 0.0, 2.0, 14.0]) + b4by1 = b4.reshape(-1, 1) + b4by2 = array([[2, 1], + [-30, 4], + [2, 3], + [1, 3]]) + b4by4 = array([[1, 0, 0, 0], + [0, 0, 0, 1], + [0, 1, 0, 0], + [0, 1, 0, 0]]) + for b in [b4, b4by1, b4by2, b4by4]: + x = solve_banded((1, 1), ab, b) + assert_array_almost_equal(dot(a, x), b) + + def test_tridiag_complex(self): + ab = array([[0.0, 20, 6, 2j], + [1, 4, 20, 14], + [-30, 1, 7, 0]]) + a = np.diag(ab[0, 1:], 1) + np.diag(ab[1, :], 0) + np.diag( + ab[2, :-1], -1) + b4 = array([10.0, 0.0, 2.0, 14.0j]) + b4by1 = b4.reshape(-1, 1) + b4by2 = array([[2, 1], + [-30, 
4], + [2, 3], + [1, 3]]) + b4by4 = array([[1, 0, 0, 0], + [0, 0, 0, 1], + [0, 1, 0, 0], + [0, 1, 0, 0]]) + for b in [b4, b4by1, b4by2, b4by4]: + x = solve_banded((1, 1), ab, b) + assert_array_almost_equal(dot(a, x), b) + + def test_check_finite(self): + a = array([[1.0, 20, 0, 0], + [-30, 4, 6, 0], + [2, 1, 20, 2], + [0, -1, 7, 14]]) + ab = array([[0.0, 20, 6, 2], + [1, 4, 20, 14], + [-30, 1, 7, 0], + [2, -1, 0, 0]]) + l, u = 2, 1 + b4 = array([10.0, 0.0, 2.0, 14.0]) + x = solve_banded((l, u), ab, b4, check_finite=False) + assert_array_almost_equal(dot(a, x), b4) + + def test_bad_shape(self): + ab = array([[0.0, 20, 6, 2], + [1, 4, 20, 14], + [-30, 1, 7, 0], + [2, -1, 0, 0]]) + l, u = 2, 1 + bad = array([1.0, 2.0, 3.0, 4.0]).reshape(-1, 4) + assert_raises(ValueError, solve_banded, (l, u), ab, bad) + assert_raises(ValueError, solve_banded, (l, u), ab, [1.0, 2.0]) + + # Values of (l,u) are not compatible with ab. + assert_raises(ValueError, solve_banded, (1, 1), ab, [1.0, 2.0]) + + def test_1x1(self): + b = array([[1., 2., 3.]]) + x = solve_banded((1, 1), [[0], [2], [0]], b) + assert_array_equal(x, [[0.5, 1.0, 1.5]]) + assert_equal(x.dtype, np.dtype('f8')) + assert_array_equal(b, [[1.0, 2.0, 3.0]]) + + def test_native_list_arguments(self): + a = [[1.0, 20, 0, 0], + [-30, 4, 6, 0], + [2, 1, 20, 2], + [0, -1, 7, 14]] + ab = [[0.0, 20, 6, 2], + [1, 4, 20, 14], + [-30, 1, 7, 0], + [2, -1, 0, 0]] + l, u = 2, 1 + b = [10.0, 0.0, 2.0, 14.0] + x = solve_banded((l, u), ab, b) + assert_array_almost_equal(dot(a, x), b) + + +class TestSolveHBanded: + + def test_01_upper(self): + # Solve + # [ 4 1 2 0] [1] + # [ 1 4 1 2] X = [4] + # [ 2 1 4 1] [1] + # [ 0 2 1 4] [2] + # with the RHS as a 1D array. + ab = array([[0.0, 0.0, 2.0, 2.0], + [-99, 1.0, 1.0, 1.0], + [4.0, 4.0, 4.0, 4.0]]) + b = array([1.0, 4.0, 1.0, 2.0]) + x = solveh_banded(ab, b) + assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0]) + + def test_02_upper(self): + # Solve + # [ 4 1 2 0] [1 6] + # [ 1 4 1 2] X = [4 2] + # [ 2 1 4 1] [1 6] + # [ 0 2 1 4] [2 1] + # + ab = array([[0.0, 0.0, 2.0, 2.0], + [-99, 1.0, 1.0, 1.0], + [4.0, 4.0, 4.0, 4.0]]) + b = array([[1.0, 6.0], + [4.0, 2.0], + [1.0, 6.0], + [2.0, 1.0]]) + x = solveh_banded(ab, b) + expected = array([[0.0, 1.0], + [1.0, 0.0], + [0.0, 1.0], + [0.0, 0.0]]) + assert_array_almost_equal(x, expected) + + def test_03_upper(self): + # Solve + # [ 4 1 2 0] [1] + # [ 1 4 1 2] X = [4] + # [ 2 1 4 1] [1] + # [ 0 2 1 4] [2] + # with the RHS as a 2D array with shape (3,1). 
+ ab = array([[0.0, 0.0, 2.0, 2.0], + [-99, 1.0, 1.0, 1.0], + [4.0, 4.0, 4.0, 4.0]]) + b = array([1.0, 4.0, 1.0, 2.0]).reshape(-1, 1) + x = solveh_banded(ab, b) + assert_array_almost_equal(x, array([0., 1., 0., 0.]).reshape(-1, 1)) + + def test_01_lower(self): + # Solve + # [ 4 1 2 0] [1] + # [ 1 4 1 2] X = [4] + # [ 2 1 4 1] [1] + # [ 0 2 1 4] [2] + # + ab = array([[4.0, 4.0, 4.0, 4.0], + [1.0, 1.0, 1.0, -99], + [2.0, 2.0, 0.0, 0.0]]) + b = array([1.0, 4.0, 1.0, 2.0]) + x = solveh_banded(ab, b, lower=True) + assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0]) + + def test_02_lower(self): + # Solve + # [ 4 1 2 0] [1 6] + # [ 1 4 1 2] X = [4 2] + # [ 2 1 4 1] [1 6] + # [ 0 2 1 4] [2 1] + # + ab = array([[4.0, 4.0, 4.0, 4.0], + [1.0, 1.0, 1.0, -99], + [2.0, 2.0, 0.0, 0.0]]) + b = array([[1.0, 6.0], + [4.0, 2.0], + [1.0, 6.0], + [2.0, 1.0]]) + x = solveh_banded(ab, b, lower=True) + expected = array([[0.0, 1.0], + [1.0, 0.0], + [0.0, 1.0], + [0.0, 0.0]]) + assert_array_almost_equal(x, expected) + + def test_01_float32(self): + # Solve + # [ 4 1 2 0] [1] + # [ 1 4 1 2] X = [4] + # [ 2 1 4 1] [1] + # [ 0 2 1 4] [2] + # + ab = array([[0.0, 0.0, 2.0, 2.0], + [-99, 1.0, 1.0, 1.0], + [4.0, 4.0, 4.0, 4.0]], dtype=float32) + b = array([1.0, 4.0, 1.0, 2.0], dtype=float32) + x = solveh_banded(ab, b) + assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0]) + + def test_02_float32(self): + # Solve + # [ 4 1 2 0] [1 6] + # [ 1 4 1 2] X = [4 2] + # [ 2 1 4 1] [1 6] + # [ 0 2 1 4] [2 1] + # + ab = array([[0.0, 0.0, 2.0, 2.0], + [-99, 1.0, 1.0, 1.0], + [4.0, 4.0, 4.0, 4.0]], dtype=float32) + b = array([[1.0, 6.0], + [4.0, 2.0], + [1.0, 6.0], + [2.0, 1.0]], dtype=float32) + x = solveh_banded(ab, b) + expected = array([[0.0, 1.0], + [1.0, 0.0], + [0.0, 1.0], + [0.0, 0.0]]) + assert_array_almost_equal(x, expected) + + def test_01_complex(self): + # Solve + # [ 4 -j 2 0] [2-j] + # [ j 4 -j 2] X = [4-j] + # [ 2 j 4 -j] [4+j] + # [ 0 2 j 4] [2+j] + # + ab = array([[0.0, 0.0, 2.0, 2.0], + [-99, -1.0j, -1.0j, -1.0j], + [4.0, 4.0, 4.0, 4.0]]) + b = array([2-1.0j, 4.0-1j, 4+1j, 2+1j]) + x = solveh_banded(ab, b) + assert_array_almost_equal(x, [0.0, 1.0, 1.0, 0.0]) + + def test_02_complex(self): + # Solve + # [ 4 -j 2 0] [2-j 2+4j] + # [ j 4 -j 2] X = [4-j -1-j] + # [ 2 j 4 -j] [4+j 4+2j] + # [ 0 2 j 4] [2+j j] + # + ab = array([[0.0, 0.0, 2.0, 2.0], + [-99, -1.0j, -1.0j, -1.0j], + [4.0, 4.0, 4.0, 4.0]]) + b = array([[2-1j, 2+4j], + [4.0-1j, -1-1j], + [4.0+1j, 4+2j], + [2+1j, 1j]]) + x = solveh_banded(ab, b) + expected = array([[0.0, 1.0j], + [1.0, 0.0], + [1.0, 1.0], + [0.0, 0.0]]) + assert_array_almost_equal(x, expected) + + def test_tridiag_01_upper(self): + # Solve + # [ 4 1 0] [1] + # [ 1 4 1] X = [4] + # [ 0 1 4] [1] + # with the RHS as a 1D array. + ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]]) + b = array([1.0, 4.0, 1.0]) + x = solveh_banded(ab, b) + assert_array_almost_equal(x, [0.0, 1.0, 0.0]) + + def test_tridiag_02_upper(self): + # Solve + # [ 4 1 0] [1 4] + # [ 1 4 1] X = [4 2] + # [ 0 1 4] [1 4] + # + ab = array([[-99, 1.0, 1.0], + [4.0, 4.0, 4.0]]) + b = array([[1.0, 4.0], + [4.0, 2.0], + [1.0, 4.0]]) + x = solveh_banded(ab, b) + expected = array([[0.0, 1.0], + [1.0, 0.0], + [0.0, 1.0]]) + assert_array_almost_equal(x, expected) + + def test_tridiag_03_upper(self): + # Solve + # [ 4 1 0] [1] + # [ 1 4 1] X = [4] + # [ 0 1 4] [1] + # with the RHS as a 2D array with shape (3,1). 
+ ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]]) + b = array([1.0, 4.0, 1.0]).reshape(-1, 1) + x = solveh_banded(ab, b) + assert_array_almost_equal(x, array([0.0, 1.0, 0.0]).reshape(-1, 1)) + + def test_tridiag_01_lower(self): + # Solve + # [ 4 1 0] [1] + # [ 1 4 1] X = [4] + # [ 0 1 4] [1] + # + ab = array([[4.0, 4.0, 4.0], + [1.0, 1.0, -99]]) + b = array([1.0, 4.0, 1.0]) + x = solveh_banded(ab, b, lower=True) + assert_array_almost_equal(x, [0.0, 1.0, 0.0]) + + def test_tridiag_02_lower(self): + # Solve + # [ 4 1 0] [1 4] + # [ 1 4 1] X = [4 2] + # [ 0 1 4] [1 4] + # + ab = array([[4.0, 4.0, 4.0], + [1.0, 1.0, -99]]) + b = array([[1.0, 4.0], + [4.0, 2.0], + [1.0, 4.0]]) + x = solveh_banded(ab, b, lower=True) + expected = array([[0.0, 1.0], + [1.0, 0.0], + [0.0, 1.0]]) + assert_array_almost_equal(x, expected) + + def test_tridiag_01_float32(self): + # Solve + # [ 4 1 0] [1] + # [ 1 4 1] X = [4] + # [ 0 1 4] [1] + # + ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]], dtype=float32) + b = array([1.0, 4.0, 1.0], dtype=float32) + x = solveh_banded(ab, b) + assert_array_almost_equal(x, [0.0, 1.0, 0.0]) + + def test_tridiag_02_float32(self): + # Solve + # [ 4 1 0] [1 4] + # [ 1 4 1] X = [4 2] + # [ 0 1 4] [1 4] + # + ab = array([[-99, 1.0, 1.0], + [4.0, 4.0, 4.0]], dtype=float32) + b = array([[1.0, 4.0], + [4.0, 2.0], + [1.0, 4.0]], dtype=float32) + x = solveh_banded(ab, b) + expected = array([[0.0, 1.0], + [1.0, 0.0], + [0.0, 1.0]]) + assert_array_almost_equal(x, expected) + + def test_tridiag_01_complex(self): + # Solve + # [ 4 -j 0] [ -j] + # [ j 4 -j] X = [4-j] + # [ 0 j 4] [4+j] + # + ab = array([[-99, -1.0j, -1.0j], [4.0, 4.0, 4.0]]) + b = array([-1.0j, 4.0-1j, 4+1j]) + x = solveh_banded(ab, b) + assert_array_almost_equal(x, [0.0, 1.0, 1.0]) + + def test_tridiag_02_complex(self): + # Solve + # [ 4 -j 0] [ -j 4j] + # [ j 4 -j] X = [4-j -1-j] + # [ 0 j 4] [4+j 4 ] + # + ab = array([[-99, -1.0j, -1.0j], + [4.0, 4.0, 4.0]]) + b = array([[-1j, 4.0j], + [4.0-1j, -1.0-1j], + [4.0+1j, 4.0]]) + x = solveh_banded(ab, b) + expected = array([[0.0, 1.0j], + [1.0, 0.0], + [1.0, 1.0]]) + assert_array_almost_equal(x, expected) + + def test_check_finite(self): + # Solve + # [ 4 1 0] [1] + # [ 1 4 1] X = [4] + # [ 0 1 4] [1] + # with the RHS as a 1D array. + ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]]) + b = array([1.0, 4.0, 1.0]) + x = solveh_banded(ab, b, check_finite=False) + assert_array_almost_equal(x, [0.0, 1.0, 0.0]) + + def test_bad_shapes(self): + ab = array([[-99, 1.0, 1.0], + [4.0, 4.0, 4.0]]) + b = array([[1.0, 4.0], + [4.0, 2.0]]) + assert_raises(ValueError, solveh_banded, ab, b) + assert_raises(ValueError, solveh_banded, ab, [1.0, 2.0]) + assert_raises(ValueError, solveh_banded, ab, [1.0]) + + def test_1x1(self): + x = solveh_banded([[1]], [[1, 2, 3]]) + assert_array_equal(x, [[1.0, 2.0, 3.0]]) + assert_equal(x.dtype, np.dtype('f8')) + + def test_native_list_arguments(self): + # Same as test_01_upper, using python's native list. 
+ ab = [[0.0, 0.0, 2.0, 2.0], + [-99, 1.0, 1.0, 1.0], + [4.0, 4.0, 4.0, 4.0]] + b = [1.0, 4.0, 1.0, 2.0] + x = solveh_banded(ab, b) + assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0]) + + +class TestSolve: + def setup_method(self): + np.random.seed(1234) + + def test_20Feb04_bug(self): + a = [[1, 1], [1.0, 0]] # ok + x0 = solve(a, [1, 0j]) + assert_array_almost_equal(dot(a, x0), [1, 0]) + + # gives failure with clapack.zgesv(..,rowmajor=0) + a = [[1, 1], [1.2, 0]] + b = [1, 0j] + x0 = solve(a, b) + assert_array_almost_equal(dot(a, x0), [1, 0]) + + def test_simple(self): + a = [[1, 20], [-30, 4]] + for b in ([[1, 0], [0, 1]], + [1, 0], + [[2, 1], [-30, 4]] + ): + x = solve(a, b) + assert_array_almost_equal(dot(a, x), b) + + def test_simple_complex(self): + a = array([[5, 2], [2j, 4]], 'D') + for b in ([1j, 0], + [[1j, 1j], [0, 2]], + [1, 0j], + array([1, 0], 'D'), + ): + x = solve(a, b) + assert_array_almost_equal(dot(a, x), b) + + def test_simple_pos(self): + a = [[2, 3], [3, 5]] + for lower in [0, 1]: + for b in ([[1, 0], [0, 1]], + [1, 0] + ): + x = solve(a, b, assume_a='pos', lower=lower) + assert_array_almost_equal(dot(a, x), b) + + def test_simple_pos_complexb(self): + a = [[5, 2], [2, 4]] + for b in ([1j, 0], + [[1j, 1j], [0, 2]], + ): + x = solve(a, b, assume_a='pos') + assert_array_almost_equal(dot(a, x), b) + + def test_simple_sym(self): + a = [[2, 3], [3, -5]] + for lower in [0, 1]: + for b in ([[1, 0], [0, 1]], + [1, 0] + ): + x = solve(a, b, assume_a='sym', lower=lower) + assert_array_almost_equal(dot(a, x), b) + + def test_simple_sym_complexb(self): + a = [[5, 2], [2, -4]] + for b in ([1j, 0], + [[1j, 1j], [0, 2]] + ): + x = solve(a, b, assume_a='sym') + assert_array_almost_equal(dot(a, x), b) + + def test_simple_sym_complex(self): + a = [[5, 2+1j], [2+1j, -4]] + for b in ([1j, 0], + [1, 0], + [[1j, 1j], [0, 2]] + ): + x = solve(a, b, assume_a='sym') + assert_array_almost_equal(dot(a, x), b) + + def test_simple_her_actuallysym(self): + a = [[2, 3], [3, -5]] + for lower in [0, 1]: + for b in ([[1, 0], [0, 1]], + [1, 0], + [1j, 0], + ): + x = solve(a, b, assume_a='her', lower=lower) + assert_array_almost_equal(dot(a, x), b) + + def test_simple_her(self): + a = [[5, 2+1j], [2-1j, -4]] + for b in ([1j, 0], + [1, 0], + [[1j, 1j], [0, 2]] + ): + x = solve(a, b, assume_a='her') + assert_array_almost_equal(dot(a, x), b) + + def test_nils_20Feb04(self): + n = 2 + A = random([n, n])+random([n, n])*1j + X = zeros((n, n), 'D') + Ainv = inv(A) + R = identity(n)+identity(n)*0j + for i in arange(0, n): + r = R[:, i] + X[:, i] = solve(A, r) + assert_array_almost_equal(X, Ainv) + + def test_random(self): + + n = 20 + a = random([n, n]) + for i in range(n): + a[i, i] = 20*(.1+a[i, i]) + for i in range(4): + b = random([n, 3]) + x = solve(a, b) + assert_array_almost_equal(dot(a, x), b) + + def test_random_complex(self): + n = 20 + a = random([n, n]) + 1j * random([n, n]) + for i in range(n): + a[i, i] = 20*(.1+a[i, i]) + for i in range(2): + b = random([n, 3]) + x = solve(a, b) + assert_array_almost_equal(dot(a, x), b) + + def test_random_sym(self): + n = 20 + a = random([n, n]) + for i in range(n): + a[i, i] = abs(20*(.1+a[i, i])) + for j in range(i): + a[i, j] = a[j, i] + for i in range(4): + b = random([n]) + x = solve(a, b, assume_a="pos") + assert_array_almost_equal(dot(a, x), b) + + def test_random_sym_complex(self): + n = 20 + a = random([n, n]) + a = a + 1j*random([n, n]) + for i in range(n): + a[i, i] = abs(20*(.1+a[i, i])) + for j in range(i): + a[i, j] = conjugate(a[j, i]) + b = 
random([n])+2j*random([n]) + for i in range(2): + x = solve(a, b, assume_a="pos") + assert_array_almost_equal(dot(a, x), b) + + def test_check_finite(self): + a = [[1, 20], [-30, 4]] + for b in ([[1, 0], [0, 1]], [1, 0], + [[2, 1], [-30, 4]]): + x = solve(a, b, check_finite=False) + assert_array_almost_equal(dot(a, x), b) + + def test_scalar_a_and_1D_b(self): + a = 1 + b = [1, 2, 3] + x = solve(a, b) + assert_array_almost_equal(x.ravel(), b) + assert_(x.shape == (3,), 'Scalar_a_1D_b test returned wrong shape') + + def test_simple2(self): + a = np.array([[1.80, 2.88, 2.05, -0.89], + [525.00, -295.00, -95.00, -380.00], + [1.58, -2.69, -2.90, -1.04], + [-1.11, -0.66, -0.59, 0.80]]) + + b = np.array([[9.52, 18.47], + [2435.00, 225.00], + [0.77, -13.28], + [-6.22, -6.21]]) + + x = solve(a, b) + assert_array_almost_equal(x, np.array([[1., -1, 3, -5], + [3, 2, 4, 1]]).T) + + def test_simple_complex2(self): + a = np.array([[-1.34+2.55j, 0.28+3.17j, -6.39-2.20j, 0.72-0.92j], + [-1.70-14.10j, 33.10-1.50j, -1.50+13.40j, 12.90+13.80j], + [-3.29-2.39j, -1.91+4.42j, -0.14-1.35j, 1.72+1.35j], + [2.41+0.39j, -0.56+1.47j, -0.83-0.69j, -1.96+0.67j]]) + + b = np.array([[26.26+51.78j, 31.32-6.70j], + [64.30-86.80j, 158.60-14.20j], + [-5.75+25.31j, -2.15+30.19j], + [1.16+2.57j, -2.56+7.55j]]) + + x = solve(a, b) + assert_array_almost_equal(x, np. array([[1+1.j, -1-2.j], + [2-3.j, 5+1.j], + [-4-5.j, -3+4.j], + [6.j, 2-3.j]])) + + def test_hermitian(self): + # An upper triangular matrix will be used for hermitian matrix a + a = np.array([[-1.84, 0.11-0.11j, -1.78-1.18j, 3.91-1.50j], + [0, -4.63, -1.84+0.03j, 2.21+0.21j], + [0, 0, -8.87, 1.58-0.90j], + [0, 0, 0, -1.36]]) + b = np.array([[2.98-10.18j, 28.68-39.89j], + [-9.58+3.88j, -24.79-8.40j], + [-0.77-16.05j, 4.23-70.02j], + [7.79+5.48j, -35.39+18.01j]]) + res = np.array([[2.+1j, -8+6j], + [3.-2j, 7-2j], + [-1+2j, -1+5j], + [1.-1j, 3-4j]]) + x = solve(a, b, assume_a='her') + assert_array_almost_equal(x, res) + # Also conjugate a and test for lower triangular data + x = solve(a.conj().T, b, assume_a='her', lower=True) + assert_array_almost_equal(x, res) + + def test_pos_and_sym(self): + A = np.arange(1, 10).reshape(3, 3) + x = solve(np.tril(A)/9, np.ones(3), assume_a='pos') + assert_array_almost_equal(x, [9., 1.8, 1.]) + x = solve(np.tril(A)/9, np.ones(3), assume_a='sym') + assert_array_almost_equal(x, [9., 1.8, 1.]) + + def test_singularity(self): + a = np.array([[1, 0, 0, 0, 0, 0, 1, 0, 1], + [1, 1, 1, 0, 0, 0, 1, 0, 1], + [0, 1, 1, 0, 0, 0, 1, 0, 1], + [1, 0, 1, 1, 1, 1, 0, 0, 0], + [1, 0, 1, 1, 1, 1, 0, 0, 0], + [1, 0, 1, 1, 1, 1, 0, 0, 0], + [1, 0, 1, 1, 1, 1, 0, 0, 0], + [1, 1, 1, 1, 1, 1, 1, 1, 1], + [1, 1, 1, 1, 1, 1, 1, 1, 1]]) + b = np.arange(9)[:, None] + assert_raises(LinAlgError, solve, a, b) + + def test_ill_condition_warning(self): + a = np.array([[1, 1], [1+1e-16, 1-1e-16]]) + b = np.ones(2) + with warnings.catch_warnings(): + warnings.simplefilter('error') + assert_raises(LinAlgWarning, solve, a, b) + + def test_empty_rhs(self): + a = np.eye(2) + b = [[], []] + x = solve(a, b) + assert_(x.size == 0, 'Returned array is not empty') + assert_(x.shape == (2, 0), 'Returned empty array shape is wrong') + + def test_multiple_rhs(self): + a = np.eye(2) + b = np.random.rand(2, 3, 4) + x = solve(a, b) + assert_array_almost_equal(x, b) + + def test_transposed_keyword(self): + A = np.arange(9).reshape(3, 3) + 1 + x = solve(np.tril(A)/9, np.ones(3), transposed=True) + assert_array_almost_equal(x, [1.2, 0.2, 1]) + x = solve(np.tril(A)/9, np.ones(3), 
transposed=False) + assert_array_almost_equal(x, [9, -5.4, -1.2]) + + def test_transposed_notimplemented(self): + a = np.eye(3).astype(complex) + with assert_raises(NotImplementedError): + solve(a, a, transposed=True) + + def test_nonsquare_a(self): + assert_raises(ValueError, solve, [1, 2], 1) + + def test_size_mismatch_with_1D_b(self): + assert_array_almost_equal(solve(np.eye(3), np.ones(3)), np.ones(3)) + assert_raises(ValueError, solve, np.eye(3), np.ones(4)) + + def test_assume_a_keyword(self): + assert_raises(ValueError, solve, 1, 1, assume_a='zxcv') + + @pytest.mark.skip(reason="Failure on OS X (gh-7500), " + "crash on Windows (gh-8064)") + def test_all_type_size_routine_combinations(self): + sizes = [10, 100] + assume_as = ['gen', 'sym', 'pos', 'her'] + dtypes = [np.float32, np.float64, np.complex64, np.complex128] + for size, assume_a, dtype in itertools.product(sizes, assume_as, + dtypes): + is_complex = dtype in (np.complex64, np.complex128) + if assume_a == 'her' and not is_complex: + continue + + err_msg = (f"Failed for size: {size}, assume_a: {assume_a}," + f"dtype: {dtype}") + + a = np.random.randn(size, size).astype(dtype) + b = np.random.randn(size).astype(dtype) + if is_complex: + a = a + (1j*np.random.randn(size, size)).astype(dtype) + + if assume_a == 'sym': # Can still be complex but only symmetric + a = a + a.T + elif assume_a == 'her': # Handle hermitian matrices here instead + a = a + a.T.conj() + elif assume_a == 'pos': + a = a.conj().T.dot(a) + 0.1*np.eye(size) + + tol = 1e-12 if dtype in (np.float64, np.complex128) else 1e-6 + + if assume_a in ['gen', 'sym', 'her']: + # We revert the tolerance from before + # 4b4a6e7c34fa4060533db38f9a819b98fa81476c + if dtype in (np.float32, np.complex64): + tol *= 10 + + x = solve(a, b, assume_a=assume_a) + assert_allclose(a.dot(x), b, + atol=tol * size, + rtol=tol * size, + err_msg=err_msg) + + if assume_a == 'sym' and dtype not in (np.complex64, + np.complex128): + x = solve(a, b, assume_a=assume_a, transposed=True) + assert_allclose(a.dot(x), b, + atol=tol * size, + rtol=tol * size, + err_msg=err_msg) + + +class TestSolveTriangular: + + def test_simple(self): + """ + solve_triangular on a simple 2x2 matrix. 
+ """ + A = array([[1, 0], [1, 2]]) + b = [1, 1] + sol = solve_triangular(A, b, lower=True) + assert_array_almost_equal(sol, [1, 0]) + + # check that it works also for non-contiguous matrices + sol = solve_triangular(A.T, b, lower=False) + assert_array_almost_equal(sol, [.5, .5]) + + # and that it gives the same result as trans=1 + sol = solve_triangular(A, b, lower=True, trans=1) + assert_array_almost_equal(sol, [.5, .5]) + + b = identity(2) + sol = solve_triangular(A, b, lower=True, trans=1) + assert_array_almost_equal(sol, [[1., -.5], [0, 0.5]]) + + def test_simple_complex(self): + """ + solve_triangular on a simple 2x2 complex matrix + """ + A = array([[1+1j, 0], [1j, 2]]) + b = identity(2) + sol = solve_triangular(A, b, lower=True, trans=1) + assert_array_almost_equal(sol, [[.5-.5j, -.25-.25j], [0, 0.5]]) + + # check other option combinations with complex rhs + b = np.diag([1+1j, 1+2j]) + sol = solve_triangular(A, b, lower=True, trans=0) + assert_array_almost_equal(sol, [[1, 0], [-0.5j, 0.5+1j]]) + + sol = solve_triangular(A, b, lower=True, trans=1) + assert_array_almost_equal(sol, [[1, 0.25-0.75j], [0, 0.5+1j]]) + + sol = solve_triangular(A, b, lower=True, trans=2) + assert_array_almost_equal(sol, [[1j, -0.75-0.25j], [0, 0.5+1j]]) + + sol = solve_triangular(A.T, b, lower=False, trans=0) + assert_array_almost_equal(sol, [[1, 0.25-0.75j], [0, 0.5+1j]]) + + sol = solve_triangular(A.T, b, lower=False, trans=1) + assert_array_almost_equal(sol, [[1, 0], [-0.5j, 0.5+1j]]) + + sol = solve_triangular(A.T, b, lower=False, trans=2) + assert_array_almost_equal(sol, [[1j, 0], [-0.5, 0.5+1j]]) + + def test_check_finite(self): + """ + solve_triangular on a simple 2x2 matrix. + """ + A = array([[1, 0], [1, 2]]) + b = [1, 1] + sol = solve_triangular(A, b, lower=True, check_finite=False) + assert_array_almost_equal(sol, [1, 0]) + + +class TestInv: + def setup_method(self): + np.random.seed(1234) + + def test_simple(self): + a = [[1, 2], [3, 4]] + a_inv = inv(a) + assert_array_almost_equal(dot(a, a_inv), np.eye(2)) + a = [[1, 2, 3], [4, 5, 6], [7, 8, 10]] + a_inv = inv(a) + assert_array_almost_equal(dot(a, a_inv), np.eye(3)) + + def test_random(self): + n = 20 + for i in range(4): + a = random([n, n]) + for i in range(n): + a[i, i] = 20*(.1+a[i, i]) + a_inv = inv(a) + assert_array_almost_equal(dot(a, a_inv), + identity(n)) + + def test_simple_complex(self): + a = [[1, 2], [3, 4j]] + a_inv = inv(a) + assert_array_almost_equal(dot(a, a_inv), [[1, 0], [0, 1]]) + + def test_random_complex(self): + n = 20 + for i in range(4): + a = random([n, n])+2j*random([n, n]) + for i in range(n): + a[i, i] = 20*(.1+a[i, i]) + a_inv = inv(a) + assert_array_almost_equal(dot(a, a_inv), + identity(n)) + + def test_check_finite(self): + a = [[1, 2], [3, 4]] + a_inv = inv(a, check_finite=False) + assert_array_almost_equal(dot(a, a_inv), [[1, 0], [0, 1]]) + + +class TestDet: + def setup_method(self): + self.rng = np.random.default_rng(1680305949878959) + + def test_1x1_all_singleton_dims(self): + a = np.array([[1]]) + deta = det(a) + assert deta.dtype.char == 'd' + assert np.isscalar(deta) + assert deta == 1. + a = np.array([[[[1]]]], dtype='f') + deta = det(a) + assert deta.dtype.char == 'd' + assert np.isscalar(deta) + assert deta == 1. 
+ a = np.array([[[1 + 3.j]]], dtype=np.complex64) + deta = det(a) + assert deta.dtype.char == 'D' + assert np.isscalar(deta) + assert deta == 1.+3.j + + def test_1by1_stacked_input_output(self): + a = self.rng.random([4, 5, 1, 1], dtype=np.float32) + deta = det(a) + assert deta.dtype.char == 'd' + assert deta.shape == (4, 5) + assert_allclose(deta, np.squeeze(a)) + + a = self.rng.random([4, 5, 1, 1], dtype=np.float32)*np.complex64(1.j) + deta = det(a) + assert deta.dtype.char == 'D' + assert deta.shape == (4, 5) + assert_allclose(deta, np.squeeze(a)) + + @pytest.mark.parametrize('shape', [[2, 2], [20, 20], [3, 2, 20, 20]]) + def test_simple_det_shapes_real_complex(self, shape): + a = self.rng.uniform(-1., 1., size=shape) + d1, d2 = det(a), np.linalg.det(a) + assert_allclose(d1, d2) + + b = self.rng.uniform(-1., 1., size=shape)*1j + b += self.rng.uniform(-0.5, 0.5, size=shape) + d3, d4 = det(b), np.linalg.det(b) + assert_allclose(d3, d4) + + def test_for_known_det_values(self): + # Hadamard8 + a = np.array([[1, 1, 1, 1, 1, 1, 1, 1], + [1, -1, 1, -1, 1, -1, 1, -1], + [1, 1, -1, -1, 1, 1, -1, -1], + [1, -1, -1, 1, 1, -1, -1, 1], + [1, 1, 1, 1, -1, -1, -1, -1], + [1, -1, 1, -1, -1, 1, -1, 1], + [1, 1, -1, -1, -1, -1, 1, 1], + [1, -1, -1, 1, -1, 1, 1, -1]]) + assert_allclose(det(a), 4096.) + + # consecutive number array always singular + assert_allclose(det(np.arange(25).reshape(5, 5)), 0.) + + # simple anti-diagonal block array + # Upper right has det (-2+1j) and lower right has (-2-1j) + # det(a) = - (-2+1j) (-2-1j) = 5. + a = np.array([[0.+0.j, 0.+0.j, 0.-1.j, 1.-1.j], + [0.+0.j, 0.+0.j, 1.+0.j, 0.-1.j], + [0.+1.j, 1.+1.j, 0.+0.j, 0.+0.j], + [1.+0.j, 0.+1.j, 0.+0.j, 0.+0.j]], dtype=np.complex64) + assert_allclose(det(a), 5.+0.j) + + # Fiedler companion complexified + # >>> a = scipy.linalg.fiedler_companion(np.arange(1, 10)) + a = np.array([[-2., -3., 1., 0., 0., 0., 0., 0.], + [1., 0., 0., 0., 0., 0., 0., 0.], + [0., -4., 0., -5., 1., 0., 0., 0.], + [0., 1., 0., 0., 0., 0., 0., 0.], + [0., 0., 0., -6., 0., -7., 1., 0.], + [0., 0., 0., 1., 0., 0., 0., 0.], + [0., 0., 0., 0., 0., -8., 0., -9.], + [0., 0., 0., 0., 0., 1., 0., 0.]])*1.j + assert_allclose(det(a), 9.) + + # g and G dtypes are handled differently in windows and other platforms + @pytest.mark.parametrize('typ', [x for x in np.typecodes['All'][:20] + if x not in 'gG']) + def test_sample_compatible_dtype_input(self, typ): + n = 4 + a = self.rng.random([n, n]).astype(typ) # value is not important + assert isinstance(det(a), (np.float64, np.complex128)) + + def test_incompatible_dtype_input(self): + # Double backslashes needed for escaping pytest regex. + msg = 'cannot be cast to float\\(32, 64\\)' + + for c, t in zip('SUO', ['bytes8', 'str32', 'object']): + with assert_raises(TypeError, match=msg): + det(np.array([['a', 'b']]*2, dtype=c)) + with assert_raises(TypeError, match=msg): + det(np.array([[b'a', b'b']]*2, dtype='V')) + with assert_raises(TypeError, match=msg): + det(np.array([[100, 200]]*2, dtype='datetime64[s]')) + with assert_raises(TypeError, match=msg): + det(np.array([[100, 200]]*2, dtype='timedelta64[s]')) + + def test_empty_edge_cases(self): + assert_allclose(det(np.empty([0, 0])), 1.) 
+ assert_allclose(det(np.empty([0, 0, 0])), np.array([])) + assert_allclose(det(np.empty([3, 0, 0])), np.array([1., 1., 1.])) + with assert_raises(ValueError, match='Last 2 dimensions'): + det(np.empty([0, 0, 3])) + with assert_raises(ValueError, match='at least two-dimensional'): + det(np.array([])) + with assert_raises(ValueError, match='Last 2 dimensions'): + det(np.array([[]])) + with assert_raises(ValueError, match='Last 2 dimensions'): + det(np.array([[[]]])) + + def test_overwrite_a(self): + # If all conditions are met then input should be overwritten; + # - dtype is one of 'fdFD' + # - C-contiguous + # - writeable + a = np.arange(9).reshape(3, 3).astype(np.float32) + ac = a.copy() + deta = det(ac, overwrite_a=True) + assert_allclose(deta, 0.) + assert not (a == ac).all() + + def test_readonly_array(self): + a = np.array([[2., 0., 1.], [5., 3., -1.], [1., 1., 1.]]) + a.setflags(write=False) + # overwrite_a will be overridden + assert_allclose(det(a, overwrite_a=True), 10.) + + def test_simple_check_finite(self): + a = [[1, 2], [3, np.inf]] + with assert_raises(ValueError, match='array must not contain'): + det(a) + + +def direct_lstsq(a, b, cmplx=0): + at = transpose(a) + if cmplx: + at = conjugate(at) + a1 = dot(at, a) + b1 = dot(at, b) + return solve(a1, b1) + + +class TestLstsq: + lapack_drivers = ('gelsd', 'gelss', 'gelsy', None) + + def test_simple_exact(self): + for dtype in REAL_DTYPES: + a = np.array([[1, 20], [-30, 4]], dtype=dtype) + for lapack_driver in TestLstsq.lapack_drivers: + for overwrite in (True, False): + for bt in (((1, 0), (0, 1)), (1, 0), + ((2, 1), (-30, 4))): + # Store values in case they are overwritten + # later + a1 = a.copy() + b = np.array(bt, dtype=dtype) + b1 = b.copy() + out = lstsq(a1, b1, + lapack_driver=lapack_driver, + overwrite_a=overwrite, + overwrite_b=overwrite) + x = out[0] + r = out[2] + assert_(r == 2, + 'expected efficient rank 2, got %s' % r) + assert_allclose(dot(a, x), b, + atol=25 * _eps_cast(a1.dtype), + rtol=25 * _eps_cast(a1.dtype), + err_msg="driver: %s" % lapack_driver) + + def test_simple_overdet(self): + for dtype in REAL_DTYPES: + a = np.array([[1, 2], [4, 5], [3, 4]], dtype=dtype) + b = np.array([1, 2, 3], dtype=dtype) + for lapack_driver in TestLstsq.lapack_drivers: + for overwrite in (True, False): + # Store values in case they are overwritten later + a1 = a.copy() + b1 = b.copy() + out = lstsq(a1, b1, lapack_driver=lapack_driver, + overwrite_a=overwrite, + overwrite_b=overwrite) + x = out[0] + if lapack_driver == 'gelsy': + residuals = np.sum((b - a.dot(x))**2) + else: + residuals = out[1] + r = out[2] + assert_(r == 2, 'expected efficient rank 2, got %s' % r) + assert_allclose(abs((dot(a, x) - b)**2).sum(axis=0), + residuals, + rtol=25 * _eps_cast(a1.dtype), + atol=25 * _eps_cast(a1.dtype), + err_msg="driver: %s" % lapack_driver) + assert_allclose(x, (-0.428571428571429, 0.85714285714285), + rtol=25 * _eps_cast(a1.dtype), + atol=25 * _eps_cast(a1.dtype), + err_msg="driver: %s" % lapack_driver) + + def test_simple_overdet_complex(self): + for dtype in COMPLEX_DTYPES: + a = np.array([[1+2j, 2], [4, 5], [3, 4]], dtype=dtype) + b = np.array([1, 2+4j, 3], dtype=dtype) + for lapack_driver in TestLstsq.lapack_drivers: + for overwrite in (True, False): + # Store values in case they are overwritten later + a1 = a.copy() + b1 = b.copy() + out = lstsq(a1, b1, lapack_driver=lapack_driver, + overwrite_a=overwrite, + overwrite_b=overwrite) + + x = out[0] + if lapack_driver == 'gelsy': + res = b - a.dot(x) + residuals = np.sum(res * 
res.conj()) + else: + residuals = out[1] + r = out[2] + assert_(r == 2, 'expected efficient rank 2, got %s' % r) + assert_allclose(abs((dot(a, x) - b)**2).sum(axis=0), + residuals, + rtol=25 * _eps_cast(a1.dtype), + atol=25 * _eps_cast(a1.dtype), + err_msg="driver: %s" % lapack_driver) + assert_allclose( + x, (-0.4831460674157303 + 0.258426966292135j, + 0.921348314606741 + 0.292134831460674j), + rtol=25 * _eps_cast(a1.dtype), + atol=25 * _eps_cast(a1.dtype), + err_msg="driver: %s" % lapack_driver) + + def test_simple_underdet(self): + for dtype in REAL_DTYPES: + a = np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype) + b = np.array([1, 2], dtype=dtype) + for lapack_driver in TestLstsq.lapack_drivers: + for overwrite in (True, False): + # Store values in case they are overwritten later + a1 = a.copy() + b1 = b.copy() + out = lstsq(a1, b1, lapack_driver=lapack_driver, + overwrite_a=overwrite, + overwrite_b=overwrite) + + x = out[0] + r = out[2] + assert_(r == 2, 'expected efficient rank 2, got %s' % r) + assert_allclose(x, (-0.055555555555555, 0.111111111111111, + 0.277777777777777), + rtol=25 * _eps_cast(a1.dtype), + atol=25 * _eps_cast(a1.dtype), + err_msg="driver: %s" % lapack_driver) + + def test_random_exact(self): + rng = np.random.RandomState(1234) + for dtype in REAL_DTYPES: + for n in (20, 200): + for lapack_driver in TestLstsq.lapack_drivers: + for overwrite in (True, False): + a = np.asarray(rng.random([n, n]), dtype=dtype) + for i in range(n): + a[i, i] = 20 * (0.1 + a[i, i]) + for i in range(4): + b = np.asarray(rng.random([n, 3]), dtype=dtype) + # Store values in case they are overwritten later + a1 = a.copy() + b1 = b.copy() + out = lstsq(a1, b1, + lapack_driver=lapack_driver, + overwrite_a=overwrite, + overwrite_b=overwrite) + x = out[0] + r = out[2] + assert_(r == n, f'expected efficient rank {n}, ' + f'got {r}') + if dtype is np.float32: + assert_allclose( + dot(a, x), b, + rtol=500 * _eps_cast(a1.dtype), + atol=500 * _eps_cast(a1.dtype), + err_msg="driver: %s" % lapack_driver) + else: + assert_allclose( + dot(a, x), b, + rtol=1000 * _eps_cast(a1.dtype), + atol=1000 * _eps_cast(a1.dtype), + err_msg="driver: %s" % lapack_driver) + + @pytest.mark.skipif(IS_MUSL, reason="may segfault on Alpine, see gh-17630") + def test_random_complex_exact(self): + rng = np.random.RandomState(1234) + for dtype in COMPLEX_DTYPES: + for n in (20, 200): + for lapack_driver in TestLstsq.lapack_drivers: + for overwrite in (True, False): + a = np.asarray(rng.random([n, n]) + 1j*rng.random([n, n]), + dtype=dtype) + for i in range(n): + a[i, i] = 20 * (0.1 + a[i, i]) + for i in range(2): + b = np.asarray(rng.random([n, 3]), dtype=dtype) + # Store values in case they are overwritten later + a1 = a.copy() + b1 = b.copy() + out = lstsq(a1, b1, lapack_driver=lapack_driver, + overwrite_a=overwrite, + overwrite_b=overwrite) + x = out[0] + r = out[2] + assert_(r == n, f'expected efficient rank {n}, ' + f'got {r}') + if dtype is np.complex64: + assert_allclose( + dot(a, x), b, + rtol=400 * _eps_cast(a1.dtype), + atol=400 * _eps_cast(a1.dtype), + err_msg="driver: %s" % lapack_driver) + else: + assert_allclose( + dot(a, x), b, + rtol=1000 * _eps_cast(a1.dtype), + atol=1000 * _eps_cast(a1.dtype), + err_msg="driver: %s" % lapack_driver) + + def test_random_overdet(self): + rng = np.random.RandomState(1234) + for dtype in REAL_DTYPES: + for (n, m) in ((20, 15), (200, 2)): + for lapack_driver in TestLstsq.lapack_drivers: + for overwrite in (True, False): + a = np.asarray(rng.random([n, m]), dtype=dtype) + for i in 
range(m): + a[i, i] = 20 * (0.1 + a[i, i]) + for i in range(4): + b = np.asarray(rng.random([n, 3]), dtype=dtype) + # Store values in case they are overwritten later + a1 = a.copy() + b1 = b.copy() + out = lstsq(a1, b1, + lapack_driver=lapack_driver, + overwrite_a=overwrite, + overwrite_b=overwrite) + x = out[0] + r = out[2] + assert_(r == m, f'expected efficient rank {m}, ' + f'got {r}') + assert_allclose( + x, direct_lstsq(a, b, cmplx=0), + rtol=25 * _eps_cast(a1.dtype), + atol=25 * _eps_cast(a1.dtype), + err_msg="driver: %s" % lapack_driver) + + def test_random_complex_overdet(self): + rng = np.random.RandomState(1234) + for dtype in COMPLEX_DTYPES: + for (n, m) in ((20, 15), (200, 2)): + for lapack_driver in TestLstsq.lapack_drivers: + for overwrite in (True, False): + a = np.asarray(rng.random([n, m]) + 1j*rng.random([n, m]), + dtype=dtype) + for i in range(m): + a[i, i] = 20 * (0.1 + a[i, i]) + for i in range(2): + b = np.asarray(rng.random([n, 3]), dtype=dtype) + # Store values in case they are overwritten + # later + a1 = a.copy() + b1 = b.copy() + out = lstsq(a1, b1, + lapack_driver=lapack_driver, + overwrite_a=overwrite, + overwrite_b=overwrite) + x = out[0] + r = out[2] + assert_(r == m, f'expected efficient rank {m}, ' + f'got {r}') + assert_allclose( + x, direct_lstsq(a, b, cmplx=1), + rtol=25 * _eps_cast(a1.dtype), + atol=25 * _eps_cast(a1.dtype), + err_msg="driver: %s" % lapack_driver) + + def test_check_finite(self): + with suppress_warnings() as sup: + # On (some) OSX this tests triggers a warning (gh-7538) + sup.filter(RuntimeWarning, + "internal gelsd driver lwork query error,.*" + "Falling back to 'gelss' driver.") + + at = np.array(((1, 20), (-30, 4))) + for dtype, bt, lapack_driver, overwrite, check_finite in \ + itertools.product(REAL_DTYPES, + (((1, 0), (0, 1)), (1, 0), ((2, 1), (-30, 4))), + TestLstsq.lapack_drivers, + (True, False), + (True, False)): + + a = at.astype(dtype) + b = np.array(bt, dtype=dtype) + # Store values in case they are overwritten + # later + a1 = a.copy() + b1 = b.copy() + out = lstsq(a1, b1, lapack_driver=lapack_driver, + check_finite=check_finite, overwrite_a=overwrite, + overwrite_b=overwrite) + x = out[0] + r = out[2] + assert_(r == 2, 'expected efficient rank 2, got %s' % r) + assert_allclose(dot(a, x), b, + rtol=25 * _eps_cast(a.dtype), + atol=25 * _eps_cast(a.dtype), + err_msg="driver: %s" % lapack_driver) + + def test_zero_size(self): + for a_shape, b_shape in (((0, 2), (0,)), + ((0, 4), (0, 2)), + ((4, 0), (4,)), + ((4, 0), (4, 2))): + b = np.ones(b_shape) + x, residues, rank, s = lstsq(np.zeros(a_shape), b) + assert_equal(x, np.zeros((a_shape[1],) + b_shape[1:])) + residues_should_be = (np.empty((0,)) if a_shape[1] + else np.linalg.norm(b, axis=0)**2) + assert_equal(residues, residues_should_be) + assert_(rank == 0, 'expected rank 0') + assert_equal(s, np.empty((0,))) + + +class TestPinv: + def setup_method(self): + np.random.seed(1234) + + def test_simple_real(self): + a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float) + a_pinv = pinv(a) + assert_array_almost_equal(dot(a, a_pinv), np.eye(3)) + + def test_simple_complex(self): + a = (array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], + dtype=float) + 1j * array([[10, 8, 7], [6, 5, 4], [3, 2, 1]], + dtype=float)) + a_pinv = pinv(a) + assert_array_almost_equal(dot(a, a_pinv), np.eye(3)) + + def test_simple_singular(self): + a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=float) + a_pinv = pinv(a) + expected = array([[-6.38888889e-01, -1.66666667e-01, 3.05555556e-01], + 
[-5.55555556e-02, 1.30136518e-16, 5.55555556e-02], + [5.27777778e-01, 1.66666667e-01, -1.94444444e-01]]) + assert_array_almost_equal(a_pinv, expected) + + def test_simple_cols(self): + a = array([[1, 2, 3], [4, 5, 6]], dtype=float) + a_pinv = pinv(a) + expected = array([[-0.94444444, 0.44444444], + [-0.11111111, 0.11111111], + [0.72222222, -0.22222222]]) + assert_array_almost_equal(a_pinv, expected) + + def test_simple_rows(self): + a = array([[1, 2], [3, 4], [5, 6]], dtype=float) + a_pinv = pinv(a) + expected = array([[-1.33333333, -0.33333333, 0.66666667], + [1.08333333, 0.33333333, -0.41666667]]) + assert_array_almost_equal(a_pinv, expected) + + def test_check_finite(self): + a = array([[1, 2, 3], [4, 5, 6.], [7, 8, 10]]) + a_pinv = pinv(a, check_finite=False) + assert_array_almost_equal(dot(a, a_pinv), np.eye(3)) + + def test_native_list_argument(self): + a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] + a_pinv = pinv(a) + expected = array([[-6.38888889e-01, -1.66666667e-01, 3.05555556e-01], + [-5.55555556e-02, 1.30136518e-16, 5.55555556e-02], + [5.27777778e-01, 1.66666667e-01, -1.94444444e-01]]) + assert_array_almost_equal(a_pinv, expected) + + def test_atol_rtol(self): + n = 12 + # get a random ortho matrix for shuffling + q, _ = qr(np.random.rand(n, n)) + a_m = np.arange(35.0).reshape(7, 5) + a = a_m.copy() + a[0, 0] = 0.001 + atol = 1e-5 + rtol = 0.05 + # svds of a_m is ~ [116.906, 4.234, tiny, tiny, tiny] + # svds of a is ~ [116.906, 4.234, 4.62959e-04, tiny, tiny] + # Just abs cutoff such that we arrive at a_modified + a_p = pinv(a_m, atol=atol, rtol=0.) + adiff1 = a @ a_p @ a - a + adiff2 = a_m @ a_p @ a_m - a_m + # Now adiff1 should be around atol value while adiff2 should be + # relatively tiny + assert_allclose(np.linalg.norm(adiff1), 5e-4, atol=5.e-4) + assert_allclose(np.linalg.norm(adiff2), 5e-14, atol=5.e-14) + + # Now do the same but remove another sv ~4.234 via rtol + a_p = pinv(a_m, atol=atol, rtol=rtol) + adiff1 = a @ a_p @ a - a + adiff2 = a_m @ a_p @ a_m - a_m + assert_allclose(np.linalg.norm(adiff1), 4.233, rtol=0.01) + assert_allclose(np.linalg.norm(adiff2), 4.233, rtol=0.01) + + @pytest.mark.parametrize("cond", [1, None, _NoValue]) + @pytest.mark.parametrize("rcond", [1, None, _NoValue]) + def test_cond_rcond_deprecation(self, cond, rcond): + if cond is _NoValue and rcond is _NoValue: + # the defaults if cond/rcond aren't set -> no warning + pinv(np.ones((2,2)), cond=cond, rcond=rcond) + else: + # at least one of cond/rcond has a user-supplied value -> warn + with pytest.deprecated_call(match='"cond" and "rcond"'): + pinv(np.ones((2,2)), cond=cond, rcond=rcond) + + def test_positional_deprecation(self): + with pytest.deprecated_call(match="use keyword arguments"): + pinv(np.ones((2,2)), 0., 1e-10) + + +class TestPinvSymmetric: + + def setup_method(self): + np.random.seed(1234) + + def test_simple_real(self): + a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float) + a = np.dot(a, a.T) + a_pinv = pinvh(a) + assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3)) + + def test_nonpositive(self): + a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=float) + a = np.dot(a, a.T) + u, s, vt = np.linalg.svd(a) + s[0] *= -1 + a = np.dot(u * s, vt) # a is now symmetric non-positive and singular + a_pinv = pinv(a) + a_pinvh = pinvh(a) + assert_array_almost_equal(a_pinv, a_pinvh) + + def test_simple_complex(self): + a = (array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], + dtype=float) + 1j * array([[10, 8, 7], [6, 5, 4], [3, 2, 1]], + dtype=float)) + a = np.dot(a, a.conj().T) + a_pinv = 
pinvh(a) + assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3)) + + def test_native_list_argument(self): + a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float) + a = np.dot(a, a.T) + a_pinv = pinvh(a.tolist()) + assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3)) + + def test_atol_rtol(self): + n = 12 + # get a random ortho matrix for shuffling + q, _ = qr(np.random.rand(n, n)) + a = np.diag([4, 3, 2, 1, 0.99e-4, 0.99e-5] + [0.99e-6]*(n-6)) + a = q.T @ a @ q + a_m = np.diag([4, 3, 2, 1, 0.99e-4, 0.] + [0.]*(n-6)) + a_m = q.T @ a_m @ q + atol = 1e-5 + rtol = (4.01e-4 - 4e-5)/4 + # Just abs cutoff such that we arrive at a_modified + a_p = pinvh(a, atol=atol, rtol=0.) + adiff1 = a @ a_p @ a - a + adiff2 = a_m @ a_p @ a_m - a_m + # Now adiff1 should dance around atol value since truncation + # while adiff2 should be relatively tiny + assert_allclose(norm(adiff1), atol, rtol=0.1) + assert_allclose(norm(adiff2), 1e-12, atol=1e-11) + + # Now do the same but through rtol cancelling atol value + a_p = pinvh(a, atol=atol, rtol=rtol) + adiff1 = a @ a_p @ a - a + adiff2 = a_m @ a_p @ a_m - a_m + # adiff1 and adiff2 should be elevated to ~1e-4 due to mismatch + assert_allclose(norm(adiff1), 1e-4, rtol=0.1) + assert_allclose(norm(adiff2), 1e-4, rtol=0.1) + + +@pytest.mark.parametrize('scale', (1e-20, 1., 1e20)) +@pytest.mark.parametrize('pinv_', (pinv, pinvh)) +def test_auto_rcond(scale, pinv_): + x = np.array([[1, 0], [0, 1e-10]]) * scale + expected = np.diag(1. / np.diag(x)) + x_inv = pinv_(x) + assert_allclose(x_inv, expected) + + +class TestVectorNorms: + + def test_types(self): + for dtype in np.typecodes['AllFloat']: + x = np.array([1, 2, 3], dtype=dtype) + tol = max(1e-15, np.finfo(dtype).eps.real * 20) + assert_allclose(norm(x), np.sqrt(14), rtol=tol) + assert_allclose(norm(x, 2), np.sqrt(14), rtol=tol) + + for dtype in np.typecodes['Complex']: + x = np.array([1j, 2j, 3j], dtype=dtype) + tol = max(1e-15, np.finfo(dtype).eps.real * 20) + assert_allclose(norm(x), np.sqrt(14), rtol=tol) + assert_allclose(norm(x, 2), np.sqrt(14), rtol=tol) + + def test_overflow(self): + # unlike numpy's norm, this one is + # safer on overflow + a = array([1e20], dtype=float32) + assert_almost_equal(norm(a), a) + + def test_stable(self): + # more stable than numpy's norm + a = array([1e4] + [1]*10000, dtype=float32) + try: + # snrm in double precision; we obtain the same as for float64 + # -- large atol needed due to varying blas implementations + assert_allclose(norm(a) - 1e4, 0.5, atol=1e-2) + except AssertionError: + # snrm implemented in single precision, == np.linalg.norm result + msg = ": Result should equal either 0.0 or 0.5 (depending on " \ + "implementation of snrm2)." + assert_almost_equal(norm(a) - 1e4, 0.0, err_msg=msg) + + def test_zero_norm(self): + assert_equal(norm([1, 0, 3], 0), 2) + assert_equal(norm([1, 2, 3], 0), 3) + + def test_axis_kwd(self): + a = np.array([[[2, 1], [3, 4]]] * 2, 'd') + assert_allclose(norm(a, axis=1), [[3.60555128, 4.12310563]] * 2) + assert_allclose(norm(a, 1, axis=1), [[5.] 
* 2] * 2) + + def test_keepdims_kwd(self): + a = np.array([[[2, 1], [3, 4]]] * 2, 'd') + b = norm(a, axis=1, keepdims=True) + assert_allclose(b, [[[3.60555128, 4.12310563]]] * 2) + assert_(b.shape == (2, 1, 2)) + assert_allclose(norm(a, 1, axis=2, keepdims=True), [[[3.], [7.]]] * 2) + + @pytest.mark.skipif(not HAS_ILP64, reason="64-bit BLAS required") + def test_large_vector(self): + check_free_memory(free_mb=17000) + x = np.zeros([2**31], dtype=np.float64) + x[-1] = 1 + res = norm(x) + del x + assert_allclose(res, 1.0) + + +class TestMatrixNorms: + + def test_matrix_norms(self): + # Not all of these are matrix norms in the most technical sense. + np.random.seed(1234) + for n, m in (1, 1), (1, 3), (3, 1), (4, 4), (4, 5), (5, 4): + for t in np.float32, np.float64, np.complex64, np.complex128, np.int64: + A = 10 * np.random.randn(n, m).astype(t) + if np.issubdtype(A.dtype, np.complexfloating): + A = (A + 10j * np.random.randn(n, m)).astype(t) + t_high = np.complex128 + else: + t_high = np.float64 + for order in (None, 'fro', 1, -1, 2, -2, np.inf, -np.inf): + actual = norm(A, ord=order) + desired = np.linalg.norm(A, ord=order) + # SciPy may return higher precision matrix norms. + # This is a consequence of using LAPACK. + if not np.allclose(actual, desired): + desired = np.linalg.norm(A.astype(t_high), ord=order) + assert_allclose(actual, desired) + + def test_axis_kwd(self): + a = np.array([[[2, 1], [3, 4]]] * 2, 'd') + b = norm(a, ord=np.inf, axis=(1, 0)) + c = norm(np.swapaxes(a, 0, 1), ord=np.inf, axis=(0, 1)) + d = norm(a, ord=1, axis=(0, 1)) + assert_allclose(b, c) + assert_allclose(c, d) + assert_allclose(b, d) + assert_(b.shape == c.shape == d.shape) + b = norm(a, ord=1, axis=(1, 0)) + c = norm(np.swapaxes(a, 0, 1), ord=1, axis=(0, 1)) + d = norm(a, ord=np.inf, axis=(0, 1)) + assert_allclose(b, c) + assert_allclose(c, d) + assert_allclose(b, d) + assert_(b.shape == c.shape == d.shape) + + def test_keepdims_kwd(self): + a = np.arange(120, dtype='d').reshape(2, 3, 4, 5) + b = norm(a, ord=np.inf, axis=(1, 0), keepdims=True) + c = norm(a, ord=1, axis=(0, 1), keepdims=True) + assert_allclose(b, c) + assert_(b.shape == c.shape) + + +class TestOverwrite: + def test_solve(self): + assert_no_overwrite(solve, [(3, 3), (3,)]) + + def test_solve_triangular(self): + assert_no_overwrite(solve_triangular, [(3, 3), (3,)]) + + def test_solve_banded(self): + assert_no_overwrite(lambda ab, b: solve_banded((2, 1), ab, b), + [(4, 6), (6,)]) + + def test_solveh_banded(self): + assert_no_overwrite(solveh_banded, [(2, 6), (6,)]) + + def test_inv(self): + assert_no_overwrite(inv, [(3, 3)]) + + def test_det(self): + assert_no_overwrite(det, [(3, 3)]) + + def test_lstsq(self): + assert_no_overwrite(lstsq, [(3, 2), (3,)]) + + def test_pinv(self): + assert_no_overwrite(pinv, [(3, 3)]) + + def test_pinvh(self): + assert_no_overwrite(pinvh, [(3, 3)]) + + +class TestSolveCirculant: + + def test_basic1(self): + c = np.array([1, 2, 3, 5]) + b = np.array([1, -1, 1, 0]) + x = solve_circulant(c, b) + y = solve(circulant(c), b) + assert_allclose(x, y) + + def test_basic2(self): + # b is a 2-d matrix. + c = np.array([1, 2, -3, -5]) + b = np.arange(12).reshape(4, 3) + x = solve_circulant(c, b) + y = solve(circulant(c), b) + assert_allclose(x, y) + + def test_basic3(self): + # b is a 3-d matrix. 
+ c = np.array([1, 2, -3, -5]) + b = np.arange(24).reshape(4, 3, 2) + x = solve_circulant(c, b) + y = solve(circulant(c), b) + assert_allclose(x, y) + + def test_complex(self): + # Complex b and c + c = np.array([1+2j, -3, 4j, 5]) + b = np.arange(8).reshape(4, 2) + 0.5j + x = solve_circulant(c, b) + y = solve(circulant(c), b) + assert_allclose(x, y) + + def test_random_b_and_c(self): + # Random b and c + np.random.seed(54321) + c = np.random.randn(50) + b = np.random.randn(50) + x = solve_circulant(c, b) + y = solve(circulant(c), b) + assert_allclose(x, y) + + def test_singular(self): + # c gives a singular circulant matrix. + c = np.array([1, 1, 0, 0]) + b = np.array([1, 2, 3, 4]) + x = solve_circulant(c, b, singular='lstsq') + y, res, rnk, s = lstsq(circulant(c), b) + assert_allclose(x, y) + assert_raises(LinAlgError, solve_circulant, x, y) + + def test_axis_args(self): + # Test use of caxis, baxis and outaxis. + + # c has shape (2, 1, 4) + c = np.array([[[-1, 2.5, 3, 3.5]], [[1, 6, 6, 6.5]]]) + + # b has shape (3, 4) + b = np.array([[0, 0, 1, 1], [1, 1, 0, 0], [1, -1, 0, 0]]) + + x = solve_circulant(c, b, baxis=1) + assert_equal(x.shape, (4, 2, 3)) + expected = np.empty_like(x) + expected[:, 0, :] = solve(circulant(c[0]), b.T) + expected[:, 1, :] = solve(circulant(c[1]), b.T) + assert_allclose(x, expected) + + x = solve_circulant(c, b, baxis=1, outaxis=-1) + assert_equal(x.shape, (2, 3, 4)) + assert_allclose(np.moveaxis(x, -1, 0), expected) + + # np.swapaxes(c, 1, 2) has shape (2, 4, 1); b.T has shape (4, 3). + x = solve_circulant(np.swapaxes(c, 1, 2), b.T, caxis=1) + assert_equal(x.shape, (4, 2, 3)) + assert_allclose(x, expected) + + def test_native_list_arguments(self): + # Same as test_basic1 using python's native list. + c = [1, 2, 3, 5] + b = [1, -1, 1, 0] + x = solve_circulant(c, b) + y = solve(circulant(c), b) + assert_allclose(x, y) + + +class TestMatrix_Balance: + + def test_string_arg(self): + assert_raises(ValueError, matrix_balance, 'Some string for fail') + + def test_infnan_arg(self): + assert_raises(ValueError, matrix_balance, + np.array([[1, 2], [3, np.inf]])) + assert_raises(ValueError, matrix_balance, + np.array([[1, 2], [3, np.nan]])) + + def test_scaling(self): + _, y = matrix_balance(np.array([[1000, 1], [1000, 0]])) + # Pre/post LAPACK 3.5.0 gives the same result up to an offset + # since in each case col norm is x1000 greater and + # 1000 / 32 ~= 1 * 32 hence balanced with 2 ** 5. 
+ assert_allclose(np.diff(np.log2(np.diag(y))), [5]) + + def test_scaling_order(self): + A = np.array([[1, 0, 1e-4], [1, 1, 1e-2], [1e4, 1e2, 1]]) + x, y = matrix_balance(A) + assert_allclose(solve(y, A).dot(y), x) + + def test_separate(self): + _, (y, z) = matrix_balance(np.array([[1000, 1], [1000, 0]]), + separate=1) + assert_equal(np.diff(np.log2(y)), [5]) + assert_allclose(z, np.arange(2)) + + def test_permutation(self): + A = block_diag(np.ones((2, 2)), np.tril(np.ones((2, 2))), + np.ones((3, 3))) + x, (y, z) = matrix_balance(A, separate=1) + assert_allclose(y, np.ones_like(y)) + assert_allclose(z, np.array([0, 1, 6, 5, 4, 3, 2])) + + def test_perm_and_scaling(self): + # Matrix with its diagonal removed + cases = ( # Case 0 + np.array([[0., 0., 0., 0., 0.000002], + [0., 0., 0., 0., 0.], + [2., 2., 0., 0., 0.], + [2., 2., 0., 0., 0.], + [0., 0., 0.000002, 0., 0.]]), + # Case 1 user reported GH-7258 + np.array([[-0.5, 0., 0., 0.], + [0., -1., 0., 0.], + [1., 0., -0.5, 0.], + [0., 1., 0., -1.]]), + # Case 2 user reported GH-7258 + np.array([[-3., 0., 1., 0.], + [-1., -1., -0., 1.], + [-3., -0., -0., 0.], + [-1., -0., 1., -1.]]) + ) + + for A in cases: + x, y = matrix_balance(A) + x, (s, p) = matrix_balance(A, separate=1) + ip = np.empty_like(p) + ip[p] = np.arange(A.shape[0]) + assert_allclose(y, np.diag(s)[ip, :]) + assert_allclose(solve(y, A).dot(y), x) diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_blas.py b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_blas.py new file mode 100644 index 0000000000000000000000000000000000000000..727dfa45d37beea695febfe34a0f4415bc045e94 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_blas.py @@ -0,0 +1,1114 @@ +# +# Created by: Pearu Peterson, April 2002 +# + +import math +import pytest +import numpy as np +from numpy.testing import (assert_equal, assert_almost_equal, assert_, + assert_array_almost_equal, assert_allclose) +from pytest import raises as assert_raises + +from numpy import float32, float64, complex64, complex128, arange, triu, \ + tril, zeros, tril_indices, ones, mod, diag, append, eye, \ + nonzero + +from numpy.random import rand, seed +import scipy +from scipy.linalg import _fblas as fblas, get_blas_funcs, toeplitz, solve + +try: + from scipy.linalg import _cblas as cblas +except ImportError: + cblas = None + +REAL_DTYPES = [float32, float64] +COMPLEX_DTYPES = [complex64, complex128] +DTYPES = REAL_DTYPES + COMPLEX_DTYPES + + +def test_get_blas_funcs(): + # check that it returns Fortran code for arrays that are + # fortran-ordered + f1, f2, f3 = get_blas_funcs( + ('axpy', 'axpy', 'axpy'), + (np.empty((2, 2), dtype=np.complex64, order='F'), + np.empty((2, 2), dtype=np.complex128, order='C')) + ) + + # get_blas_funcs will choose libraries depending on most generic + # array + assert_equal(f1.typecode, 'z') + assert_equal(f2.typecode, 'z') + if cblas is not None: + assert_equal(f1.module_name, 'cblas') + assert_equal(f2.module_name, 'cblas') + + # check defaults. 
+ f1 = get_blas_funcs('rotg') + assert_equal(f1.typecode, 'd') + + # check also dtype interface + f1 = get_blas_funcs('gemm', dtype=np.complex64) + assert_equal(f1.typecode, 'c') + f1 = get_blas_funcs('gemm', dtype='F') + assert_equal(f1.typecode, 'c') + + # extended precision complex + f1 = get_blas_funcs('gemm', dtype=np.clongdouble) + assert_equal(f1.typecode, 'z') + + # check safe complex upcasting + f1 = get_blas_funcs('axpy', + (np.empty((2, 2), dtype=np.float64), + np.empty((2, 2), dtype=np.complex64)) + ) + assert_equal(f1.typecode, 'z') + + +def test_get_blas_funcs_alias(): + # check alias for get_blas_funcs + f, g = get_blas_funcs(('nrm2', 'dot'), dtype=np.complex64) + assert f.typecode == 'c' + assert g.typecode == 'c' + + f, g, h = get_blas_funcs(('dot', 'dotc', 'dotu'), dtype=np.float64) + assert f is g + assert f is h + + +class TestCBLAS1Simple: + + def test_axpy(self): + for p in 'sd': + f = getattr(cblas, p+'axpy', None) + if f is None: + continue + assert_array_almost_equal(f([1, 2, 3], [2, -1, 3], a=5), + [7, 9, 18]) + for p in 'cz': + f = getattr(cblas, p+'axpy', None) + if f is None: + continue + assert_array_almost_equal(f([1, 2j, 3], [2, -1, 3], a=5), + [7, 10j-1, 18]) + + +class TestFBLAS1Simple: + + def test_axpy(self): + for p in 'sd': + f = getattr(fblas, p+'axpy', None) + if f is None: + continue + assert_array_almost_equal(f([1, 2, 3], [2, -1, 3], a=5), + [7, 9, 18]) + for p in 'cz': + f = getattr(fblas, p+'axpy', None) + if f is None: + continue + assert_array_almost_equal(f([1, 2j, 3], [2, -1, 3], a=5), + [7, 10j-1, 18]) + + def test_copy(self): + for p in 'sd': + f = getattr(fblas, p+'copy', None) + if f is None: + continue + assert_array_almost_equal(f([3, 4, 5], [8]*3), [3, 4, 5]) + for p in 'cz': + f = getattr(fblas, p+'copy', None) + if f is None: + continue + assert_array_almost_equal(f([3, 4j, 5+3j], [8]*3), [3, 4j, 5+3j]) + + def test_asum(self): + for p in 'sd': + f = getattr(fblas, p+'asum', None) + if f is None: + continue + assert_almost_equal(f([3, -4, 5]), 12) + for p in ['sc', 'dz']: + f = getattr(fblas, p+'asum', None) + if f is None: + continue + assert_almost_equal(f([3j, -4, 3-4j]), 14) + + def test_dot(self): + for p in 'sd': + f = getattr(fblas, p+'dot', None) + if f is None: + continue + assert_almost_equal(f([3, -4, 5], [2, 5, 1]), -9) + + def test_complex_dotu(self): + for p in 'cz': + f = getattr(fblas, p+'dotu', None) + if f is None: + continue + assert_almost_equal(f([3j, -4, 3-4j], [2, 3, 1]), -9+2j) + + def test_complex_dotc(self): + for p in 'cz': + f = getattr(fblas, p+'dotc', None) + if f is None: + continue + assert_almost_equal(f([3j, -4, 3-4j], [2, 3j, 1]), 3-14j) + + def test_nrm2(self): + for p in 'sd': + f = getattr(fblas, p+'nrm2', None) + if f is None: + continue + assert_almost_equal(f([3, -4, 5]), math.sqrt(50)) + for p in ['c', 'z', 'sc', 'dz']: + f = getattr(fblas, p+'nrm2', None) + if f is None: + continue + assert_almost_equal(f([3j, -4, 3-4j]), math.sqrt(50)) + + def test_scal(self): + for p in 'sd': + f = getattr(fblas, p+'scal', None) + if f is None: + continue + assert_array_almost_equal(f(2, [3, -4, 5]), [6, -8, 10]) + for p in 'cz': + f = getattr(fblas, p+'scal', None) + if f is None: + continue + assert_array_almost_equal(f(3j, [3j, -4, 3-4j]), [-9, -12j, 12+9j]) + for p in ['cs', 'zd']: + f = getattr(fblas, p+'scal', None) + if f is None: + continue + assert_array_almost_equal(f(3, [3j, -4, 3-4j]), [9j, -12, 9-12j]) + + def test_swap(self): + for p in 'sd': + f = getattr(fblas, p+'swap', None) + if f is 
None: + continue + x, y = [2, 3, 1], [-2, 3, 7] + x1, y1 = f(x, y) + assert_array_almost_equal(x1, y) + assert_array_almost_equal(y1, x) + for p in 'cz': + f = getattr(fblas, p+'swap', None) + if f is None: + continue + x, y = [2, 3j, 1], [-2, 3, 7-3j] + x1, y1 = f(x, y) + assert_array_almost_equal(x1, y) + assert_array_almost_equal(y1, x) + + def test_amax(self): + for p in 'sd': + f = getattr(fblas, 'i'+p+'amax') + assert_equal(f([-2, 4, 3]), 1) + for p in 'cz': + f = getattr(fblas, 'i'+p+'amax') + assert_equal(f([-5, 4+3j, 6]), 1) + # XXX: need tests for rot,rotm,rotg,rotmg + + +class TestFBLAS2Simple: + + def test_gemv(self): + for p in 'sd': + f = getattr(fblas, p+'gemv', None) + if f is None: + continue + assert_array_almost_equal(f(3, [[3]], [-4]), [-36]) + assert_array_almost_equal(f(3, [[3]], [-4], 3, [5]), [-21]) + for p in 'cz': + f = getattr(fblas, p+'gemv', None) + if f is None: + continue + assert_array_almost_equal(f(3j, [[3-4j]], [-4]), [-48-36j]) + assert_array_almost_equal(f(3j, [[3-4j]], [-4], 3, [5j]), + [-48-21j]) + + def test_ger(self): + + for p in 'sd': + f = getattr(fblas, p+'ger', None) + if f is None: + continue + assert_array_almost_equal(f(1, [1, 2], [3, 4]), [[3, 4], [6, 8]]) + assert_array_almost_equal(f(2, [1, 2, 3], [3, 4]), + [[6, 8], [12, 16], [18, 24]]) + + assert_array_almost_equal(f(1, [1, 2], [3, 4], + a=[[1, 2], [3, 4]]), [[4, 6], [9, 12]]) + + for p in 'cz': + f = getattr(fblas, p+'geru', None) + if f is None: + continue + assert_array_almost_equal(f(1, [1j, 2], [3, 4]), + [[3j, 4j], [6, 8]]) + assert_array_almost_equal(f(-2, [1j, 2j, 3j], [3j, 4j]), + [[6, 8], [12, 16], [18, 24]]) + + for p in 'cz': + for name in ('ger', 'gerc'): + f = getattr(fblas, p+name, None) + if f is None: + continue + assert_array_almost_equal(f(1, [1j, 2], [3, 4]), + [[3j, 4j], [6, 8]]) + assert_array_almost_equal(f(2, [1j, 2j, 3j], [3j, 4j]), + [[6, 8], [12, 16], [18, 24]]) + + def test_syr_her(self): + x = np.arange(1, 5, dtype='d') + resx = np.triu(x[:, np.newaxis] * x) + resx_reverse = np.triu(x[::-1, np.newaxis] * x[::-1]) + + y = np.linspace(0, 8.5, 17, endpoint=False) + + z = np.arange(1, 9, dtype='d').view('D') + resz = np.triu(z[:, np.newaxis] * z) + resz_reverse = np.triu(z[::-1, np.newaxis] * z[::-1]) + rehz = np.triu(z[:, np.newaxis] * z.conj()) + rehz_reverse = np.triu(z[::-1, np.newaxis] * z[::-1].conj()) + + w = np.c_[np.zeros(4), z, np.zeros(4)].ravel() + + for p, rtol in zip('sd', [1e-7, 1e-14]): + f = getattr(fblas, p+'syr', None) + if f is None: + continue + assert_allclose(f(1.0, x), resx, rtol=rtol) + assert_allclose(f(1.0, x, lower=True), resx.T, rtol=rtol) + assert_allclose(f(1.0, y, incx=2, offx=2, n=4), resx, rtol=rtol) + # negative increments imply reversed vectors in blas + assert_allclose(f(1.0, y, incx=-2, offx=2, n=4), + resx_reverse, rtol=rtol) + + a = np.zeros((4, 4), 'f' if p == 's' else 'd', 'F') + b = f(1.0, x, a=a, overwrite_a=True) + assert_allclose(a, resx, rtol=rtol) + + b = f(2.0, x, a=a) + assert_(a is not b) + assert_allclose(b, 3*resx, rtol=rtol) + + assert_raises(Exception, f, 1.0, x, incx=0) + assert_raises(Exception, f, 1.0, x, offx=5) + assert_raises(Exception, f, 1.0, x, offx=-2) + assert_raises(Exception, f, 1.0, x, n=-2) + assert_raises(Exception, f, 1.0, x, n=5) + assert_raises(Exception, f, 1.0, x, lower=2) + assert_raises(Exception, f, 1.0, x, a=np.zeros((2, 2), 'd', 'F')) + + for p, rtol in zip('cz', [1e-7, 1e-14]): + f = getattr(fblas, p+'syr', None) + if f is None: + continue + assert_allclose(f(1.0, z), resz, 
rtol=rtol) + assert_allclose(f(1.0, z, lower=True), resz.T, rtol=rtol) + assert_allclose(f(1.0, w, incx=3, offx=1, n=4), resz, rtol=rtol) + # negative increments imply reversed vectors in blas + assert_allclose(f(1.0, w, incx=-3, offx=1, n=4), + resz_reverse, rtol=rtol) + + a = np.zeros((4, 4), 'F' if p == 'c' else 'D', 'F') + b = f(1.0, z, a=a, overwrite_a=True) + assert_allclose(a, resz, rtol=rtol) + + b = f(2.0, z, a=a) + assert_(a is not b) + assert_allclose(b, 3*resz, rtol=rtol) + + assert_raises(Exception, f, 1.0, x, incx=0) + assert_raises(Exception, f, 1.0, x, offx=5) + assert_raises(Exception, f, 1.0, x, offx=-2) + assert_raises(Exception, f, 1.0, x, n=-2) + assert_raises(Exception, f, 1.0, x, n=5) + assert_raises(Exception, f, 1.0, x, lower=2) + assert_raises(Exception, f, 1.0, x, a=np.zeros((2, 2), 'd', 'F')) + + for p, rtol in zip('cz', [1e-7, 1e-14]): + f = getattr(fblas, p+'her', None) + if f is None: + continue + assert_allclose(f(1.0, z), rehz, rtol=rtol) + assert_allclose(f(1.0, z, lower=True), rehz.T.conj(), rtol=rtol) + assert_allclose(f(1.0, w, incx=3, offx=1, n=4), rehz, rtol=rtol) + # negative increments imply reversed vectors in blas + assert_allclose(f(1.0, w, incx=-3, offx=1, n=4), + rehz_reverse, rtol=rtol) + + a = np.zeros((4, 4), 'F' if p == 'c' else 'D', 'F') + b = f(1.0, z, a=a, overwrite_a=True) + assert_allclose(a, rehz, rtol=rtol) + + b = f(2.0, z, a=a) + assert_(a is not b) + assert_allclose(b, 3*rehz, rtol=rtol) + + assert_raises(Exception, f, 1.0, x, incx=0) + assert_raises(Exception, f, 1.0, x, offx=5) + assert_raises(Exception, f, 1.0, x, offx=-2) + assert_raises(Exception, f, 1.0, x, n=-2) + assert_raises(Exception, f, 1.0, x, n=5) + assert_raises(Exception, f, 1.0, x, lower=2) + assert_raises(Exception, f, 1.0, x, a=np.zeros((2, 2), 'd', 'F')) + + def test_syr2(self): + x = np.arange(1, 5, dtype='d') + y = np.arange(5, 9, dtype='d') + resxy = np.triu(x[:, np.newaxis] * y + y[:, np.newaxis] * x) + resxy_reverse = np.triu(x[::-1, np.newaxis] * y[::-1] + + y[::-1, np.newaxis] * x[::-1]) + + q = np.linspace(0, 8.5, 17, endpoint=False) + + for p, rtol in zip('sd', [1e-7, 1e-14]): + f = getattr(fblas, p+'syr2', None) + if f is None: + continue + assert_allclose(f(1.0, x, y), resxy, rtol=rtol) + assert_allclose(f(1.0, x, y, n=3), resxy[:3, :3], rtol=rtol) + assert_allclose(f(1.0, x, y, lower=True), resxy.T, rtol=rtol) + + assert_allclose(f(1.0, q, q, incx=2, offx=2, incy=2, offy=10), + resxy, rtol=rtol) + assert_allclose(f(1.0, q, q, incx=2, offx=2, incy=2, offy=10, n=3), + resxy[:3, :3], rtol=rtol) + # negative increments imply reversed vectors in blas + assert_allclose(f(1.0, q, q, incx=-2, offx=2, incy=-2, offy=10), + resxy_reverse, rtol=rtol) + + a = np.zeros((4, 4), 'f' if p == 's' else 'd', 'F') + b = f(1.0, x, y, a=a, overwrite_a=True) + assert_allclose(a, resxy, rtol=rtol) + + b = f(2.0, x, y, a=a) + assert_(a is not b) + assert_allclose(b, 3*resxy, rtol=rtol) + + assert_raises(Exception, f, 1.0, x, y, incx=0) + assert_raises(Exception, f, 1.0, x, y, offx=5) + assert_raises(Exception, f, 1.0, x, y, offx=-2) + assert_raises(Exception, f, 1.0, x, y, incy=0) + assert_raises(Exception, f, 1.0, x, y, offy=5) + assert_raises(Exception, f, 1.0, x, y, offy=-2) + assert_raises(Exception, f, 1.0, x, y, n=-2) + assert_raises(Exception, f, 1.0, x, y, n=5) + assert_raises(Exception, f, 1.0, x, y, lower=2) + assert_raises(Exception, f, 1.0, x, y, + a=np.zeros((2, 2), 'd', 'F')) + + def test_her2(self): + x = np.arange(1, 9, dtype='d').view('D') + y = np.arange(9, 
17, dtype='d').view('D') + resxy = x[:, np.newaxis] * y.conj() + y[:, np.newaxis] * x.conj() + resxy = np.triu(resxy) + + resxy_reverse = x[::-1, np.newaxis] * y[::-1].conj() + resxy_reverse += y[::-1, np.newaxis] * x[::-1].conj() + resxy_reverse = np.triu(resxy_reverse) + + u = np.c_[np.zeros(4), x, np.zeros(4)].ravel() + v = np.c_[np.zeros(4), y, np.zeros(4)].ravel() + + for p, rtol in zip('cz', [1e-7, 1e-14]): + f = getattr(fblas, p+'her2', None) + if f is None: + continue + assert_allclose(f(1.0, x, y), resxy, rtol=rtol) + assert_allclose(f(1.0, x, y, n=3), resxy[:3, :3], rtol=rtol) + assert_allclose(f(1.0, x, y, lower=True), resxy.T.conj(), + rtol=rtol) + + assert_allclose(f(1.0, u, v, incx=3, offx=1, incy=3, offy=1), + resxy, rtol=rtol) + assert_allclose(f(1.0, u, v, incx=3, offx=1, incy=3, offy=1, n=3), + resxy[:3, :3], rtol=rtol) + # negative increments imply reversed vectors in blas + assert_allclose(f(1.0, u, v, incx=-3, offx=1, incy=-3, offy=1), + resxy_reverse, rtol=rtol) + + a = np.zeros((4, 4), 'F' if p == 'c' else 'D', 'F') + b = f(1.0, x, y, a=a, overwrite_a=True) + assert_allclose(a, resxy, rtol=rtol) + + b = f(2.0, x, y, a=a) + assert_(a is not b) + assert_allclose(b, 3*resxy, rtol=rtol) + + assert_raises(Exception, f, 1.0, x, y, incx=0) + assert_raises(Exception, f, 1.0, x, y, offx=5) + assert_raises(Exception, f, 1.0, x, y, offx=-2) + assert_raises(Exception, f, 1.0, x, y, incy=0) + assert_raises(Exception, f, 1.0, x, y, offy=5) + assert_raises(Exception, f, 1.0, x, y, offy=-2) + assert_raises(Exception, f, 1.0, x, y, n=-2) + assert_raises(Exception, f, 1.0, x, y, n=5) + assert_raises(Exception, f, 1.0, x, y, lower=2) + assert_raises(Exception, f, 1.0, x, y, + a=np.zeros((2, 2), 'd', 'F')) + + def test_gbmv(self): + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 7 + m = 5 + kl = 1 + ku = 2 + # fake a banded matrix via toeplitz + A = toeplitz(append(rand(kl+1), zeros(m-kl-1)), + append(rand(ku+1), zeros(n-ku-1))) + A = A.astype(dtype) + Ab = zeros((kl+ku+1, n), dtype=dtype) + + # Form the banded storage + Ab[2, :5] = A[0, 0] # diag + Ab[1, 1:6] = A[0, 1] # sup1 + Ab[0, 2:7] = A[0, 2] # sup2 + Ab[3, :4] = A[1, 0] # sub1 + + x = rand(n).astype(dtype) + y = rand(m).astype(dtype) + alpha, beta = dtype(3), dtype(-5) + + func, = get_blas_funcs(('gbmv',), dtype=dtype) + y1 = func(m=m, n=n, ku=ku, kl=kl, alpha=alpha, a=Ab, + x=x, y=y, beta=beta) + y2 = alpha * A.dot(x) + beta * y + assert_array_almost_equal(y1, y2) + + y1 = func(m=m, n=n, ku=ku, kl=kl, alpha=alpha, a=Ab, + x=y, y=x, beta=beta, trans=1) + y2 = alpha * A.T.dot(y) + beta * x + assert_array_almost_equal(y1, y2) + + def test_sbmv_hbmv(self): + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 6 + k = 2 + A = zeros((n, n), dtype=dtype) + Ab = zeros((k+1, n), dtype=dtype) + + # Form the array and its packed banded storage + A[arange(n), arange(n)] = rand(n) + for ind2 in range(1, k+1): + temp = rand(n-ind2) + A[arange(n-ind2), arange(ind2, n)] = temp + Ab[-1-ind2, ind2:] = temp + A = A.astype(dtype) + A = A + A.T if ind < 2 else A + A.conj().T + Ab[-1, :] = diag(A) + x = rand(n).astype(dtype) + y = rand(n).astype(dtype) + alpha, beta = dtype(1.25), dtype(3) + + if ind > 1: + func, = get_blas_funcs(('hbmv',), dtype=dtype) + else: + func, = get_blas_funcs(('sbmv',), dtype=dtype) + y1 = func(k=k, alpha=alpha, a=Ab, x=x, y=y, beta=beta) + y2 = alpha * A.dot(x) + beta * y + assert_array_almost_equal(y1, y2) + + def test_spmv_hpmv(self): + seed(1234) + for ind, dtype in enumerate(DTYPES+COMPLEX_DTYPES): + n = 
3 + A = rand(n, n).astype(dtype) + if ind > 1: + A += rand(n, n)*1j + A = A.astype(dtype) + A = A + A.T if ind < 4 else A + A.conj().T + c, r = tril_indices(n) + Ap = A[r, c] + x = rand(n).astype(dtype) + y = rand(n).astype(dtype) + xlong = arange(2*n).astype(dtype) + ylong = ones(2*n).astype(dtype) + alpha, beta = dtype(1.25), dtype(2) + + if ind > 3: + func, = get_blas_funcs(('hpmv',), dtype=dtype) + else: + func, = get_blas_funcs(('spmv',), dtype=dtype) + y1 = func(n=n, alpha=alpha, ap=Ap, x=x, y=y, beta=beta) + y2 = alpha * A.dot(x) + beta * y + assert_array_almost_equal(y1, y2) + + # Test inc and offsets + y1 = func(n=n-1, alpha=alpha, beta=beta, x=xlong, y=ylong, ap=Ap, + incx=2, incy=2, offx=n, offy=n) + y2 = (alpha * A[:-1, :-1]).dot(xlong[3::2]) + beta * ylong[3::2] + assert_array_almost_equal(y1[3::2], y2) + assert_almost_equal(y1[4], ylong[4]) + + def test_spr_hpr(self): + seed(1234) + for ind, dtype in enumerate(DTYPES+COMPLEX_DTYPES): + n = 3 + A = rand(n, n).astype(dtype) + if ind > 1: + A += rand(n, n)*1j + A = A.astype(dtype) + A = A + A.T if ind < 4 else A + A.conj().T + c, r = tril_indices(n) + Ap = A[r, c] + x = rand(n).astype(dtype) + alpha = (DTYPES+COMPLEX_DTYPES)[mod(ind, 4)](2.5) + + if ind > 3: + func, = get_blas_funcs(('hpr',), dtype=dtype) + y2 = alpha * x[:, None].dot(x[None, :].conj()) + A + else: + func, = get_blas_funcs(('spr',), dtype=dtype) + y2 = alpha * x[:, None].dot(x[None, :]) + A + + y1 = func(n=n, alpha=alpha, ap=Ap, x=x) + y1f = zeros((3, 3), dtype=dtype) + y1f[r, c] = y1 + y1f[c, r] = y1.conj() if ind > 3 else y1 + assert_array_almost_equal(y1f, y2) + + def test_spr2_hpr2(self): + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 3 + A = rand(n, n).astype(dtype) + if ind > 1: + A += rand(n, n)*1j + A = A.astype(dtype) + A = A + A.T if ind < 2 else A + A.conj().T + c, r = tril_indices(n) + Ap = A[r, c] + x = rand(n).astype(dtype) + y = rand(n).astype(dtype) + alpha = dtype(2) + + if ind > 1: + func, = get_blas_funcs(('hpr2',), dtype=dtype) + else: + func, = get_blas_funcs(('spr2',), dtype=dtype) + + u = alpha.conj() * x[:, None].dot(y[None, :].conj()) + y2 = A + u + u.conj().T + y1 = func(n=n, alpha=alpha, x=x, y=y, ap=Ap) + y1f = zeros((3, 3), dtype=dtype) + y1f[r, c] = y1 + y1f[[1, 2, 2], [0, 0, 1]] = y1[[1, 3, 4]].conj() + assert_array_almost_equal(y1f, y2) + + def test_tbmv(self): + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 10 + k = 3 + x = rand(n).astype(dtype) + A = zeros((n, n), dtype=dtype) + # Banded upper triangular array + for sup in range(k+1): + A[arange(n-sup), arange(sup, n)] = rand(n-sup) + + # Add complex parts for c,z + if ind > 1: + A[nonzero(A)] += 1j * rand((k+1)*n-(k*(k+1)//2)).astype(dtype) + + # Form the banded storage + Ab = zeros((k+1, n), dtype=dtype) + for row in range(k+1): + Ab[-row-1, row:] = diag(A, k=row) + func, = get_blas_funcs(('tbmv',), dtype=dtype) + + y1 = func(k=k, a=Ab, x=x) + y2 = A.dot(x) + assert_array_almost_equal(y1, y2) + + y1 = func(k=k, a=Ab, x=x, diag=1) + A[arange(n), arange(n)] = dtype(1) + y2 = A.dot(x) + assert_array_almost_equal(y1, y2) + + y1 = func(k=k, a=Ab, x=x, diag=1, trans=1) + y2 = A.T.dot(x) + assert_array_almost_equal(y1, y2) + + y1 = func(k=k, a=Ab, x=x, diag=1, trans=2) + y2 = A.conj().T.dot(x) + assert_array_almost_equal(y1, y2) + + def test_tbsv(self): + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 6 + k = 3 + x = rand(n).astype(dtype) + A = zeros((n, n), dtype=dtype) + # Banded upper triangular array + for sup in range(k+1): + A[arange(n-sup), 
arange(sup, n)] = rand(n-sup) + + # Add complex parts for c,z + if ind > 1: + A[nonzero(A)] += 1j * rand((k+1)*n-(k*(k+1)//2)).astype(dtype) + + # Form the banded storage + Ab = zeros((k+1, n), dtype=dtype) + for row in range(k+1): + Ab[-row-1, row:] = diag(A, k=row) + func, = get_blas_funcs(('tbsv',), dtype=dtype) + + y1 = func(k=k, a=Ab, x=x) + y2 = solve(A, x) + assert_array_almost_equal(y1, y2) + + y1 = func(k=k, a=Ab, x=x, diag=1) + A[arange(n), arange(n)] = dtype(1) + y2 = solve(A, x) + assert_array_almost_equal(y1, y2) + + y1 = func(k=k, a=Ab, x=x, diag=1, trans=1) + y2 = solve(A.T, x) + assert_array_almost_equal(y1, y2) + + y1 = func(k=k, a=Ab, x=x, diag=1, trans=2) + y2 = solve(A.conj().T, x) + assert_array_almost_equal(y1, y2) + + def test_tpmv(self): + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 10 + x = rand(n).astype(dtype) + # Upper triangular array + A = triu(rand(n, n)) if ind < 2 else triu(rand(n, n)+rand(n, n)*1j) + # Form the packed storage + c, r = tril_indices(n) + Ap = A[r, c] + func, = get_blas_funcs(('tpmv',), dtype=dtype) + + y1 = func(n=n, ap=Ap, x=x) + y2 = A.dot(x) + assert_array_almost_equal(y1, y2) + + y1 = func(n=n, ap=Ap, x=x, diag=1) + A[arange(n), arange(n)] = dtype(1) + y2 = A.dot(x) + assert_array_almost_equal(y1, y2) + + y1 = func(n=n, ap=Ap, x=x, diag=1, trans=1) + y2 = A.T.dot(x) + assert_array_almost_equal(y1, y2) + + y1 = func(n=n, ap=Ap, x=x, diag=1, trans=2) + y2 = A.conj().T.dot(x) + assert_array_almost_equal(y1, y2) + + def test_tpsv(self): + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 10 + x = rand(n).astype(dtype) + # Upper triangular array + A = triu(rand(n, n)) if ind < 2 else triu(rand(n, n)+rand(n, n)*1j) + A += eye(n) + # Form the packed storage + c, r = tril_indices(n) + Ap = A[r, c] + func, = get_blas_funcs(('tpsv',), dtype=dtype) + + y1 = func(n=n, ap=Ap, x=x) + y2 = solve(A, x) + assert_array_almost_equal(y1, y2) + + y1 = func(n=n, ap=Ap, x=x, diag=1) + A[arange(n), arange(n)] = dtype(1) + y2 = solve(A, x) + assert_array_almost_equal(y1, y2) + + y1 = func(n=n, ap=Ap, x=x, diag=1, trans=1) + y2 = solve(A.T, x) + assert_array_almost_equal(y1, y2) + + y1 = func(n=n, ap=Ap, x=x, diag=1, trans=2) + y2 = solve(A.conj().T, x) + assert_array_almost_equal(y1, y2) + + def test_trmv(self): + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 3 + A = (rand(n, n)+eye(n)).astype(dtype) + x = rand(3).astype(dtype) + func, = get_blas_funcs(('trmv',), dtype=dtype) + + y1 = func(a=A, x=x) + y2 = triu(A).dot(x) + assert_array_almost_equal(y1, y2) + + y1 = func(a=A, x=x, diag=1) + A[arange(n), arange(n)] = dtype(1) + y2 = triu(A).dot(x) + assert_array_almost_equal(y1, y2) + + y1 = func(a=A, x=x, diag=1, trans=1) + y2 = triu(A).T.dot(x) + assert_array_almost_equal(y1, y2) + + y1 = func(a=A, x=x, diag=1, trans=2) + y2 = triu(A).conj().T.dot(x) + assert_array_almost_equal(y1, y2) + + def test_trsv(self): + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 15 + A = (rand(n, n)+eye(n)).astype(dtype) + x = rand(n).astype(dtype) + func, = get_blas_funcs(('trsv',), dtype=dtype) + + y1 = func(a=A, x=x) + y2 = solve(triu(A), x) + assert_array_almost_equal(y1, y2) + + y1 = func(a=A, x=x, lower=1) + y2 = solve(tril(A), x) + assert_array_almost_equal(y1, y2) + + y1 = func(a=A, x=x, diag=1) + A[arange(n), arange(n)] = dtype(1) + y2 = solve(triu(A), x) + assert_array_almost_equal(y1, y2) + + y1 = func(a=A, x=x, diag=1, trans=1) + y2 = solve(triu(A).T, x) + assert_array_almost_equal(y1, y2) + + y1 = func(a=A, x=x, diag=1, trans=2) + y2 
= solve(triu(A).conj().T, x) + assert_array_almost_equal(y1, y2) + + +class TestFBLAS3Simple: + + def test_gemm(self): + for p in 'sd': + f = getattr(fblas, p+'gemm', None) + if f is None: + continue + assert_array_almost_equal(f(3, [3], [-4]), [[-36]]) + assert_array_almost_equal(f(3, [3], [-4], 3, [5]), [-21]) + for p in 'cz': + f = getattr(fblas, p+'gemm', None) + if f is None: + continue + assert_array_almost_equal(f(3j, [3-4j], [-4]), [[-48-36j]]) + assert_array_almost_equal(f(3j, [3-4j], [-4], 3, [5j]), [-48-21j]) + + +def _get_func(func, ps='sdzc'): + """Just a helper: return a specified BLAS function w/typecode.""" + for p in ps: + f = getattr(fblas, p+func, None) + if f is None: + continue + yield f + + +class TestBLAS3Symm: + + def setup_method(self): + self.a = np.array([[1., 2.], + [0., 1.]]) + self.b = np.array([[1., 0., 3.], + [0., -1., 2.]]) + self.c = np.ones((2, 3)) + self.t = np.array([[2., -1., 8.], + [3., 0., 9.]]) + + def test_symm(self): + for f in _get_func('symm'): + res = f(a=self.a, b=self.b, c=self.c, alpha=1., beta=1.) + assert_array_almost_equal(res, self.t) + + res = f(a=self.a.T, b=self.b, lower=1, c=self.c, alpha=1., beta=1.) + assert_array_almost_equal(res, self.t) + + res = f(a=self.a, b=self.b.T, side=1, c=self.c.T, + alpha=1., beta=1.) + assert_array_almost_equal(res, self.t.T) + + def test_summ_wrong_side(self): + f = getattr(fblas, 'dsymm', None) + if f is not None: + assert_raises(Exception, f, **{'a': self.a, 'b': self.b, + 'alpha': 1, 'side': 1}) + # `side=1` means C <- B*A, hence shapes of A and B are to be + # compatible. Otherwise, f2py exception is raised + + def test_symm_wrong_uplo(self): + """SYMM only considers the upper/lower part of A. Hence setting + wrong value for `lower` (default is lower=0, meaning upper triangle) + gives a wrong result. + """ + f = getattr(fblas, 'dsymm', None) + if f is not None: + res = f(a=self.a, b=self.b, c=self.c, alpha=1., beta=1.) + assert np.allclose(res, self.t) + + res = f(a=self.a, b=self.b, lower=1, c=self.c, alpha=1., beta=1.) + assert not np.allclose(res, self.t) + + +class TestBLAS3Syrk: + def setup_method(self): + self.a = np.array([[1., 0.], + [0., -2.], + [2., 3.]]) + self.t = np.array([[1., 0., 2.], + [0., 4., -6.], + [2., -6., 13.]]) + self.tt = np.array([[5., 6.], + [6., 13.]]) + + def test_syrk(self): + for f in _get_func('syrk'): + c = f(a=self.a, alpha=1.) + assert_array_almost_equal(np.triu(c), np.triu(self.t)) + + c = f(a=self.a, alpha=1., lower=1) + assert_array_almost_equal(np.tril(c), np.tril(self.t)) + + c0 = np.ones(self.t.shape) + c = f(a=self.a, alpha=1., beta=1., c=c0) + assert_array_almost_equal(np.triu(c), np.triu(self.t+c0)) + + c = f(a=self.a, alpha=1., trans=1) + assert_array_almost_equal(np.triu(c), np.triu(self.tt)) + + # prints '0-th dimension must be fixed to 3 but got 5', + # FIXME: suppress? + # FIXME: how to catch the _fblas.error? + def test_syrk_wrong_c(self): + f = getattr(fblas, 'dsyrk', None) + if f is not None: + assert_raises(Exception, f, **{'a': self.a, 'alpha': 1., + 'c': np.ones((5, 8))}) + # if C is supplied, it must have compatible dimensions + + +class TestBLAS3Syr2k: + def setup_method(self): + self.a = np.array([[1., 0.], + [0., -2.], + [2., 3.]]) + self.b = np.array([[0., 1.], + [1., 0.], + [0, 1.]]) + self.t = np.array([[0., -1., 3.], + [-1., 0., 0.], + [3., 0., 6.]]) + self.tt = np.array([[0., 1.], + [1., 6]]) + + def test_syr2k(self): + for f in _get_func('syr2k'): + c = f(a=self.a, b=self.b, alpha=1.) 
+ assert_array_almost_equal(np.triu(c), np.triu(self.t)) + + c = f(a=self.a, b=self.b, alpha=1., lower=1) + assert_array_almost_equal(np.tril(c), np.tril(self.t)) + + c0 = np.ones(self.t.shape) + c = f(a=self.a, b=self.b, alpha=1., beta=1., c=c0) + assert_array_almost_equal(np.triu(c), np.triu(self.t+c0)) + + c = f(a=self.a, b=self.b, alpha=1., trans=1) + assert_array_almost_equal(np.triu(c), np.triu(self.tt)) + + # prints '0-th dimension must be fixed to 3 but got 5', FIXME: suppress? + def test_syr2k_wrong_c(self): + f = getattr(fblas, 'dsyr2k', None) + if f is not None: + assert_raises(Exception, f, **{'a': self.a, + 'b': self.b, + 'alpha': 1., + 'c': np.zeros((15, 8))}) + # if C is supplied, it must have compatible dimensions + + +class TestSyHe: + """Quick and simple tests for (zc)-symm, syrk, syr2k.""" + + def setup_method(self): + self.sigma_y = np.array([[0., -1.j], + [1.j, 0.]]) + + def test_symm_zc(self): + for f in _get_func('symm', 'zc'): + # NB: a is symmetric w/upper diag of ONLY + res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.) + assert_array_almost_equal(np.triu(res), np.diag([1, -1])) + + def test_hemm_zc(self): + for f in _get_func('hemm', 'zc'): + # NB: a is hermitian w/upper diag of ONLY + res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.) + assert_array_almost_equal(np.triu(res), np.diag([1, 1])) + + def test_syrk_zr(self): + for f in _get_func('syrk', 'zc'): + res = f(a=self.sigma_y, alpha=1.) + assert_array_almost_equal(np.triu(res), np.diag([-1, -1])) + + def test_herk_zr(self): + for f in _get_func('herk', 'zc'): + res = f(a=self.sigma_y, alpha=1.) + assert_array_almost_equal(np.triu(res), np.diag([1, 1])) + + def test_syr2k_zr(self): + for f in _get_func('syr2k', 'zc'): + res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.) + assert_array_almost_equal(np.triu(res), 2.*np.diag([-1, -1])) + + def test_her2k_zr(self): + for f in _get_func('her2k', 'zc'): + res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.) + assert_array_almost_equal(np.triu(res), 2.*np.diag([1, 1])) + + +class TestTRMM: + """Quick and simple tests for dtrmm.""" + + def setup_method(self): + self.a = np.array([[1., 2., ], + [-2., 1.]]) + self.b = np.array([[3., 4., -1.], + [5., 6., -2.]]) + + self.a2 = np.array([[1, 1, 2, 3], + [0, 1, 4, 5], + [0, 0, 1, 6], + [0, 0, 0, 1]], order="f") + self.b2 = np.array([[1, 4], [2, 5], [3, 6], [7, 8], [9, 10]], + order="f") + + @pytest.mark.parametrize("dtype_", DTYPES) + def test_side(self, dtype_): + trmm = get_blas_funcs("trmm", dtype=dtype_) + # Provide large A array that works for side=1 but not 0 (see gh-10841) + assert_raises(Exception, trmm, 1.0, self.a2, self.b2) + res = trmm(1.0, self.a2.astype(dtype_), self.b2.astype(dtype_), + side=1) + k = self.b2.shape[1] + assert_allclose(res, self.b2 @ self.a2[:k, :k], rtol=0., + atol=100*np.finfo(dtype_).eps) + + def test_ab(self): + f = getattr(fblas, 'dtrmm', None) + if f is not None: + result = f(1., self.a, self.b) + # default a is upper triangular + expected = np.array([[13., 16., -5.], + [5., 6., -2.]]) + assert_array_almost_equal(result, expected) + + def test_ab_lower(self): + f = getattr(fblas, 'dtrmm', None) + if f is not None: + result = f(1., self.a, self.b, lower=True) + expected = np.array([[3., 4., -1.], + [-1., -2., 0.]]) # now a is lower triangular + assert_array_almost_equal(result, expected) + + def test_b_overwrites(self): + # BLAS dtrmm modifies B argument in-place. 
+ # Here the default is to copy, but this can be overridden + f = getattr(fblas, 'dtrmm', None) + if f is not None: + for overwr in [True, False]: + bcopy = self.b.copy() + result = f(1., self.a, bcopy, overwrite_b=overwr) + # C-contiguous arrays are copied + assert_(bcopy.flags.f_contiguous is False and + np.may_share_memory(bcopy, result) is False) + assert_equal(bcopy, self.b) + + bcopy = np.asfortranarray(self.b.copy()) # or just transpose it + result = f(1., self.a, bcopy, overwrite_b=True) + assert_(bcopy.flags.f_contiguous is True and + np.may_share_memory(bcopy, result) is True) + assert_array_almost_equal(bcopy, result) + + +def test_trsm(): + seed(1234) + for ind, dtype in enumerate(DTYPES): + tol = np.finfo(dtype).eps*1000 + func, = get_blas_funcs(('trsm',), dtype=dtype) + + # Test protection against size mismatches + A = rand(4, 5).astype(dtype) + B = rand(4, 4).astype(dtype) + alpha = dtype(1) + assert_raises(Exception, func, alpha, A, B) + assert_raises(Exception, func, alpha, A.T, B) + + n = 8 + m = 7 + alpha = dtype(-2.5) + A = (rand(m, m) if ind < 2 else rand(m, m) + rand(m, m)*1j) + eye(m) + A = A.astype(dtype) + Au = triu(A) + Al = tril(A) + B1 = rand(m, n).astype(dtype) + B2 = rand(n, m).astype(dtype) + + x1 = func(alpha=alpha, a=A, b=B1) + assert_equal(B1.shape, x1.shape) + x2 = solve(Au, alpha*B1) + assert_allclose(x1, x2, atol=tol) + + x1 = func(alpha=alpha, a=A, b=B1, trans_a=1) + x2 = solve(Au.T, alpha*B1) + assert_allclose(x1, x2, atol=tol) + + x1 = func(alpha=alpha, a=A, b=B1, trans_a=2) + x2 = solve(Au.conj().T, alpha*B1) + assert_allclose(x1, x2, atol=tol) + + x1 = func(alpha=alpha, a=A, b=B1, diag=1) + Au[arange(m), arange(m)] = dtype(1) + x2 = solve(Au, alpha*B1) + assert_allclose(x1, x2, atol=tol) + + x1 = func(alpha=alpha, a=A, b=B2, diag=1, side=1) + x2 = solve(Au.conj().T, alpha*B2.conj().T) + assert_allclose(x1, x2.conj().T, atol=tol) + + x1 = func(alpha=alpha, a=A, b=B2, diag=1, side=1, lower=1) + Al[arange(m), arange(m)] = dtype(1) + x2 = solve(Al.conj().T, alpha*B2.conj().T) + assert_allclose(x1, x2.conj().T, atol=tol) + + +@pytest.mark.xfail(run=False, + reason="gh-16930") +def test_gh_169309(): + x = np.repeat(10, 9) + actual = scipy.linalg.blas.dnrm2(x, 5, 3, -1) + expected = math.sqrt(500) + assert_allclose(actual, expected) + + +def test_dnrm2_neg_incx(): + # check that dnrm2(..., incx < 0) raises + # XXX: remove the test after the lowest supported BLAS implements + # negative incx (new in LAPACK 3.10) + x = np.repeat(10, 9) + incx = -1 + with assert_raises(fblas.__fblas_error): + scipy.linalg.blas.dnrm2(x, 5, 3, incx) diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_cython_blas.py b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_cython_blas.py new file mode 100644 index 0000000000000000000000000000000000000000..284e214d38ed331cf0493d1e3bba6e1214939b2c --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_cython_blas.py @@ -0,0 +1,118 @@ +import numpy as np +from numpy.testing import (assert_allclose, + assert_equal) +import scipy.linalg.cython_blas as blas + +class TestDGEMM: + + def test_transposes(self): + + a = np.arange(12, dtype='d').reshape((3, 4))[:2,:2] + b = np.arange(1, 13, dtype='d').reshape((4, 3))[:2,:2] + c = np.empty((2, 4))[:2,:2] + + blas._test_dgemm(1., a, b, 0., c) + assert_allclose(c, a.dot(b)) + + blas._test_dgemm(1., a.T, b, 0., c) + assert_allclose(c, a.T.dot(b)) + + blas._test_dgemm(1., a, b.T, 0., c) + assert_allclose(c, a.dot(b.T)) + + blas._test_dgemm(1., a.T, 
b.T, 0., c) + assert_allclose(c, a.T.dot(b.T)) + + blas._test_dgemm(1., a, b, 0., c.T) + assert_allclose(c, a.dot(b).T) + + blas._test_dgemm(1., a.T, b, 0., c.T) + assert_allclose(c, a.T.dot(b).T) + + blas._test_dgemm(1., a, b.T, 0., c.T) + assert_allclose(c, a.dot(b.T).T) + + blas._test_dgemm(1., a.T, b.T, 0., c.T) + assert_allclose(c, a.T.dot(b.T).T) + + def test_shapes(self): + a = np.arange(6, dtype='d').reshape((3, 2)) + b = np.arange(-6, 2, dtype='d').reshape((2, 4)) + c = np.empty((3, 4)) + + blas._test_dgemm(1., a, b, 0., c) + assert_allclose(c, a.dot(b)) + + blas._test_dgemm(1., b.T, a.T, 0., c.T) + assert_allclose(c, b.T.dot(a.T).T) + +class TestWfuncPointers: + """ Test the function pointers that are expected to fail on + Mac OS X without the additional entry statement in their definitions + in fblas_l1.pyf.src. """ + + def test_complex_args(self): + + cx = np.array([.5 + 1.j, .25 - .375j, 12.5 - 4.j], np.complex64) + cy = np.array([.8 + 2.j, .875 - .625j, -1. + 2.j], np.complex64) + + assert_allclose(blas._test_cdotc(cx, cy), + -17.6468753815+21.3718757629j) + assert_allclose(blas._test_cdotu(cx, cy), + -6.11562538147+30.3156242371j) + + assert_equal(blas._test_icamax(cx), 3) + + assert_allclose(blas._test_scasum(cx), 18.625) + assert_allclose(blas._test_scnrm2(cx), 13.1796483994) + + assert_allclose(blas._test_cdotc(cx[::2], cy[::2]), + -18.1000003815+21.2000007629j) + assert_allclose(blas._test_cdotu(cx[::2], cy[::2]), + -6.10000038147+30.7999992371j) + assert_allclose(blas._test_scasum(cx[::2]), 18.) + assert_allclose(blas._test_scnrm2(cx[::2]), 13.1719398499) + + def test_double_args(self): + + x = np.array([5., -3, -.5], np.float64) + y = np.array([2, 1, .5], np.float64) + + assert_allclose(blas._test_dasum(x), 8.5) + assert_allclose(blas._test_ddot(x, y), 6.75) + assert_allclose(blas._test_dnrm2(x), 5.85234975815) + + assert_allclose(blas._test_dasum(x[::2]), 5.5) + assert_allclose(blas._test_ddot(x[::2], y[::2]), 9.75) + assert_allclose(blas._test_dnrm2(x[::2]), 5.0249376297) + + assert_equal(blas._test_idamax(x), 1) + + def test_float_args(self): + + x = np.array([5., -3, -.5], np.float32) + y = np.array([2, 1, .5], np.float32) + + assert_equal(blas._test_isamax(x), 1) + + assert_allclose(blas._test_sasum(x), 8.5) + assert_allclose(blas._test_sdot(x, y), 6.75) + assert_allclose(blas._test_snrm2(x), 5.85234975815) + + assert_allclose(blas._test_sasum(x[::2]), 5.5) + assert_allclose(blas._test_sdot(x[::2], y[::2]), 9.75) + assert_allclose(blas._test_snrm2(x[::2]), 5.0249376297) + + def test_double_complex_args(self): + + cx = np.array([.5 + 1.j, .25 - .375j, 13. - 4.j], np.complex128) + cy = np.array([.875 + 2.j, .875 - .625j, -1. 
+ 2.j], np.complex128) + + assert_equal(blas._test_izamax(cx), 3) + + assert_allclose(blas._test_zdotc(cx, cy), -18.109375+22.296875j) + assert_allclose(blas._test_zdotu(cx, cy), -6.578125+31.390625j) + + assert_allclose(blas._test_zdotc(cx[::2], cy[::2]), -18.5625+22.125j) + assert_allclose(blas._test_zdotu(cx[::2], cy[::2]), -6.5625+31.875j) + diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_cython_lapack.py b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_cython_lapack.py new file mode 100644 index 0000000000000000000000000000000000000000..2a4e7b34b62042efdb0ce0f8ee61ce0189320995 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_cython_lapack.py @@ -0,0 +1,22 @@ +from numpy.testing import assert_allclose +from scipy.linalg import cython_lapack as cython_lapack +from scipy.linalg import lapack + + +class TestLamch: + + def test_slamch(self): + for c in [b'e', b's', b'b', b'p', b'n', b'r', b'm', b'u', b'l', b'o']: + assert_allclose(cython_lapack._test_slamch(c), + lapack.slamch(c)) + + def test_dlamch(self): + for c in [b'e', b's', b'b', b'p', b'n', b'r', b'm', b'u', b'l', b'o']: + assert_allclose(cython_lapack._test_dlamch(c), + lapack.dlamch(c)) + + def test_complex_ladiv(self): + cx = .5 + 1.j + cy = .875 + 2.j + assert_allclose(cython_lapack._test_zladiv(cy, cx), 1.95+0.1j) + assert_allclose(cython_lapack._test_cladiv(cy, cx), 1.95+0.1j) diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_cythonized_array_utils.py b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_cythonized_array_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..19a0b39e28274d74e1bfcbe86807c03ec0159643 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_cythonized_array_utils.py @@ -0,0 +1,121 @@ +import numpy as np +from scipy.linalg import bandwidth, issymmetric, ishermitian +import pytest +from pytest import raises + + +def test_bandwidth_dtypes(): + n = 5 + for t in np.typecodes['All']: + A = np.zeros([n, n], dtype=t) + if t in 'eUVOMm': + raises(TypeError, bandwidth, A) + elif t == 'G': # No-op test. On win these pass on others fail. 
+ pass + else: + _ = bandwidth(A) + + +def test_bandwidth_non2d_input(): + A = np.array([1, 2, 3]) + raises(ValueError, bandwidth, A) + A = np.array([[[1, 2, 3], [4, 5, 6]]]) + raises(ValueError, bandwidth, A) + + +@pytest.mark.parametrize('T', [x for x in np.typecodes['All'] + if x not in 'eGUVOMm']) +def test_bandwidth_square_inputs(T): + n = 20 + k = 4 + R = np.zeros([n, n], dtype=T, order='F') + # form a banded matrix inplace + R[[x for x in range(n)], [x for x in range(n)]] = 1 + R[[x for x in range(n-k)], [x for x in range(k, n)]] = 1 + R[[x for x in range(1, n)], [x for x in range(n-1)]] = 1 + R[[x for x in range(k, n)], [x for x in range(n-k)]] = 1 + assert bandwidth(R) == (k, k) + + +@pytest.mark.parametrize('T', [x for x in np.typecodes['All'] + if x not in 'eGUVOMm']) +def test_bandwidth_rect_inputs(T): + n, m = 10, 20 + k = 5 + R = np.zeros([n, m], dtype=T, order='F') + # form a banded matrix inplace + R[[x for x in range(n)], [x for x in range(n)]] = 1 + R[[x for x in range(n-k)], [x for x in range(k, n)]] = 1 + R[[x for x in range(1, n)], [x for x in range(n-1)]] = 1 + R[[x for x in range(k, n)], [x for x in range(n-k)]] = 1 + assert bandwidth(R) == (k, k) + + +def test_issymetric_ishermitian_dtypes(): + n = 5 + for t in np.typecodes['All']: + A = np.zeros([n, n], dtype=t) + if t in 'eUVOMm': + raises(TypeError, issymmetric, A) + raises(TypeError, ishermitian, A) + elif t == 'G': # No-op test. On win these pass on others fail. + pass + else: + assert issymmetric(A) + assert ishermitian(A) + + +def test_issymmetric_ishermitian_invalid_input(): + A = np.array([1, 2, 3]) + raises(ValueError, issymmetric, A) + raises(ValueError, ishermitian, A) + A = np.array([[[1, 2, 3], [4, 5, 6]]]) + raises(ValueError, issymmetric, A) + raises(ValueError, ishermitian, A) + A = np.array([[1, 2, 3], [4, 5, 6]]) + raises(ValueError, issymmetric, A) + raises(ValueError, ishermitian, A) + + +def test_issymetric_complex_decimals(): + A = np.arange(1, 10).astype(complex).reshape(3, 3) + A += np.arange(-4, 5).astype(complex).reshape(3, 3)*1j + # make entries decimal + A /= np.pi + A = A + A.T + assert issymmetric(A) + + +def test_ishermitian_complex_decimals(): + A = np.arange(1, 10).astype(complex).reshape(3, 3) + A += np.arange(-4, 5).astype(complex).reshape(3, 3)*1j + # make entries decimal + A /= np.pi + A = A + A.T.conj() + assert ishermitian(A) + + +def test_issymmetric_approximate_results(): + n = 20 + rng = np.random.RandomState(123456789) + x = rng.uniform(high=5., size=[n, n]) + y = x @ x.T # symmetric + p = rng.standard_normal([n, n]) + z = p @ y @ p.T + assert issymmetric(z, atol=1e-10) + assert issymmetric(z, atol=1e-10, rtol=0.) + assert issymmetric(z, atol=0., rtol=1e-12) + assert issymmetric(z, atol=1e-13, rtol=1e-12) + + +def test_ishermitian_approximate_results(): + n = 20 + rng = np.random.RandomState(987654321) + x = rng.uniform(high=5., size=[n, n]) + y = x @ x.T # symmetric + p = rng.standard_normal([n, n]) + rng.standard_normal([n, n])*1j + z = p @ y @ p.conj().T + assert ishermitian(z, atol=1e-10) + assert ishermitian(z, atol=1e-10, rtol=0.) 
+ assert ishermitian(z, atol=0., rtol=1e-12) + assert ishermitian(z, atol=1e-13, rtol=1e-12) diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_decomp.py b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_decomp.py new file mode 100644 index 0000000000000000000000000000000000000000..8722b31468202a3d3f7c8a17acc922d3d0fa8015 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_decomp.py @@ -0,0 +1,2794 @@ +import itertools +import platform +import sys + +import numpy as np +from numpy.testing import (assert_equal, assert_almost_equal, + assert_array_almost_equal, assert_array_equal, + assert_, assert_allclose) + +import pytest +from pytest import raises as assert_raises + +from scipy.linalg import (eig, eigvals, lu, svd, svdvals, cholesky, qr, + schur, rsf2csf, lu_solve, lu_factor, solve, diagsvd, + hessenberg, rq, eig_banded, eigvals_banded, eigh, + eigvalsh, qr_multiply, qz, orth, ordqz, + subspace_angles, hadamard, eigvalsh_tridiagonal, + eigh_tridiagonal, null_space, cdf2rdf, LinAlgError) + +from scipy.linalg.lapack import (dgbtrf, dgbtrs, zgbtrf, zgbtrs, dsbev, + dsbevd, dsbevx, zhbevd, zhbevx) + +from scipy.linalg._misc import norm +from scipy.linalg._decomp_qz import _select_function +from scipy.stats import ortho_group + +from numpy import (array, diag, full, linalg, argsort, zeros, arange, + float32, complex64, ravel, sqrt, iscomplex, shape, sort, + sign, asarray, isfinite, ndarray, eye,) + +from scipy.linalg._testutils import assert_no_overwrite +from scipy.sparse._sputils import matrix + +from scipy._lib._testutils import check_free_memory +from scipy.linalg.blas import HAS_ILP64 +try: + from scipy.__config__ import CONFIG +except ImportError: + CONFIG = None + + +def _random_hermitian_matrix(n, posdef=False, dtype=float): + "Generate random sym/hermitian array of the given size n" + if dtype in COMPLEX_DTYPES: + A = np.random.rand(n, n) + np.random.rand(n, n)*1.0j + A = (A + A.conj().T)/2 + else: + A = np.random.rand(n, n) + A = (A + A.T)/2 + + if posdef: + A += sqrt(2*n)*np.eye(n) + + return A.astype(dtype) + + +REAL_DTYPES = [np.float32, np.float64] +COMPLEX_DTYPES = [np.complex64, np.complex128] +DTYPES = REAL_DTYPES + COMPLEX_DTYPES + + +# XXX: This function should not be defined here, but somewhere in +# scipy.linalg namespace +def symrand(dim_or_eigv, rng): + """Return a random symmetric (Hermitian) matrix. + + If 'dim_or_eigv' is an integer N, return a NxN matrix, with eigenvalues + uniformly distributed on (-1,1). + + If 'dim_or_eigv' is 1-D real array 'a', return a matrix whose + eigenvalues are 'a'. 
+ """ + if isinstance(dim_or_eigv, int): + dim = dim_or_eigv + d = rng.random(dim)*2 - 1 + elif (isinstance(dim_or_eigv, ndarray) and + len(dim_or_eigv.shape) == 1): + dim = dim_or_eigv.shape[0] + d = dim_or_eigv + else: + raise TypeError("input type not supported.") + + v = ortho_group.rvs(dim) + h = v.T.conj() @ diag(d) @ v + # to avoid roundoff errors, symmetrize the matrix (again) + h = 0.5*(h.T+h) + return h + + +class TestEigVals: + + def test_simple(self): + a = [[1, 2, 3], [1, 2, 3], [2, 5, 6]] + w = eigvals(a) + exact_w = [(9+sqrt(93))/2, 0, (9-sqrt(93))/2] + assert_array_almost_equal(w, exact_w) + + def test_simple_tr(self): + a = array([[1, 2, 3], [1, 2, 3], [2, 5, 6]], 'd').T + a = a.copy() + a = a.T + w = eigvals(a) + exact_w = [(9+sqrt(93))/2, 0, (9-sqrt(93))/2] + assert_array_almost_equal(w, exact_w) + + def test_simple_complex(self): + a = [[1, 2, 3], [1, 2, 3], [2, 5, 6+1j]] + w = eigvals(a) + exact_w = [(9+1j+sqrt(92+6j))/2, + 0, + (9+1j-sqrt(92+6j))/2] + assert_array_almost_equal(w, exact_w) + + def test_finite(self): + a = [[1, 2, 3], [1, 2, 3], [2, 5, 6]] + w = eigvals(a, check_finite=False) + exact_w = [(9+sqrt(93))/2, 0, (9-sqrt(93))/2] + assert_array_almost_equal(w, exact_w) + + +class TestEig: + + def test_simple(self): + a = array([[1, 2, 3], [1, 2, 3], [2, 5, 6]]) + w, v = eig(a) + exact_w = [(9+sqrt(93))/2, 0, (9-sqrt(93))/2] + v0 = array([1, 1, (1+sqrt(93)/3)/2]) + v1 = array([3., 0, -1]) + v2 = array([1, 1, (1-sqrt(93)/3)/2]) + v0 = v0 / norm(v0) + v1 = v1 / norm(v1) + v2 = v2 / norm(v2) + assert_array_almost_equal(w, exact_w) + assert_array_almost_equal(v0, v[:, 0]*sign(v[0, 0])) + assert_array_almost_equal(v1, v[:, 1]*sign(v[0, 1])) + assert_array_almost_equal(v2, v[:, 2]*sign(v[0, 2])) + for i in range(3): + assert_array_almost_equal(a @ v[:, i], w[i]*v[:, i]) + w, v = eig(a, left=1, right=0) + for i in range(3): + assert_array_almost_equal(a.T @ v[:, i], w[i]*v[:, i]) + + def test_simple_complex_eig(self): + a = array([[1, 2], [-2, 1]]) + w, vl, vr = eig(a, left=1, right=1) + assert_array_almost_equal(w, array([1+2j, 1-2j])) + for i in range(2): + assert_array_almost_equal(a @ vr[:, i], w[i]*vr[:, i]) + for i in range(2): + assert_array_almost_equal(a.conj().T @ vl[:, i], + w[i].conj()*vl[:, i]) + + def test_simple_complex(self): + a = array([[1, 2, 3], [1, 2, 3], [2, 5, 6+1j]]) + w, vl, vr = eig(a, left=1, right=1) + for i in range(3): + assert_array_almost_equal(a @ vr[:, i], w[i]*vr[:, i]) + for i in range(3): + assert_array_almost_equal(a.conj().T @ vl[:, i], + w[i].conj()*vl[:, i]) + + def test_gh_3054(self): + a = [[1]] + b = [[0]] + w, vr = eig(a, b, homogeneous_eigvals=True) + assert_allclose(w[1, 0], 0) + assert_(w[0, 0] != 0) + assert_allclose(vr, 1) + + w, vr = eig(a, b) + assert_equal(w, np.inf) + assert_allclose(vr, 1) + + def _check_gen_eig(self, A, B, atol_homog=1e-13, rtol_homog=1e-13): + if B is not None: + A, B = asarray(A), asarray(B) + B0 = B + else: + A = asarray(A) + B0 = B + B = np.eye(*A.shape) + msg = f"\n{A!r}\n{B!r}" + + # Eigenvalues in homogeneous coordinates + w, vr = eig(A, B0, homogeneous_eigvals=True) + wt = eigvals(A, B0, homogeneous_eigvals=True) + val1 = A @ vr * w[1, :] + val2 = B @ vr * w[0, :] + for i in range(val1.shape[1]): + assert_allclose(val1[:, i], val2[:, i], + rtol=rtol_homog, atol=atol_homog, err_msg=msg) + + if B0 is None: + assert_allclose(w[1, :], 1) + assert_allclose(wt[1, :], 1) + + perm = np.lexsort(w) + permt = np.lexsort(wt) + assert_allclose(w[:, perm], wt[:, permt], atol=1e-7, rtol=1e-7, + 
err_msg=msg) + + length = np.empty(len(vr)) + + for i in range(len(vr)): + length[i] = norm(vr[:, i]) + + assert_allclose(length, np.ones(length.size), err_msg=msg, + atol=1e-7, rtol=1e-7) + + # Convert homogeneous coordinates + beta_nonzero = (w[1, :] != 0) + wh = w[0, beta_nonzero] / w[1, beta_nonzero] + + # Eigenvalues in standard coordinates + w, vr = eig(A, B0) + wt = eigvals(A, B0) + val1 = A @ vr + val2 = B @ vr * w + res = val1 - val2 + for i in range(res.shape[1]): + if np.all(isfinite(res[:, i])): + assert_allclose(res[:, i], 0, + rtol=1e-13, atol=1e-13, err_msg=msg) + + # try to consistently order eigenvalues, including complex conjugate pairs + w_fin = w[isfinite(w)] + wt_fin = wt[isfinite(wt)] + + # prune noise in the real parts + w_fin = -1j * np.real_if_close(1j*w_fin, tol=1e-10) + wt_fin = -1j * np.real_if_close(1j*wt_fin, tol=1e-10) + + perm = argsort(w_fin) + permt = argsort(wt_fin) + + assert_allclose(w_fin[perm], wt_fin[permt], + atol=1e-7, rtol=1e-7, err_msg=msg) + + length = np.empty(len(vr)) + for i in range(len(vr)): + length[i] = norm(vr[:, i]) + assert_allclose(length, np.ones(length.size), err_msg=msg) + + # Compare homogeneous and nonhomogeneous versions + assert_allclose(sort(wh), sort(w[np.isfinite(w)])) + + def test_singular(self): + # Example taken from + # https://web.archive.org/web/20040903121217/http://www.cs.umu.se/research/nla/singular_pairs/guptri/matlab.html + A = array([[22, 34, 31, 31, 17], + [45, 45, 42, 19, 29], + [39, 47, 49, 26, 34], + [27, 31, 26, 21, 15], + [38, 44, 44, 24, 30]]) + B = array([[13, 26, 25, 17, 24], + [31, 46, 40, 26, 37], + [26, 40, 19, 25, 25], + [16, 25, 27, 14, 23], + [24, 35, 18, 21, 22]]) + + with np.errstate(all='ignore'): + self._check_gen_eig(A, B, atol_homog=5e-13) + + def test_falker(self): + # Test matrices giving some Nan generalized eigenvalues. 
+ M = diag(array([1, 0, 3])) + K = array(([2, -1, -1], [-1, 2, -1], [-1, -1, 2])) + D = array(([1, -1, 0], [-1, 1, 0], [0, 0, 0])) + Z = zeros((3, 3)) + I3 = eye(3) + A = np.block([[I3, Z], [Z, -K]]) + B = np.block([[Z, I3], [M, D]]) + + with np.errstate(all='ignore'): + self._check_gen_eig(A, B) + + def test_bad_geneig(self): + # Ticket #709 (strange return values from DGGEV) + + def matrices(omega): + c1 = -9 + omega**2 + c2 = 2*omega + A = [[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, c1, 0], + [0, 0, 0, c1]] + B = [[0, 0, 1, 0], + [0, 0, 0, 1], + [1, 0, 0, -c2], + [0, 1, c2, 0]] + return A, B + + # With a buggy LAPACK, this can fail for different omega on different + # machines -- so we need to test several values + with np.errstate(all='ignore'): + for k in range(100): + A, B = matrices(omega=k*5./100) + self._check_gen_eig(A, B) + + def test_make_eigvals(self): + # Step through all paths in _make_eigvals + # Real eigenvalues + rng = np.random.RandomState(1234) + A = symrand(3, rng) + self._check_gen_eig(A, None) + B = symrand(3, rng) + self._check_gen_eig(A, B) + # Complex eigenvalues + A = rng.random((3, 3)) + 1j*rng.random((3, 3)) + self._check_gen_eig(A, None) + B = rng.random((3, 3)) + 1j*rng.random((3, 3)) + self._check_gen_eig(A, B) + + def test_check_finite(self): + a = [[1, 2, 3], [1, 2, 3], [2, 5, 6]] + w, v = eig(a, check_finite=False) + exact_w = [(9+sqrt(93))/2, 0, (9-sqrt(93))/2] + v0 = array([1, 1, (1+sqrt(93)/3)/2]) + v1 = array([3., 0, -1]) + v2 = array([1, 1, (1-sqrt(93)/3)/2]) + v0 = v0 / norm(v0) + v1 = v1 / norm(v1) + v2 = v2 / norm(v2) + assert_array_almost_equal(w, exact_w) + assert_array_almost_equal(v0, v[:, 0]*sign(v[0, 0])) + assert_array_almost_equal(v1, v[:, 1]*sign(v[0, 1])) + assert_array_almost_equal(v2, v[:, 2]*sign(v[0, 2])) + for i in range(3): + assert_array_almost_equal(a @ v[:, i], w[i]*v[:, i]) + + def test_not_square_error(self): + """Check that passing a non-square array raises a ValueError.""" + A = np.arange(6).reshape(3, 2) + assert_raises(ValueError, eig, A) + + def test_shape_mismatch(self): + """Check that passing arrays of with different shapes + raises a ValueError.""" + A = eye(2) + B = np.arange(9.0).reshape(3, 3) + assert_raises(ValueError, eig, A, B) + assert_raises(ValueError, eig, B, A) + + def test_gh_11577(self): + # https://github.com/scipy/scipy/issues/11577 + # `A - lambda B` should have 4 and 8 among the eigenvalues, and this + # was apparently broken on some platforms + A = np.array([[12.0, 28.0, 76.0, 220.0], + [16.0, 32.0, 80.0, 224.0], + [24.0, 40.0, 88.0, 232.0], + [40.0, 56.0, 104.0, 248.0]], dtype='float64') + B = np.array([[2.0, 4.0, 10.0, 28.0], + [3.0, 5.0, 11.0, 29.0], + [5.0, 7.0, 13.0, 31.0], + [9.0, 11.0, 17.0, 35.0]], dtype='float64') + + D, V = eig(A, B) + + # The problem is ill-conditioned, and two other eigenvalues + # depend on ATLAS/OpenBLAS version, compiler version etc + # see gh-11577 for discussion + # + # NB: it is tempting to use `assert_allclose(D[:2], [4, 8])` instead but + # the ordering of eigenvalues also comes out different on different + # systems depending on who knows what. 
+ with np.testing.suppress_warnings() as sup: + # isclose chokes on inf/nan values + sup.filter(RuntimeWarning, "invalid value encountered in multiply") + assert np.isclose(D, 4.0, atol=1e-14).any() + assert np.isclose(D, 8.0, atol=1e-14).any() + + +class TestEigBanded: + def setup_method(self): + self.create_bandmat() + + def create_bandmat(self): + """Create the full matrix `self.fullmat` and + the corresponding band matrix `self.bandmat`.""" + N = 10 + self.KL = 2 # number of subdiagonals (below the diagonal) + self.KU = 2 # number of superdiagonals (above the diagonal) + + # symmetric band matrix + self.sym_mat = (diag(full(N, 1.0)) + + diag(full(N-1, -1.0), -1) + diag(full(N-1, -1.0), 1) + + diag(full(N-2, -2.0), -2) + diag(full(N-2, -2.0), 2)) + + # hermitian band matrix + self.herm_mat = (diag(full(N, -1.0)) + + 1j*diag(full(N-1, 1.0), -1) + - 1j*diag(full(N-1, 1.0), 1) + + diag(full(N-2, -2.0), -2) + + diag(full(N-2, -2.0), 2)) + + # general real band matrix + self.real_mat = (diag(full(N, 1.0)) + + diag(full(N-1, -1.0), -1) + diag(full(N-1, -3.0), 1) + + diag(full(N-2, 2.0), -2) + diag(full(N-2, -2.0), 2)) + + # general complex band matrix + self.comp_mat = (1j*diag(full(N, 1.0)) + + diag(full(N-1, -1.0), -1) + + 1j*diag(full(N-1, -3.0), 1) + + diag(full(N-2, 2.0), -2) + + diag(full(N-2, -2.0), 2)) + + # Eigenvalues and -vectors from linalg.eig + ew, ev = linalg.eig(self.sym_mat) + ew = ew.real + args = argsort(ew) + self.w_sym_lin = ew[args] + self.evec_sym_lin = ev[:, args] + + ew, ev = linalg.eig(self.herm_mat) + ew = ew.real + args = argsort(ew) + self.w_herm_lin = ew[args] + self.evec_herm_lin = ev[:, args] + + # Extract upper bands from symmetric and hermitian band matrices + # (for use in dsbevd, dsbevx, zhbevd, zhbevx + # and their single precision versions) + LDAB = self.KU + 1 + self.bandmat_sym = zeros((LDAB, N), dtype=float) + self.bandmat_herm = zeros((LDAB, N), dtype=complex) + for i in range(LDAB): + self.bandmat_sym[LDAB-i-1, i:N] = diag(self.sym_mat, i) + self.bandmat_herm[LDAB-i-1, i:N] = diag(self.herm_mat, i) + + # Extract bands from general real and complex band matrix + # (for use in dgbtrf, dgbtrs and their single precision versions) + LDAB = 2*self.KL + self.KU + 1 + self.bandmat_real = zeros((LDAB, N), dtype=float) + self.bandmat_real[2*self.KL, :] = diag(self.real_mat) # diagonal + for i in range(self.KL): + # superdiagonals + self.bandmat_real[2*self.KL-1-i, i+1:N] = diag(self.real_mat, i+1) + # subdiagonals + self.bandmat_real[2*self.KL+1+i, 0:N-1-i] = diag(self.real_mat, + -i-1) + + self.bandmat_comp = zeros((LDAB, N), dtype=complex) + self.bandmat_comp[2*self.KL, :] = diag(self.comp_mat) # diagonal + for i in range(self.KL): + # superdiagonals + self.bandmat_comp[2*self.KL-1-i, i+1:N] = diag(self.comp_mat, i+1) + # subdiagonals + self.bandmat_comp[2*self.KL+1+i, 0:N-1-i] = diag(self.comp_mat, + -i-1) + + # absolute value for linear equation system A*x = b + self.b = 1.0*arange(N) + self.bc = self.b * (1 + 1j) + + ##################################################################### + + def test_dsbev(self): + """Compare dsbev eigenvalues and eigenvectors with + the result of linalg.eig.""" + w, evec, info = dsbev(self.bandmat_sym, compute_v=1) + evec_ = evec[:, argsort(w)] + assert_array_almost_equal(sort(w), self.w_sym_lin) + assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin)) + + def test_dsbevd(self): + """Compare dsbevd eigenvalues and eigenvectors with + the result of linalg.eig.""" + w, evec, info = dsbevd(self.bandmat_sym, 
compute_v=1) + evec_ = evec[:, argsort(w)] + assert_array_almost_equal(sort(w), self.w_sym_lin) + assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin)) + + def test_dsbevx(self): + """Compare dsbevx eigenvalues and eigenvectors + with the result of linalg.eig.""" + N, N = shape(self.sym_mat) + # Note: arguments 0.0,0.0,range? + w, evec, num, ifail, info = dsbevx(self.bandmat_sym, 0.0, 0.0, 1, N, + compute_v=1, range=2) + evec_ = evec[:, argsort(w)] + assert_array_almost_equal(sort(w), self.w_sym_lin) + assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin)) + + def test_zhbevd(self): + """Compare zhbevd eigenvalues and eigenvectors + with the result of linalg.eig.""" + w, evec, info = zhbevd(self.bandmat_herm, compute_v=1) + evec_ = evec[:, argsort(w)] + assert_array_almost_equal(sort(w), self.w_herm_lin) + assert_array_almost_equal(abs(evec_), abs(self.evec_herm_lin)) + + def test_zhbevx(self): + """Compare zhbevx eigenvalues and eigenvectors + with the result of linalg.eig.""" + N, N = shape(self.herm_mat) + # Note: arguments 0.0,0.0,range? + w, evec, num, ifail, info = zhbevx(self.bandmat_herm, 0.0, 0.0, 1, N, + compute_v=1, range=2) + evec_ = evec[:, argsort(w)] + assert_array_almost_equal(sort(w), self.w_herm_lin) + assert_array_almost_equal(abs(evec_), abs(self.evec_herm_lin)) + + def test_eigvals_banded(self): + """Compare eigenvalues of eigvals_banded with those of linalg.eig.""" + w_sym = eigvals_banded(self.bandmat_sym) + w_sym = w_sym.real + assert_array_almost_equal(sort(w_sym), self.w_sym_lin) + + w_herm = eigvals_banded(self.bandmat_herm) + w_herm = w_herm.real + assert_array_almost_equal(sort(w_herm), self.w_herm_lin) + + # extracting eigenvalues with respect to an index range + ind1 = 2 + ind2 = np.longlong(6) + w_sym_ind = eigvals_banded(self.bandmat_sym, + select='i', select_range=(ind1, ind2)) + assert_array_almost_equal(sort(w_sym_ind), + self.w_sym_lin[ind1:ind2+1]) + w_herm_ind = eigvals_banded(self.bandmat_herm, + select='i', select_range=(ind1, ind2)) + assert_array_almost_equal(sort(w_herm_ind), + self.w_herm_lin[ind1:ind2+1]) + + # extracting eigenvalues with respect to a value range + v_lower = self.w_sym_lin[ind1] - 1.0e-5 + v_upper = self.w_sym_lin[ind2] + 1.0e-5 + w_sym_val = eigvals_banded(self.bandmat_sym, + select='v', select_range=(v_lower, v_upper)) + assert_array_almost_equal(sort(w_sym_val), + self.w_sym_lin[ind1:ind2+1]) + + v_lower = self.w_herm_lin[ind1] - 1.0e-5 + v_upper = self.w_herm_lin[ind2] + 1.0e-5 + w_herm_val = eigvals_banded(self.bandmat_herm, + select='v', + select_range=(v_lower, v_upper)) + assert_array_almost_equal(sort(w_herm_val), + self.w_herm_lin[ind1:ind2+1]) + + w_sym = eigvals_banded(self.bandmat_sym, check_finite=False) + w_sym = w_sym.real + assert_array_almost_equal(sort(w_sym), self.w_sym_lin) + + def test_eig_banded(self): + """Compare eigenvalues and eigenvectors of eig_banded + with those of linalg.eig.
""" + w_sym, evec_sym = eig_banded(self.bandmat_sym) + evec_sym_ = evec_sym[:, argsort(w_sym.real)] + assert_array_almost_equal(sort(w_sym), self.w_sym_lin) + assert_array_almost_equal(abs(evec_sym_), abs(self.evec_sym_lin)) + + w_herm, evec_herm = eig_banded(self.bandmat_herm) + evec_herm_ = evec_herm[:, argsort(w_herm.real)] + assert_array_almost_equal(sort(w_herm), self.w_herm_lin) + assert_array_almost_equal(abs(evec_herm_), abs(self.evec_herm_lin)) + + # extracting eigenvalues with respect to an index range + ind1 = 2 + ind2 = 6 + w_sym_ind, evec_sym_ind = eig_banded(self.bandmat_sym, + select='i', + select_range=(ind1, ind2)) + assert_array_almost_equal(sort(w_sym_ind), + self.w_sym_lin[ind1:ind2+1]) + assert_array_almost_equal(abs(evec_sym_ind), + abs(self.evec_sym_lin[:, ind1:ind2+1])) + + w_herm_ind, evec_herm_ind = eig_banded(self.bandmat_herm, + select='i', + select_range=(ind1, ind2)) + assert_array_almost_equal(sort(w_herm_ind), + self.w_herm_lin[ind1:ind2+1]) + assert_array_almost_equal(abs(evec_herm_ind), + abs(self.evec_herm_lin[:, ind1:ind2+1])) + + # extracting eigenvalues with respect to a value range + v_lower = self.w_sym_lin[ind1] - 1.0e-5 + v_upper = self.w_sym_lin[ind2] + 1.0e-5 + w_sym_val, evec_sym_val = eig_banded(self.bandmat_sym, + select='v', + select_range=(v_lower, v_upper)) + assert_array_almost_equal(sort(w_sym_val), + self.w_sym_lin[ind1:ind2+1]) + assert_array_almost_equal(abs(evec_sym_val), + abs(self.evec_sym_lin[:, ind1:ind2+1])) + + v_lower = self.w_herm_lin[ind1] - 1.0e-5 + v_upper = self.w_herm_lin[ind2] + 1.0e-5 + w_herm_val, evec_herm_val = eig_banded(self.bandmat_herm, + select='v', + select_range=(v_lower, v_upper)) + assert_array_almost_equal(sort(w_herm_val), + self.w_herm_lin[ind1:ind2+1]) + assert_array_almost_equal(abs(evec_herm_val), + abs(self.evec_herm_lin[:, ind1:ind2+1])) + + w_sym, evec_sym = eig_banded(self.bandmat_sym, check_finite=False) + evec_sym_ = evec_sym[:, argsort(w_sym.real)] + assert_array_almost_equal(sort(w_sym), self.w_sym_lin) + assert_array_almost_equal(abs(evec_sym_), abs(self.evec_sym_lin)) + + def test_dgbtrf(self): + """Compare dgbtrf LU factorisation with the LU factorisation result + of linalg.lu.""" + M, N = shape(self.real_mat) + lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU) + + # extract matrix u from lu_symm_band + u = diag(lu_symm_band[2*self.KL, :]) + for i in range(self.KL + self.KU): + u += diag(lu_symm_band[2*self.KL-1-i, i+1:N], i+1) + + p_lin, l_lin, u_lin = lu(self.real_mat, permute_l=0) + assert_array_almost_equal(u, u_lin) + + def test_zgbtrf(self): + """Compare zgbtrf LU factorisation with the LU factorisation result + of linalg.lu.""" + M, N = shape(self.comp_mat) + lu_symm_band, ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU) + + # extract matrix u from lu_symm_band + u = diag(lu_symm_band[2*self.KL, :]) + for i in range(self.KL + self.KU): + u += diag(lu_symm_band[2*self.KL-1-i, i+1:N], i+1) + + p_lin, l_lin, u_lin = lu(self.comp_mat, permute_l=0) + assert_array_almost_equal(u, u_lin) + + def test_dgbtrs(self): + """Compare dgbtrs solutions for linear equation system A*x = b + with solutions of linalg.solve.""" + + lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU) + y, info = dgbtrs(lu_symm_band, self.KL, self.KU, self.b, ipiv) + + y_lin = linalg.solve(self.real_mat, self.b) + assert_array_almost_equal(y, y_lin) + + def test_zgbtrs(self): + """Compare zgbtrs solutions for linear equation system A*x = b + with solutions of linalg.solve.""" 
+ + lu_symm_band, ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU) + y, info = zgbtrs(lu_symm_band, self.KL, self.KU, self.bc, ipiv) + + y_lin = linalg.solve(self.comp_mat, self.bc) + assert_array_almost_equal(y, y_lin) + + +class TestEigTridiagonal: + def setup_method(self): + self.create_trimat() + + def create_trimat(self): + """Create the full matrix `self.fullmat`, `self.d`, and `self.e`.""" + N = 10 + + # symmetric band matrix + self.d = full(N, 1.0) + self.e = full(N-1, -1.0) + self.full_mat = (diag(self.d) + diag(self.e, -1) + diag(self.e, 1)) + + ew, ev = linalg.eig(self.full_mat) + ew = ew.real + args = argsort(ew) + self.w = ew[args] + self.evec = ev[:, args] + + def test_degenerate(self): + """Test error conditions.""" + # Wrong sizes + assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e[:-1]) + # Must be real + assert_raises(TypeError, eigvalsh_tridiagonal, self.d, self.e * 1j) + # Bad driver + assert_raises(TypeError, eigvalsh_tridiagonal, self.d, self.e, + lapack_driver=1.) + assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e, + lapack_driver='foo') + # Bad bounds + assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e, + select='i', select_range=(0, -1)) + + def test_eigvalsh_tridiagonal(self): + """Compare eigenvalues of eigvalsh_tridiagonal with those of eig.""" + # can't use ?STERF with subselection + for driver in ('sterf', 'stev', 'stebz', 'stemr', 'auto'): + w = eigvalsh_tridiagonal(self.d, self.e, lapack_driver=driver) + assert_array_almost_equal(sort(w), self.w) + + for driver in ('sterf', 'stev'): + assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e, + lapack_driver='stev', select='i', + select_range=(0, 1)) + for driver in ('stebz', 'stemr', 'auto'): + # extracting eigenvalues with respect to the full index range + w_ind = eigvalsh_tridiagonal( + self.d, self.e, select='i', select_range=(0, len(self.d)-1), + lapack_driver=driver) + assert_array_almost_equal(sort(w_ind), self.w) + + # extracting eigenvalues with respect to an index range + ind1 = 2 + ind2 = 6 + w_ind = eigvalsh_tridiagonal( + self.d, self.e, select='i', select_range=(ind1, ind2), + lapack_driver=driver) + assert_array_almost_equal(sort(w_ind), self.w[ind1:ind2+1]) + + # extracting eigenvalues with respect to a value range + v_lower = self.w[ind1] - 1.0e-5 + v_upper = self.w[ind2] + 1.0e-5 + w_val = eigvalsh_tridiagonal( + self.d, self.e, select='v', select_range=(v_lower, v_upper), + lapack_driver=driver) + assert_array_almost_equal(sort(w_val), self.w[ind1:ind2+1]) + + def test_eigh_tridiagonal(self): + """Compare eigenvalues and eigenvectors of eigh_tridiagonal + with those of eig. 
""" + # can't use ?STERF when eigenvectors are requested + assert_raises(ValueError, eigh_tridiagonal, self.d, self.e, + lapack_driver='sterf') + for driver in ('stebz', 'stev', 'stemr', 'auto'): + w, evec = eigh_tridiagonal(self.d, self.e, lapack_driver=driver) + evec_ = evec[:, argsort(w)] + assert_array_almost_equal(sort(w), self.w) + assert_array_almost_equal(abs(evec_), abs(self.evec)) + + assert_raises(ValueError, eigh_tridiagonal, self.d, self.e, + lapack_driver='stev', select='i', select_range=(0, 1)) + for driver in ('stebz', 'stemr', 'auto'): + # extracting eigenvalues with respect to an index range + ind1 = 0 + ind2 = len(self.d)-1 + w, evec = eigh_tridiagonal( + self.d, self.e, select='i', select_range=(ind1, ind2), + lapack_driver=driver) + assert_array_almost_equal(sort(w), self.w) + assert_array_almost_equal(abs(evec), abs(self.evec)) + ind1 = 2 + ind2 = 6 + w, evec = eigh_tridiagonal( + self.d, self.e, select='i', select_range=(ind1, ind2), + lapack_driver=driver) + assert_array_almost_equal(sort(w), self.w[ind1:ind2+1]) + assert_array_almost_equal(abs(evec), + abs(self.evec[:, ind1:ind2+1])) + + # extracting eigenvalues with respect to a value range + v_lower = self.w[ind1] - 1.0e-5 + v_upper = self.w[ind2] + 1.0e-5 + w, evec = eigh_tridiagonal( + self.d, self.e, select='v', select_range=(v_lower, v_upper), + lapack_driver=driver) + assert_array_almost_equal(sort(w), self.w[ind1:ind2+1]) + assert_array_almost_equal(abs(evec), + abs(self.evec[:, ind1:ind2+1])) + + def test_eigh_tridiagonal_1x1(self): + """See gh-20075""" + a = np.array([-2.0]) + b = np.array([]) + x = eigh_tridiagonal(a, b, eigvals_only=True) + assert x.ndim == 1 + assert_allclose(x, a) + x, V = eigh_tridiagonal(a, b, select="i", select_range=(0, 0)) + assert x.ndim == 1 + assert V.ndim == 2 + assert_allclose(x, a) + assert_allclose(V, array([[1.]])) + + x, V = eigh_tridiagonal(a, b, select="v", select_range=(-2, 0)) + assert x.size == 0 + assert x.shape == (0,) + assert V.shape == (1, 0) + + +class TestEigh: + def setup_class(self): + np.random.seed(1234) + + def test_wrong_inputs(self): + # Nonsquare a + assert_raises(ValueError, eigh, np.ones([1, 2])) + # Nonsquare b + assert_raises(ValueError, eigh, np.ones([2, 2]), np.ones([2, 1])) + # Incompatible a, b sizes + assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([2, 2])) + # Wrong type parameter for generalized problem + assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]), + type=4) + # Both value and index subsets requested + assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]), + subset_by_value=[1, 2], subset_by_index=[2, 4]) + with np.testing.suppress_warnings() as sup: + sup.filter(DeprecationWarning, "Keyword argument 'eigvals") + assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]), + subset_by_value=[1, 2], eigvals=[2, 4]) + # Invalid upper index spec + assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]), + subset_by_index=[0, 4]) + with np.testing.suppress_warnings() as sup: + sup.filter(DeprecationWarning, "Keyword argument 'eigvals") + assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]), + eigvals=[0, 4]) + # Invalid lower index + assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]), + subset_by_index=[-2, 2]) + with np.testing.suppress_warnings() as sup: + sup.filter(DeprecationWarning, "Keyword argument 'eigvals") + assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]), + eigvals=[-2, 2]) + # Invalid index spec #2 + assert_raises(ValueError, eigh, 
np.ones([3, 3]), np.ones([3, 3]), + subset_by_index=[2, 0]) + with np.testing.suppress_warnings() as sup: + sup.filter(DeprecationWarning, "Keyword argument 'eigvals") + assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]), + subset_by_index=[2, 0]) + # Invalid value spec + assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]), + subset_by_value=[2, 0]) + # Invalid driver name + assert_raises(ValueError, eigh, np.ones([2, 2]), driver='wrong') + # Generalized driver selection without b + assert_raises(ValueError, eigh, np.ones([3, 3]), None, driver='gvx') + # Standard driver with b + assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]), + driver='evr') + # Subset request from invalid driver + assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]), + driver='gvd', subset_by_index=[1, 2]) + assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]), + driver='gvd', subset_by_index=[1, 2]) + + def test_nonpositive_b(self): + assert_raises(LinAlgError, eigh, np.ones([3, 3]), np.ones([3, 3])) + + # index based subsets are done in the legacy test_eigh() + def test_value_subsets(self): + for ind, dt in enumerate(DTYPES): + + a = _random_hermitian_matrix(20, dtype=dt) + w, v = eigh(a, subset_by_value=[-2, 2]) + assert_equal(v.shape[1], len(w)) + assert all((w > -2) & (w < 2)) + + b = _random_hermitian_matrix(20, posdef=True, dtype=dt) + w, v = eigh(a, b, subset_by_value=[-2, 2]) + assert_equal(v.shape[1], len(w)) + assert all((w > -2) & (w < 2)) + + def test_eigh_integer(self): + a = array([[1, 2], [2, 7]]) + b = array([[3, 1], [1, 5]]) + w, z = eigh(a) + w, z = eigh(a, b) + + def test_eigh_of_sparse(self): + # This tests the rejection of inputs that eigh cannot currently handle. + import scipy.sparse + a = scipy.sparse.identity(2).tocsc() + b = np.atleast_2d(a) + assert_raises(ValueError, eigh, a) + assert_raises(ValueError, eigh, b) + + @pytest.mark.parametrize('dtype_', DTYPES) + @pytest.mark.parametrize('driver', ("ev", "evd", "evr", "evx")) + def test_various_drivers_standard(self, driver, dtype_): + a = _random_hermitian_matrix(n=20, dtype=dtype_) + w, v = eigh(a, driver=driver) + assert_allclose(a @ v - (v * w), 0., + atol=1000*np.finfo(dtype_).eps, + rtol=0.) + + @pytest.mark.parametrize('type', (1, 2, 3)) + @pytest.mark.parametrize('driver', ("gv", "gvd", "gvx")) + def test_various_drivers_generalized(self, driver, type): + atol = np.spacing(5000.) + a = _random_hermitian_matrix(20) + b = _random_hermitian_matrix(20, posdef=True) + w, v = eigh(a=a, b=b, driver=driver, type=type) + if type == 1: + assert_allclose(a @ v - w*(b @ v), 0., atol=atol, rtol=0.) + elif type == 2: + assert_allclose(a @ b @ v - v * w, 0., atol=atol, rtol=0.) + else: + assert_allclose(b @ a @ v - v * w, 0., atol=atol, rtol=0.) 
+ + def test_eigvalsh_new_args(self): + a = _random_hermitian_matrix(5) + w = eigvalsh(a, subset_by_index=[1, 2]) + assert_equal(len(w), 2) + + w2 = eigvalsh(a, subset_by_index=[1, 2]) + assert_equal(len(w2), 2) + assert_allclose(w, w2) + + b = np.diag([1, 1.2, 1.3, 1.5, 2]) + w3 = eigvalsh(b, subset_by_value=[1, 1.4]) + assert_equal(len(w3), 2) + assert_allclose(w3, np.array([1.2, 1.3])) + + @pytest.mark.parametrize("method", [eigh, eigvalsh]) + def test_deprecation_warnings(self, method): + with pytest.warns(DeprecationWarning, + match="Keyword argument 'turbo'"): + method(np.zeros((2, 2)), turbo=True) + with pytest.warns(DeprecationWarning, + match="Keyword argument 'eigvals'"): + method(np.zeros((2, 2)), eigvals=[0, 1]) + with pytest.deprecated_call(match="use keyword arguments"): + method(np.zeros((2,2)), np.eye(2, 2), True) + + def test_deprecation_results(self): + a = _random_hermitian_matrix(3) + b = _random_hermitian_matrix(3, posdef=True) + + # check turbo gives same result as driver='gvd' + with np.testing.suppress_warnings() as sup: + sup.filter(DeprecationWarning, "Keyword argument 'turbo'") + w_dep, v_dep = eigh(a, b, turbo=True) + w, v = eigh(a, b, driver='gvd') + assert_allclose(w_dep, w) + assert_allclose(v_dep, v) + + # check eigvals gives the same result as subset_by_index + with np.testing.suppress_warnings() as sup: + sup.filter(DeprecationWarning, "Keyword argument 'eigvals'") + w_dep, v_dep = eigh(a, eigvals=[0, 1]) + w, v = eigh(a, subset_by_index=[0, 1]) + assert_allclose(w_dep, w) + assert_allclose(v_dep, v) + + +class TestSVD_GESDD: + lapack_driver = 'gesdd' + + def test_degenerate(self): + assert_raises(TypeError, svd, [[1.]], lapack_driver=1.) + assert_raises(ValueError, svd, [[1.]], lapack_driver='foo') + + def test_simple(self): + a = [[1, 2, 3], [1, 20, 3], [2, 5, 6]] + for full_matrices in (True, False): + u, s, vh = svd(a, full_matrices=full_matrices, + lapack_driver=self.lapack_driver) + assert_array_almost_equal(u.T @ u, eye(3)) + assert_array_almost_equal(vh.T @ vh, eye(3)) + sigma = zeros((u.shape[0], vh.shape[0]), s.dtype.char) + for i in range(len(s)): + sigma[i, i] = s[i] + assert_array_almost_equal(u @ sigma @ vh, a) + + def test_simple_singular(self): + a = [[1, 2, 3], [1, 2, 3], [2, 5, 6]] + for full_matrices in (True, False): + u, s, vh = svd(a, full_matrices=full_matrices, + lapack_driver=self.lapack_driver) + assert_array_almost_equal(u.T @ u, eye(3)) + assert_array_almost_equal(vh.T @ vh, eye(3)) + sigma = zeros((u.shape[0], vh.shape[0]), s.dtype.char) + for i in range(len(s)): + sigma[i, i] = s[i] + assert_array_almost_equal(u @ sigma @ vh, a) + + def test_simple_underdet(self): + a = [[1, 2, 3], [4, 5, 6]] + for full_matrices in (True, False): + u, s, vh = svd(a, full_matrices=full_matrices, + lapack_driver=self.lapack_driver) + assert_array_almost_equal(u.T @ u, eye(u.shape[0])) + sigma = zeros((u.shape[0], vh.shape[0]), s.dtype.char) + for i in range(len(s)): + sigma[i, i] = s[i] + assert_array_almost_equal(u @ sigma @ vh, a) + + def test_simple_overdet(self): + a = [[1, 2], [4, 5], [3, 4]] + for full_matrices in (True, False): + u, s, vh = svd(a, full_matrices=full_matrices, + lapack_driver=self.lapack_driver) + assert_array_almost_equal(u.T @ u, eye(u.shape[1])) + assert_array_almost_equal(vh.T @ vh, eye(2)) + sigma = zeros((u.shape[1], vh.shape[0]), s.dtype.char) + for i in range(len(s)): + sigma[i, i] = s[i] + assert_array_almost_equal(u @ sigma @ vh, a) + + def test_random(self): + rng = np.random.RandomState(1234) + n = 20 + m = 15 
+ for i in range(3): + for a in [rng.random([n, m]), rng.random([m, n])]: + for full_matrices in (True, False): + u, s, vh = svd(a, full_matrices=full_matrices, + lapack_driver=self.lapack_driver) + assert_array_almost_equal(u.T @ u, eye(u.shape[1])) + assert_array_almost_equal(vh @ vh.T, eye(vh.shape[0])) + sigma = zeros((u.shape[1], vh.shape[0]), s.dtype.char) + for i in range(len(s)): + sigma[i, i] = s[i] + assert_array_almost_equal(u @ sigma @ vh, a) + + def test_simple_complex(self): + a = [[1, 2, 3], [1, 2j, 3], [2, 5, 6]] + for full_matrices in (True, False): + u, s, vh = svd(a, full_matrices=full_matrices, + lapack_driver=self.lapack_driver) + assert_array_almost_equal(u.conj().T @ u, eye(u.shape[1])) + assert_array_almost_equal(vh.conj().T @ vh, eye(vh.shape[0])) + sigma = zeros((u.shape[0], vh.shape[0]), s.dtype.char) + for i in range(len(s)): + sigma[i, i] = s[i] + assert_array_almost_equal(u @ sigma @ vh, a) + + def test_random_complex(self): + rng = np.random.RandomState(1234) + n = 20 + m = 15 + for i in range(3): + for full_matrices in (True, False): + for a in [rng.random([n, m]), rng.random([m, n])]: + a = a + 1j*rng.random(list(a.shape)) + u, s, vh = svd(a, full_matrices=full_matrices, + lapack_driver=self.lapack_driver) + assert_array_almost_equal(u.conj().T @ u, + eye(u.shape[1])) + # This fails when [m,n] + # assert_array_almost_equal(vh.conj().T @ vh, + # eye(len(vh),dtype=vh.dtype.char)) + sigma = zeros((u.shape[1], vh.shape[0]), s.dtype.char) + for i in range(len(s)): + sigma[i, i] = s[i] + assert_array_almost_equal(u @ sigma @ vh, a) + + def test_crash_1580(self): + rng = np.random.RandomState(1234) + sizes = [(13, 23), (30, 50), (60, 100)] + for sz in sizes: + for dt in [np.float32, np.float64, np.complex64, np.complex128]: + a = rng.rand(*sz).astype(dt) + # should not crash + svd(a, lapack_driver=self.lapack_driver) + + def test_check_finite(self): + a = [[1, 2, 3], [1, 20, 3], [2, 5, 6]] + u, s, vh = svd(a, check_finite=False, lapack_driver=self.lapack_driver) + assert_array_almost_equal(u.T @ u, eye(3)) + assert_array_almost_equal(vh.T @ vh, eye(3)) + sigma = zeros((u.shape[0], vh.shape[0]), s.dtype.char) + for i in range(len(s)): + sigma[i, i] = s[i] + assert_array_almost_equal(u @ sigma @ vh, a) + + def test_gh_5039(self): + # This is a smoke test for https://github.com/scipy/scipy/issues/5039 + # + # The following is reported to raise "ValueError: On entry to DGESDD + # parameter number 12 had an illegal value". + # `interp1d([1,2,3,4], [1,2,3,4], kind='cubic')` + # This is reported to only show up on LAPACK 3.0.3. 
+ # + # The matrix below is taken from the call to + # `B = _fitpack._bsplmat(order, xk)` in interpolate._find_smoothest + b = np.array( + [[0.16666667, 0.66666667, 0.16666667, 0., 0., 0.], + [0., 0.16666667, 0.66666667, 0.16666667, 0., 0.], + [0., 0., 0.16666667, 0.66666667, 0.16666667, 0.], + [0., 0., 0., 0.16666667, 0.66666667, 0.16666667]]) + svd(b, lapack_driver=self.lapack_driver) + + @pytest.mark.skipif(not HAS_ILP64, reason="64-bit LAPACK required") + @pytest.mark.slow + def test_large_matrix(self): + check_free_memory(free_mb=17000) + A = np.zeros([1, 2**31], dtype=np.float32) + A[0, -1] = 1 + u, s, vh = svd(A, full_matrices=False) + assert_allclose(s[0], 1.0) + assert_allclose(u[0, 0] * vh[0, -1], 1.0) + + +class TestSVD_GESVD(TestSVD_GESDD): + lapack_driver = 'gesvd' + + +def test_svd_gesdd_nofegfault(): + # svd(a) with {U,VT}.size > INT_MAX does not segfault + # cf https://github.com/scipy/scipy/issues/14001 + df=np.ones((4799, 53130), dtype=np.float64) + with assert_raises(ValueError): + svd(df) + + +class TestSVDVals: + + def test_empty(self): + for a in [[]], np.empty((2, 0)), np.ones((0, 3)): + s = svdvals(a) + assert_equal(s, np.empty(0)) + + def test_simple(self): + a = [[1, 2, 3], [1, 2, 3], [2, 5, 6]] + s = svdvals(a) + assert_(len(s) == 3) + assert_(s[0] >= s[1] >= s[2]) + + def test_simple_underdet(self): + a = [[1, 2, 3], [4, 5, 6]] + s = svdvals(a) + assert_(len(s) == 2) + assert_(s[0] >= s[1]) + + def test_simple_overdet(self): + a = [[1, 2], [4, 5], [3, 4]] + s = svdvals(a) + assert_(len(s) == 2) + assert_(s[0] >= s[1]) + + def test_simple_complex(self): + a = [[1, 2, 3], [1, 20, 3j], [2, 5, 6]] + s = svdvals(a) + assert_(len(s) == 3) + assert_(s[0] >= s[1] >= s[2]) + + def test_simple_underdet_complex(self): + a = [[1, 2, 3], [4, 5j, 6]] + s = svdvals(a) + assert_(len(s) == 2) + assert_(s[0] >= s[1]) + + def test_simple_overdet_complex(self): + a = [[1, 2], [4, 5], [3j, 4]] + s = svdvals(a) + assert_(len(s) == 2) + assert_(s[0] >= s[1]) + + def test_check_finite(self): + a = [[1, 2, 3], [1, 2, 3], [2, 5, 6]] + s = svdvals(a, check_finite=False) + assert_(len(s) == 3) + assert_(s[0] >= s[1] >= s[2]) + + @pytest.mark.slow + def test_crash_2609(self): + np.random.seed(1234) + a = np.random.rand(1500, 2800) + # Shouldn't crash: + svdvals(a) + + +class TestDiagSVD: + + def test_simple(self): + assert_array_almost_equal(diagsvd([1, 0, 0], 3, 3), + [[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + + +class TestQR: + def test_simple(self): + a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]] + q, r = qr(a) + assert_array_almost_equal(q.T @ q, eye(3)) + assert_array_almost_equal(q @ r, a) + + def test_simple_left(self): + a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]] + q, r = qr(a) + c = [1, 2, 3] + qc, r2 = qr_multiply(a, c, "left") + assert_array_almost_equal(q @ c, qc) + assert_array_almost_equal(r, r2) + qc, r2 = qr_multiply(a, eye(3), "left") + assert_array_almost_equal(q, qc) + + def test_simple_right(self): + a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]] + q, r = qr(a) + c = [1, 2, 3] + qc, r2 = qr_multiply(a, c) + assert_array_almost_equal(c @ q, qc) + assert_array_almost_equal(r, r2) + qc, r = qr_multiply(a, eye(3)) + assert_array_almost_equal(q, qc) + + def test_simple_pivoting(self): + a = np.asarray([[8, 2, 3], [2, 9, 3], [5, 3, 6]]) + q, r, p = qr(a, pivoting=True) + d = abs(diag(r)) + assert_(np.all(d[1:] <= d[:-1])) + assert_array_almost_equal(q.T @ q, eye(3)) + assert_array_almost_equal(q @ r, a[:, p]) + q2, r2 = qr(a[:, p]) + assert_array_almost_equal(q, q2) + assert_array_almost_equal(r, r2) + + 
def test_simple_left_pivoting(self): + a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]] + q, r, jpvt = qr(a, pivoting=True) + c = [1, 2, 3] + qc, r, jpvt = qr_multiply(a, c, "left", True) + assert_array_almost_equal(q @ c, qc) + + def test_simple_right_pivoting(self): + a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]] + q, r, jpvt = qr(a, pivoting=True) + c = [1, 2, 3] + qc, r, jpvt = qr_multiply(a, c, pivoting=True) + assert_array_almost_equal(c @ q, qc) + + def test_simple_trap(self): + a = [[8, 2, 3], [2, 9, 3]] + q, r = qr(a) + assert_array_almost_equal(q.T @ q, eye(2)) + assert_array_almost_equal(q @ r, a) + + def test_simple_trap_pivoting(self): + a = np.asarray([[8, 2, 3], [2, 9, 3]]) + q, r, p = qr(a, pivoting=True) + d = abs(diag(r)) + assert_(np.all(d[1:] <= d[:-1])) + assert_array_almost_equal(q.T @ q, eye(2)) + assert_array_almost_equal(q @ r, a[:, p]) + q2, r2 = qr(a[:, p]) + assert_array_almost_equal(q, q2) + assert_array_almost_equal(r, r2) + + def test_simple_tall(self): + # full version + a = [[8, 2], [2, 9], [5, 3]] + q, r = qr(a) + assert_array_almost_equal(q.T @ q, eye(3)) + assert_array_almost_equal(q @ r, a) + + def test_simple_tall_pivoting(self): + # full version pivoting + a = np.asarray([[8, 2], [2, 9], [5, 3]]) + q, r, p = qr(a, pivoting=True) + d = abs(diag(r)) + assert_(np.all(d[1:] <= d[:-1])) + assert_array_almost_equal(q.T @ q, eye(3)) + assert_array_almost_equal(q @ r, a[:, p]) + q2, r2 = qr(a[:, p]) + assert_array_almost_equal(q, q2) + assert_array_almost_equal(r, r2) + + def test_simple_tall_e(self): + # economy version + a = [[8, 2], [2, 9], [5, 3]] + q, r = qr(a, mode='economic') + assert_array_almost_equal(q.T @ q, eye(2)) + assert_array_almost_equal(q @ r, a) + assert_equal(q.shape, (3, 2)) + assert_equal(r.shape, (2, 2)) + + def test_simple_tall_e_pivoting(self): + # economy version pivoting + a = np.asarray([[8, 2], [2, 9], [5, 3]]) + q, r, p = qr(a, pivoting=True, mode='economic') + d = abs(diag(r)) + assert_(np.all(d[1:] <= d[:-1])) + assert_array_almost_equal(q.T @ q, eye(2)) + assert_array_almost_equal(q @ r, a[:, p]) + q2, r2 = qr(a[:, p], mode='economic') + assert_array_almost_equal(q, q2) + assert_array_almost_equal(r, r2) + + def test_simple_tall_left(self): + a = [[8, 2], [2, 9], [5, 3]] + q, r = qr(a, mode="economic") + c = [1, 2] + qc, r2 = qr_multiply(a, c, "left") + assert_array_almost_equal(q @ c, qc) + assert_array_almost_equal(r, r2) + c = array([1, 2, 0]) + qc, r2 = qr_multiply(a, c, "left", overwrite_c=True) + assert_array_almost_equal(q @ c[:2], qc) + qc, r = qr_multiply(a, eye(2), "left") + assert_array_almost_equal(qc, q) + + def test_simple_tall_left_pivoting(self): + a = [[8, 2], [2, 9], [5, 3]] + q, r, jpvt = qr(a, mode="economic", pivoting=True) + c = [1, 2] + qc, r, kpvt = qr_multiply(a, c, "left", True) + assert_array_equal(jpvt, kpvt) + assert_array_almost_equal(q @ c, qc) + qc, r, jpvt = qr_multiply(a, eye(2), "left", True) + assert_array_almost_equal(qc, q) + + def test_simple_tall_right(self): + a = [[8, 2], [2, 9], [5, 3]] + q, r = qr(a, mode="economic") + c = [1, 2, 3] + cq, r2 = qr_multiply(a, c) + assert_array_almost_equal(c @ q, cq) + assert_array_almost_equal(r, r2) + cq, r = qr_multiply(a, eye(3)) + assert_array_almost_equal(cq, q) + + def test_simple_tall_right_pivoting(self): + a = [[8, 2], [2, 9], [5, 3]] + q, r, jpvt = qr(a, pivoting=True, mode="economic") + c = [1, 2, 3] + cq, r, jpvt = qr_multiply(a, c, pivoting=True) + assert_array_almost_equal(c @ q, cq) + cq, r, jpvt = qr_multiply(a, eye(3), pivoting=True) + 
assert_array_almost_equal(cq, q) + + def test_simple_fat(self): + # full version + a = [[8, 2, 5], [2, 9, 3]] + q, r = qr(a) + assert_array_almost_equal(q.T @ q, eye(2)) + assert_array_almost_equal(q @ r, a) + assert_equal(q.shape, (2, 2)) + assert_equal(r.shape, (2, 3)) + + def test_simple_fat_pivoting(self): + # full version pivoting + a = np.asarray([[8, 2, 5], [2, 9, 3]]) + q, r, p = qr(a, pivoting=True) + d = abs(diag(r)) + assert_(np.all(d[1:] <= d[:-1])) + assert_array_almost_equal(q.T @ q, eye(2)) + assert_array_almost_equal(q @ r, a[:, p]) + assert_equal(q.shape, (2, 2)) + assert_equal(r.shape, (2, 3)) + q2, r2 = qr(a[:, p]) + assert_array_almost_equal(q, q2) + assert_array_almost_equal(r, r2) + + def test_simple_fat_e(self): + # economy version + a = [[8, 2, 3], [2, 9, 5]] + q, r = qr(a, mode='economic') + assert_array_almost_equal(q.T @ q, eye(2)) + assert_array_almost_equal(q @ r, a) + assert_equal(q.shape, (2, 2)) + assert_equal(r.shape, (2, 3)) + + def test_simple_fat_e_pivoting(self): + # economy version pivoting + a = np.asarray([[8, 2, 3], [2, 9, 5]]) + q, r, p = qr(a, pivoting=True, mode='economic') + d = abs(diag(r)) + assert_(np.all(d[1:] <= d[:-1])) + assert_array_almost_equal(q.T @ q, eye(2)) + assert_array_almost_equal(q @ r, a[:, p]) + assert_equal(q.shape, (2, 2)) + assert_equal(r.shape, (2, 3)) + q2, r2 = qr(a[:, p], mode='economic') + assert_array_almost_equal(q, q2) + assert_array_almost_equal(r, r2) + + def test_simple_fat_left(self): + a = [[8, 2, 3], [2, 9, 5]] + q, r = qr(a, mode="economic") + c = [1, 2] + qc, r2 = qr_multiply(a, c, "left") + assert_array_almost_equal(q @ c, qc) + assert_array_almost_equal(r, r2) + qc, r = qr_multiply(a, eye(2), "left") + assert_array_almost_equal(qc, q) + + def test_simple_fat_left_pivoting(self): + a = [[8, 2, 3], [2, 9, 5]] + q, r, jpvt = qr(a, mode="economic", pivoting=True) + c = [1, 2] + qc, r, jpvt = qr_multiply(a, c, "left", True) + assert_array_almost_equal(q @ c, qc) + qc, r, jpvt = qr_multiply(a, eye(2), "left", True) + assert_array_almost_equal(qc, q) + + def test_simple_fat_right(self): + a = [[8, 2, 3], [2, 9, 5]] + q, r = qr(a, mode="economic") + c = [1, 2] + cq, r2 = qr_multiply(a, c) + assert_array_almost_equal(c @ q, cq) + assert_array_almost_equal(r, r2) + cq, r = qr_multiply(a, eye(2)) + assert_array_almost_equal(cq, q) + + def test_simple_fat_right_pivoting(self): + a = [[8, 2, 3], [2, 9, 5]] + q, r, jpvt = qr(a, pivoting=True, mode="economic") + c = [1, 2] + cq, r, jpvt = qr_multiply(a, c, pivoting=True) + assert_array_almost_equal(c @ q, cq) + cq, r, jpvt = qr_multiply(a, eye(2), pivoting=True) + assert_array_almost_equal(cq, q) + + def test_simple_complex(self): + a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]] + q, r = qr(a) + assert_array_almost_equal(q.conj().T @ q, eye(3)) + assert_array_almost_equal(q @ r, a) + + def test_simple_complex_left(self): + a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]] + q, r = qr(a) + c = [1, 2, 3+4j] + qc, r = qr_multiply(a, c, "left") + assert_array_almost_equal(q @ c, qc) + qc, r = qr_multiply(a, eye(3), "left") + assert_array_almost_equal(q, qc) + + def test_simple_complex_right(self): + a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]] + q, r = qr(a) + c = [1, 2, 3+4j] + qc, r = qr_multiply(a, c) + assert_array_almost_equal(c @ q, qc) + qc, r = qr_multiply(a, eye(3)) + assert_array_almost_equal(q, qc) + + def test_simple_tall_complex_left(self): + a = [[8, 2+3j], [2, 9], [5+7j, 3]] + q, r = qr(a, mode="economic") + c = [1, 2+2j] + qc, r2 = qr_multiply(a, c, "left") + 
assert_array_almost_equal(q @ c, qc) + assert_array_almost_equal(r, r2) + c = array([1, 2, 0]) + qc, r2 = qr_multiply(a, c, "left", overwrite_c=True) + assert_array_almost_equal(q @ c[:2], qc) + qc, r = qr_multiply(a, eye(2), "left") + assert_array_almost_equal(qc, q) + + def test_simple_complex_left_conjugate(self): + a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]] + q, r = qr(a) + c = [1, 2, 3+4j] + qc, r = qr_multiply(a, c, "left", conjugate=True) + assert_array_almost_equal(q.conj() @ c, qc) + + def test_simple_complex_tall_left_conjugate(self): + a = [[3, 3+4j], [5, 2+2j], [3, 2]] + q, r = qr(a, mode='economic') + c = [1, 3+4j] + qc, r = qr_multiply(a, c, "left", conjugate=True) + assert_array_almost_equal(q.conj() @ c, qc) + + def test_simple_complex_right_conjugate(self): + a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]] + q, r = qr(a) + c = np.array([1, 2, 3+4j]) + qc, r = qr_multiply(a, c, conjugate=True) + assert_array_almost_equal(c @ q.conj(), qc) + + def test_simple_complex_pivoting(self): + a = array([[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]]) + q, r, p = qr(a, pivoting=True) + d = abs(diag(r)) + assert_(np.all(d[1:] <= d[:-1])) + assert_array_almost_equal(q.conj().T @ q, eye(3)) + assert_array_almost_equal(q @ r, a[:, p]) + q2, r2 = qr(a[:, p]) + assert_array_almost_equal(q, q2) + assert_array_almost_equal(r, r2) + + def test_simple_complex_left_pivoting(self): + a = array([[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]]) + q, r, jpvt = qr(a, pivoting=True) + c = [1, 2, 3+4j] + qc, r, jpvt = qr_multiply(a, c, "left", True) + assert_array_almost_equal(q @ c, qc) + + def test_simple_complex_right_pivoting(self): + a = array([[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]]) + q, r, jpvt = qr(a, pivoting=True) + c = [1, 2, 3+4j] + qc, r, jpvt = qr_multiply(a, c, pivoting=True) + assert_array_almost_equal(c @ q, qc) + + def test_random(self): + rng = np.random.RandomState(1234) + n = 20 + for k in range(2): + a = rng.random([n, n]) + q, r = qr(a) + assert_array_almost_equal(q.T @ q, eye(n)) + assert_array_almost_equal(q @ r, a) + + def test_random_left(self): + rng = np.random.RandomState(1234) + n = 20 + for k in range(2): + a = rng.random([n, n]) + q, r = qr(a) + c = rng.random([n]) + qc, r = qr_multiply(a, c, "left") + assert_array_almost_equal(q @ c, qc) + qc, r = qr_multiply(a, eye(n), "left") + assert_array_almost_equal(q, qc) + + def test_random_right(self): + rng = np.random.RandomState(1234) + n = 20 + for k in range(2): + a = rng.random([n, n]) + q, r = qr(a) + c = rng.random([n]) + cq, r = qr_multiply(a, c) + assert_array_almost_equal(c @ q, cq) + cq, r = qr_multiply(a, eye(n)) + assert_array_almost_equal(q, cq) + + def test_random_pivoting(self): + rng = np.random.RandomState(1234) + n = 20 + for k in range(2): + a = rng.random([n, n]) + q, r, p = qr(a, pivoting=True) + d = abs(diag(r)) + assert_(np.all(d[1:] <= d[:-1])) + assert_array_almost_equal(q.T @ q, eye(n)) + assert_array_almost_equal(q @ r, a[:, p]) + q2, r2 = qr(a[:, p]) + assert_array_almost_equal(q, q2) + assert_array_almost_equal(r, r2) + + def test_random_tall(self): + rng = np.random.RandomState(1234) + # full version + m = 200 + n = 100 + for k in range(2): + a = rng.random([m, n]) + q, r = qr(a) + assert_array_almost_equal(q.T @ q, eye(m)) + assert_array_almost_equal(q @ r, a) + + def test_random_tall_left(self): + rng = np.random.RandomState(1234) + # full version + m = 200 + n = 100 + for k in range(2): + a = rng.random([m, n]) + q, r = qr(a, mode="economic") + c = rng.random([n]) + qc, r = qr_multiply(a, c, "left") + 
assert_array_almost_equal(q @ c, qc) + qc, r = qr_multiply(a, eye(n), "left") + assert_array_almost_equal(qc, q) + + def test_random_tall_right(self): + rng = np.random.RandomState(1234) + # full version + m = 200 + n = 100 + for k in range(2): + a = rng.random([m, n]) + q, r = qr(a, mode="economic") + c = rng.random([m]) + cq, r = qr_multiply(a, c) + assert_array_almost_equal(c @ q, cq) + cq, r = qr_multiply(a, eye(m)) + assert_array_almost_equal(cq, q) + + def test_random_tall_pivoting(self): + rng = np.random.RandomState(1234) + # full version pivoting + m = 200 + n = 100 + for k in range(2): + a = rng.random([m, n]) + q, r, p = qr(a, pivoting=True) + d = abs(diag(r)) + assert_(np.all(d[1:] <= d[:-1])) + assert_array_almost_equal(q.T @ q, eye(m)) + assert_array_almost_equal(q @ r, a[:, p]) + q2, r2 = qr(a[:, p]) + assert_array_almost_equal(q, q2) + assert_array_almost_equal(r, r2) + + def test_random_tall_e(self): + rng = np.random.RandomState(1234) + # economy version + m = 200 + n = 100 + for k in range(2): + a = rng.random([m, n]) + q, r = qr(a, mode='economic') + assert_array_almost_equal(q.T @ q, eye(n)) + assert_array_almost_equal(q @ r, a) + assert_equal(q.shape, (m, n)) + assert_equal(r.shape, (n, n)) + + def test_random_tall_e_pivoting(self): + rng = np.random.RandomState(1234) + # economy version pivoting + m = 200 + n = 100 + for k in range(2): + a = rng.random([m, n]) + q, r, p = qr(a, pivoting=True, mode='economic') + d = abs(diag(r)) + assert_(np.all(d[1:] <= d[:-1])) + assert_array_almost_equal(q.T @ q, eye(n)) + assert_array_almost_equal(q @ r, a[:, p]) + assert_equal(q.shape, (m, n)) + assert_equal(r.shape, (n, n)) + q2, r2 = qr(a[:, p], mode='economic') + assert_array_almost_equal(q, q2) + assert_array_almost_equal(r, r2) + + def test_random_trap(self): + rng = np.random.RandomState(1234) + m = 100 + n = 200 + for k in range(2): + a = rng.random([m, n]) + q, r = qr(a) + assert_array_almost_equal(q.T @ q, eye(m)) + assert_array_almost_equal(q @ r, a) + + def test_random_trap_pivoting(self): + rng = np.random.RandomState(1234) + m = 100 + n = 200 + for k in range(2): + a = rng.random([m, n]) + q, r, p = qr(a, pivoting=True) + d = abs(diag(r)) + assert_(np.all(d[1:] <= d[:-1])) + assert_array_almost_equal(q.T @ q, eye(m)) + assert_array_almost_equal(q @ r, a[:, p]) + q2, r2 = qr(a[:, p]) + assert_array_almost_equal(q, q2) + assert_array_almost_equal(r, r2) + + def test_random_complex(self): + rng = np.random.RandomState(1234) + n = 20 + for k in range(2): + a = rng.random([n, n]) + 1j*rng.random([n, n]) + q, r = qr(a) + assert_array_almost_equal(q.conj().T @ q, eye(n)) + assert_array_almost_equal(q @ r, a) + + def test_random_complex_left(self): + rng = np.random.RandomState(1234) + n = 20 + for k in range(2): + a = rng.random([n, n]) + 1j*rng.random([n, n]) + q, r = qr(a) + c = rng.random([n]) + 1j*rng.random([n]) + qc, r = qr_multiply(a, c, "left") + assert_array_almost_equal(q @ c, qc) + qc, r = qr_multiply(a, eye(n), "left") + assert_array_almost_equal(q, qc) + + def test_random_complex_right(self): + rng = np.random.RandomState(1234) + n = 20 + for k in range(2): + a = rng.random([n, n]) + 1j*rng.random([n, n]) + q, r = qr(a) + c = rng.random([n]) + 1j*rng.random([n]) + cq, r = qr_multiply(a, c) + assert_array_almost_equal(c @ q, cq) + cq, r = qr_multiply(a, eye(n)) + assert_array_almost_equal(q, cq) + + def test_random_complex_pivoting(self): + rng = np.random.RandomState(1234) + n = 20 + for k in range(2): + a = rng.random([n, n]) + 1j*rng.random([n, n]) + q, r, p 
= qr(a, pivoting=True) + d = abs(diag(r)) + assert_(np.all(d[1:] <= d[:-1])) + assert_array_almost_equal(q.conj().T @ q, eye(n)) + assert_array_almost_equal(q @ r, a[:, p]) + q2, r2 = qr(a[:, p]) + assert_array_almost_equal(q, q2) + assert_array_almost_equal(r, r2) + + def test_check_finite(self): + a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]] + q, r = qr(a, check_finite=False) + assert_array_almost_equal(q.T @ q, eye(3)) + assert_array_almost_equal(q @ r, a) + + def test_lwork(self): + a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]] + # Get comparison values + q, r = qr(a, lwork=None) + + # Test against minimum valid lwork + q2, r2 = qr(a, lwork=3) + assert_array_almost_equal(q2, q) + assert_array_almost_equal(r2, r) + + # Test against larger lwork + q3, r3 = qr(a, lwork=10) + assert_array_almost_equal(q3, q) + assert_array_almost_equal(r3, r) + + # Test against explicit lwork=-1 + q4, r4 = qr(a, lwork=-1) + assert_array_almost_equal(q4, q) + assert_array_almost_equal(r4, r) + + # Test against invalid lwork + assert_raises(Exception, qr, (a,), {'lwork': 0}) + assert_raises(Exception, qr, (a,), {'lwork': 2}) + + +class TestRQ: + def test_simple(self): + a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]] + r, q = rq(a) + assert_array_almost_equal(q @ q.T, eye(3)) + assert_array_almost_equal(r @ q, a) + + def test_r(self): + a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]] + r, q = rq(a) + r2 = rq(a, mode='r') + assert_array_almost_equal(r, r2) + + def test_random(self): + rng = np.random.RandomState(1234) + n = 20 + for k in range(2): + a = rng.random([n, n]) + r, q = rq(a) + assert_array_almost_equal(q @ q.T, eye(n)) + assert_array_almost_equal(r @ q, a) + + def test_simple_trap(self): + a = [[8, 2, 3], [2, 9, 3]] + r, q = rq(a) + assert_array_almost_equal(q.T @ q, eye(3)) + assert_array_almost_equal(r @ q, a) + + def test_simple_tall(self): + a = [[8, 2], [2, 9], [5, 3]] + r, q = rq(a) + assert_array_almost_equal(q.T @ q, eye(2)) + assert_array_almost_equal(r @ q, a) + + def test_simple_fat(self): + a = [[8, 2, 5], [2, 9, 3]] + r, q = rq(a) + assert_array_almost_equal(q @ q.T, eye(3)) + assert_array_almost_equal(r @ q, a) + + def test_simple_complex(self): + a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]] + r, q = rq(a) + assert_array_almost_equal(q @ q.conj().T, eye(3)) + assert_array_almost_equal(r @ q, a) + + def test_random_tall(self): + rng = np.random.RandomState(1234) + m = 200 + n = 100 + for k in range(2): + a = rng.random([m, n]) + r, q = rq(a) + assert_array_almost_equal(q @ q.T, eye(n)) + assert_array_almost_equal(r @ q, a) + + def test_random_trap(self): + rng = np.random.RandomState(1234) + m = 100 + n = 200 + for k in range(2): + a = rng.random([m, n]) + r, q = rq(a) + assert_array_almost_equal(q @ q.T, eye(n)) + assert_array_almost_equal(r @ q, a) + + def test_random_trap_economic(self): + rng = np.random.RandomState(1234) + m = 100 + n = 200 + for k in range(2): + a = rng.random([m, n]) + r, q = rq(a, mode='economic') + assert_array_almost_equal(q @ q.T, eye(m)) + assert_array_almost_equal(r @ q, a) + assert_equal(q.shape, (m, n)) + assert_equal(r.shape, (m, m)) + + def test_random_complex(self): + rng = np.random.RandomState(1234) + n = 20 + for k in range(2): + a = rng.random([n, n]) + 1j*rng.random([n, n]) + r, q = rq(a) + assert_array_almost_equal(q @ q.conj().T, eye(n)) + assert_array_almost_equal(r @ q, a) + + def test_random_complex_economic(self): + rng = np.random.RandomState(1234) + m = 100 + n = 200 + for k in range(2): + a = rng.random([m, n]) + 1j*rng.random([m, n]) + r, q = rq(a, mode='economic') + 
assert_array_almost_equal(q @ q.conj().T, eye(m)) + assert_array_almost_equal(r @ q, a) + assert_equal(q.shape, (m, n)) + assert_equal(r.shape, (m, m)) + + def test_check_finite(self): + a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]] + r, q = rq(a, check_finite=False) + assert_array_almost_equal(q @ q.T, eye(3)) + assert_array_almost_equal(r @ q, a) + + +class TestSchur: + + def check_schur(self, a, t, u, rtol, atol): + # Check that the Schur decomposition is correct. + assert_allclose(u @ t @ u.conj().T, a, rtol=rtol, atol=atol, + err_msg="Schur decomposition does not match 'a'") + # The expected value of u @ u.H - I is all zeros, so test + # with absolute tolerance only. + assert_allclose(u @ u.conj().T - np.eye(len(u)), 0, rtol=0, atol=atol, + err_msg="u is not unitary") + + def test_simple(self): + a = [[8, 12, 3], [2, 9, 3], [10, 3, 6]] + t, z = schur(a) + self.check_schur(a, t, z, rtol=1e-14, atol=5e-15) + tc, zc = schur(a, 'complex') + assert_(np.any(ravel(iscomplex(zc))) and np.any(ravel(iscomplex(tc)))) + self.check_schur(a, tc, zc, rtol=1e-14, atol=5e-15) + tc2, zc2 = rsf2csf(tc, zc) + self.check_schur(a, tc2, zc2, rtol=1e-14, atol=5e-15) + + @pytest.mark.parametrize( + 'sort, expected_diag', + [('lhp', [-np.sqrt(2), -0.5, np.sqrt(2), 0.5]), + ('rhp', [np.sqrt(2), 0.5, -np.sqrt(2), -0.5]), + ('iuc', [-0.5, 0.5, np.sqrt(2), -np.sqrt(2)]), + ('ouc', [np.sqrt(2), -np.sqrt(2), -0.5, 0.5]), + (lambda x: x >= 0.0, [np.sqrt(2), 0.5, -np.sqrt(2), -0.5])] + ) + def test_sort(self, sort, expected_diag): + # The exact eigenvalues of this matrix are + # -sqrt(2), sqrt(2), -1/2, 1/2. + a = [[4., 3., 1., -1.], + [-4.5, -3.5, -1., 1.], + [9., 6., -4., 4.5], + [6., 4., -3., 3.5]] + t, u, sdim = schur(a, sort=sort) + self.check_schur(a, t, u, rtol=1e-14, atol=5e-15) + assert_allclose(np.diag(t), expected_diag, rtol=1e-12) + assert_equal(2, sdim) + + def test_sort_errors(self): + a = [[4., 3., 1., -1.], + [-4.5, -3.5, -1., 1.], + [9., 6., -4., 4.5], + [6., 4., -3., 3.5]] + assert_raises(ValueError, schur, a, sort='unsupported') + assert_raises(ValueError, schur, a, sort=1) + + def test_check_finite(self): + a = [[8, 12, 3], [2, 9, 3], [10, 3, 6]] + t, z = schur(a, check_finite=False) + assert_array_almost_equal(z @ t @ z.conj().T, a) + + +class TestHessenberg: + + def test_simple(self): + a = [[-149, -50, -154], + [537, 180, 546], + [-27, -9, -25]] + h1 = [[-149.0000, 42.2037, -156.3165], + [-537.6783, 152.5511, -554.9272], + [0, 0.0728, 2.4489]] + h, q = hessenberg(a, calc_q=1) + assert_array_almost_equal(q.T @ a @ q, h) + assert_array_almost_equal(h, h1, decimal=4) + + def test_simple_complex(self): + a = [[-149, -50, -154], + [537, 180j, 546], + [-27j, -9, -25]] + h, q = hessenberg(a, calc_q=1) + assert_array_almost_equal(q.conj().T @ a @ q, h) + + def test_simple2(self): + a = [[1, 2, 3, 4, 5, 6, 7], + [0, 2, 3, 4, 6, 7, 2], + [0, 2, 2, 3, 0, 3, 2], + [0, 0, 2, 8, 0, 0, 2], + [0, 3, 1, 2, 0, 1, 2], + [0, 1, 2, 3, 0, 1, 0], + [0, 0, 0, 0, 0, 1, 2]] + h, q = hessenberg(a, calc_q=1) + assert_array_almost_equal(q.T @ a @ q, h) + + def test_simple3(self): + a = np.eye(3) + a[-1, 0] = 2 + h, q = hessenberg(a, calc_q=1) + assert_array_almost_equal(q.T @ a @ q, h) + + def test_random(self): + rng = np.random.RandomState(1234) + n = 20 + for k in range(2): + a = rng.random([n, n]) + h, q = hessenberg(a, calc_q=1) + assert_array_almost_equal(q.T @ a @ q, h) + + def test_random_complex(self): + rng = np.random.RandomState(1234) + n = 20 + for k in range(2): + a = rng.random([n, n]) + 1j*rng.random([n, n]) + h, q 
= hessenberg(a, calc_q=1) + assert_array_almost_equal(q.conj().T @ a @ q, h) + + def test_check_finite(self): + a = [[-149, -50, -154], + [537, 180, 546], + [-27, -9, -25]] + h1 = [[-149.0000, 42.2037, -156.3165], + [-537.6783, 152.5511, -554.9272], + [0, 0.0728, 2.4489]] + h, q = hessenberg(a, calc_q=1, check_finite=False) + assert_array_almost_equal(q.T @ a @ q, h) + assert_array_almost_equal(h, h1, decimal=4) + + def test_2x2(self): + a = [[2, 1], [7, 12]] + + h, q = hessenberg(a, calc_q=1) + assert_array_almost_equal(q, np.eye(2)) + assert_array_almost_equal(h, a) + + b = [[2-7j, 1+2j], [7+3j, 12-2j]] + h2, q2 = hessenberg(b, calc_q=1) + assert_array_almost_equal(q2, np.eye(2)) + assert_array_almost_equal(h2, b) + + +blas_provider = blas_version = None +if CONFIG is not None: + blas_provider = CONFIG['Build Dependencies']['blas']['name'] + blas_version = CONFIG['Build Dependencies']['blas']['version'] + + +class TestQZ: + @pytest.mark.xfail( + sys.platform == 'darwin' and + blas_provider == 'openblas' and + blas_version < "0.3.21.dev", + reason="gges[float32] broken for OpenBLAS on macOS, see gh-16949" + ) + def test_qz_single(self): + rng = np.random.RandomState(12345) + n = 5 + A = rng.random([n, n]).astype(float32) + B = rng.random([n, n]).astype(float32) + AA, BB, Q, Z = qz(A, B) + assert_array_almost_equal(Q @ AA @ Z.T, A, decimal=5) + assert_array_almost_equal(Q @ BB @ Z.T, B, decimal=5) + assert_array_almost_equal(Q @ Q.T, eye(n), decimal=5) + assert_array_almost_equal(Z @ Z.T, eye(n), decimal=5) + assert_(np.all(diag(BB) >= 0)) + + def test_qz_double(self): + rng = np.random.RandomState(12345) + n = 5 + A = rng.random([n, n]) + B = rng.random([n, n]) + AA, BB, Q, Z = qz(A, B) + assert_array_almost_equal(Q @ AA @ Z.T, A) + assert_array_almost_equal(Q @ BB @ Z.T, B) + assert_array_almost_equal(Q @ Q.T, eye(n)) + assert_array_almost_equal(Z @ Z.T, eye(n)) + assert_(np.all(diag(BB) >= 0)) + + def test_qz_complex(self): + rng = np.random.RandomState(12345) + n = 5 + A = rng.random([n, n]) + 1j*rng.random([n, n]) + B = rng.random([n, n]) + 1j*rng.random([n, n]) + AA, BB, Q, Z = qz(A, B) + assert_array_almost_equal(Q @ AA @ Z.conj().T, A) + assert_array_almost_equal(Q @ BB @ Z.conj().T, B) + assert_array_almost_equal(Q @ Q.conj().T, eye(n)) + assert_array_almost_equal(Z @ Z.conj().T, eye(n)) + assert_(np.all(diag(BB) >= 0)) + assert_(np.all(diag(BB).imag == 0)) + + def test_qz_complex64(self): + rng = np.random.RandomState(12345) + n = 5 + A = (rng.random([n, n]) + 1j*rng.random([n, n])).astype(complex64) + B = (rng.random([n, n]) + 1j*rng.random([n, n])).astype(complex64) + AA, BB, Q, Z = qz(A, B) + assert_array_almost_equal(Q @ AA @ Z.conj().T, A, decimal=5) + assert_array_almost_equal(Q @ BB @ Z.conj().T, B, decimal=5) + assert_array_almost_equal(Q @ Q.conj().T, eye(n), decimal=5) + assert_array_almost_equal(Z @ Z.conj().T, eye(n), decimal=5) + assert_(np.all(diag(BB) >= 0)) + assert_(np.all(diag(BB).imag == 0)) + + def test_qz_double_complex(self): + rng = np.random.RandomState(12345) + n = 5 + A = rng.random([n, n]) + B = rng.random([n, n]) + AA, BB, Q, Z = qz(A, B, output='complex') + aa = Q @ AA @ Z.conj().T + assert_array_almost_equal(aa.real, A) + assert_array_almost_equal(aa.imag, 0) + bb = Q @ BB @ Z.conj().T + assert_array_almost_equal(bb.real, B) + assert_array_almost_equal(bb.imag, 0) + assert_array_almost_equal(Q @ Q.conj().T, eye(n)) + assert_array_almost_equal(Z @ Z.conj().T, eye(n)) + assert_(np.all(diag(BB) >= 0)) + + def test_qz_double_sort(self): + # from 
https://www.nag.com/lapack-ex/node119.html + # NOTE: These matrices may be ill-conditioned and lead to a + # seg fault on certain python versions when compiled with + # sse2 or sse3 older ATLAS/LAPACK binaries for windows + # A = np.array([[3.9, 12.5, -34.5, -0.5], + # [ 4.3, 21.5, -47.5, 7.5], + # [ 4.3, 21.5, -43.5, 3.5], + # [ 4.4, 26.0, -46.0, 6.0 ]]) + + # B = np.array([[ 1.0, 2.0, -3.0, 1.0], + # [1.0, 3.0, -5.0, 4.0], + # [1.0, 3.0, -4.0, 3.0], + # [1.0, 3.0, -4.0, 4.0]]) + A = np.array([[3.9, 12.5, -34.5, 2.5], + [4.3, 21.5, -47.5, 7.5], + [4.3, 1.5, -43.5, 3.5], + [4.4, 6.0, -46.0, 6.0]]) + + B = np.array([[1.0, 1.0, -3.0, 1.0], + [1.0, 3.0, -5.0, 4.4], + [1.0, 2.0, -4.0, 1.0], + [1.2, 3.0, -4.0, 4.0]]) + + assert_raises(ValueError, qz, A, B, sort=lambda ar, ai, beta: ai == 0) + if False: + AA, BB, Q, Z, sdim = qz(A, B, sort=lambda ar, ai, beta: ai == 0) + # assert_(sdim == 2) + assert_(sdim == 4) + assert_array_almost_equal(Q @ AA @ Z.T, A) + assert_array_almost_equal(Q @ BB @ Z.T, B) + + # test absolute values bc the sign is ambiguous and + # might be platform dependent + assert_array_almost_equal(np.abs(AA), np.abs(np.array( + [[35.7864, -80.9061, -12.0629, -9.498], + [0., 2.7638, -2.3505, 7.3256], + [0., 0., 0.6258, -0.0398], + [0., 0., 0., -12.8217]])), 4) + assert_array_almost_equal(np.abs(BB), np.abs(np.array( + [[4.5324, -8.7878, 3.2357, -3.5526], + [0., 1.4314, -2.1894, 0.9709], + [0., 0., 1.3126, -0.3468], + [0., 0., 0., 0.559]])), 4) + assert_array_almost_equal(np.abs(Q), np.abs(np.array( + [[-0.4193, -0.605, -0.1894, -0.6498], + [-0.5495, 0.6987, 0.2654, -0.3734], + [-0.4973, -0.3682, 0.6194, 0.4832], + [-0.5243, 0.1008, -0.7142, 0.4526]])), 4) + assert_array_almost_equal(np.abs(Z), np.abs(np.array( + [[-0.9471, -0.2971, -0.1217, 0.0055], + [-0.0367, 0.1209, 0.0358, 0.9913], + [0.3171, -0.9041, -0.2547, 0.1312], + [0.0346, 0.2824, -0.9587, 0.0014]])), 4) + + # test absolute values bc the sign is ambiguous and might be platform + # dependent + # assert_array_almost_equal(abs(AA), abs(np.array([ + # [3.8009, -69.4505, 50.3135, -43.2884], + # [0.0000, 9.2033, -0.2001, 5.9881], + # [0.0000, 0.0000, 1.4279, 4.4453], + # [0.0000, 0.0000, 0.9019, -1.1962]])), 4) + # assert_array_almost_equal(abs(BB), abs(np.array([ + # [1.9005, -10.2285, 0.8658, -5.2134], + # [0.0000, 2.3008, 0.7915, 0.4262], + # [0.0000, 0.0000, 0.8101, 0.0000], + # [0.0000, 0.0000, 0.0000, -0.2823]])), 4) + # assert_array_almost_equal(abs(Q), abs(np.array([ + # [0.4642, 0.7886, 0.2915, -0.2786], + # [0.5002, -0.5986, 0.5638, -0.2713], + # [0.5002, 0.0154, -0.0107, 0.8657], + # [0.5331, -0.1395, -0.7727, -0.3151]])), 4) + # assert_array_almost_equal(dot(Q,Q.T), eye(4)) + # assert_array_almost_equal(abs(Z), abs(np.array([ + # [0.9961, -0.0014, 0.0887, -0.0026], + # [0.0057, -0.0404, -0.0938, -0.9948], + # [0.0626, 0.7194, -0.6908, 0.0363], + # [0.0626, -0.6934, -0.7114, 0.0956]])), 4) + # assert_array_almost_equal(dot(Z,Z.T), eye(4)) + + # def test_qz_complex_sort(self): + # cA = np.array([ + # [-21.10+22.50*1j, 53.50+-50.50*1j, -34.50+127.50*1j, 7.50+ 0.50*1j], + # [-0.46+ -7.78*1j, -3.50+-37.50*1j, -15.50+ 58.50*1j,-10.50+ -1.50*1j], + # [ 4.30+ -5.50*1j, 39.70+-17.10*1j, -68.50+ 12.50*1j, -7.50+ -3.50*1j], + # [ 5.50+ 4.40*1j, 14.40+ 43.30*1j, -32.50+-46.00*1j,-19.00+-32.50*1j]]) + + # cB = np.array([ + # [1.00+ -5.00*1j, 1.60+ 1.20*1j,-3.00+ 0.00*1j, 0.00+ -1.00*1j], + # [0.80+ -0.60*1j, 3.00+ -5.00*1j,-4.00+ 3.00*1j,-2.40+ -3.20*1j], + # [1.00+ 0.00*1j, 2.40+ 1.80*1j,-4.00+ -5.00*1j, 0.00+ -3.00*1j], + # 
[0.00+ 1.00*1j,-1.80+ 2.40*1j, 0.00+ -4.00*1j, 4.00+ -5.00*1j]]) + + # AAS,BBS,QS,ZS,sdim = qz(cA,cB,sort='lhp') + + # eigenvalues = diag(AAS)/diag(BBS) + # assert_(np.all(np.real(eigenvalues[:sdim] < 0))) + # assert_(np.all(np.real(eigenvalues[sdim:] > 0))) + + def test_check_finite(self): + rng = np.random.RandomState(12345) + n = 5 + A = rng.random([n, n]) + B = rng.random([n, n]) + AA, BB, Q, Z = qz(A, B, check_finite=False) + assert_array_almost_equal(Q @ AA @ Z.T, A) + assert_array_almost_equal(Q @ BB @ Z.T, B) + assert_array_almost_equal(Q @ Q.T, eye(n)) + assert_array_almost_equal(Z @ Z.T, eye(n)) + assert_(np.all(diag(BB) >= 0)) + + +class TestOrdQZ: + @classmethod + def setup_class(cls): + # https://www.nag.com/lapack-ex/node119.html + A1 = np.array([[-21.10 - 22.50j, 53.5 - 50.5j, -34.5 + 127.5j, + 7.5 + 0.5j], + [-0.46 - 7.78j, -3.5 - 37.5j, -15.5 + 58.5j, + -10.5 - 1.5j], + [4.30 - 5.50j, 39.7 - 17.1j, -68.5 + 12.5j, + -7.5 - 3.5j], + [5.50 + 4.40j, 14.4 + 43.3j, -32.5 - 46.0j, + -19.0 - 32.5j]]) + + B1 = np.array([[1.0 - 5.0j, 1.6 + 1.2j, -3 + 0j, 0.0 - 1.0j], + [0.8 - 0.6j, .0 - 5.0j, -4 + 3j, -2.4 - 3.2j], + [1.0 + 0.0j, 2.4 + 1.8j, -4 - 5j, 0.0 - 3.0j], + [0.0 + 1.0j, -1.8 + 2.4j, 0 - 4j, 4.0 - 5.0j]]) + + # https://www.nag.com/numeric/fl/nagdoc_fl23/xhtml/F08/f08yuf.xml + A2 = np.array([[3.9, 12.5, -34.5, -0.5], + [4.3, 21.5, -47.5, 7.5], + [4.3, 21.5, -43.5, 3.5], + [4.4, 26.0, -46.0, 6.0]]) + + B2 = np.array([[1, 2, -3, 1], + [1, 3, -5, 4], + [1, 3, -4, 3], + [1, 3, -4, 4]]) + + # example with the eigenvalues + # -0.33891648, 1.61217396+0.74013521j, 1.61217396-0.74013521j, + # 0.61244091 + # thus featuring: + # * one complex conjugate eigenvalue pair, + # * one eigenvalue in the lhp + # * 2 eigenvalues in the unit circle + # * 2 non-real eigenvalues + A3 = np.array([[5., 1., 3., 3.], + [4., 4., 2., 7.], + [7., 4., 1., 3.], + [0., 4., 8., 7.]]) + B3 = np.array([[8., 10., 6., 10.], + [7., 7., 2., 9.], + [9., 1., 6., 6.], + [5., 1., 4., 7.]]) + + # example with infinite eigenvalues + A4 = np.eye(2) + B4 = np.diag([0, 1]) + + # example with (alpha, beta) = (0, 0) + A5 = np.diag([1, 0]) + + cls.A = [A1, A2, A3, A4, A5] + cls.B = [B1, B2, B3, B4, A5] + + def qz_decomp(self, sort): + with np.errstate(all='raise'): + ret = [ordqz(Ai, Bi, sort=sort) for Ai, Bi in zip(self.A, self.B)] + return tuple(ret) + + def check(self, A, B, sort, AA, BB, alpha, beta, Q, Z): + Id = np.eye(*A.shape) + # make sure Q and Z are orthogonal + assert_array_almost_equal(Q @ Q.T.conj(), Id) + assert_array_almost_equal(Z @ Z.T.conj(), Id) + # check factorization + assert_array_almost_equal(Q @ AA, A @ Z) + assert_array_almost_equal(Q @ BB, B @ Z) + # check shape of AA and BB + assert_array_equal(np.tril(AA, -2), np.zeros(AA.shape)) + assert_array_equal(np.tril(BB, -1), np.zeros(BB.shape)) + # check eigenvalues + for i in range(A.shape[0]): + # does the current diagonal element belong to a 2-by-2 block + # that was already checked? 
+ if i > 0 and A[i, i - 1] != 0: + continue + # take care of 2-by-2 blocks + if i < AA.shape[0] - 1 and AA[i + 1, i] != 0: + evals, _ = eig(AA[i:i + 2, i:i + 2], BB[i:i + 2, i:i + 2]) + # make sure the pair of complex conjugate eigenvalues + # is ordered consistently (positive imaginary part first) + if evals[0].imag < 0: + evals = evals[[1, 0]] + tmp = alpha[i:i + 2]/beta[i:i + 2] + if tmp[0].imag < 0: + tmp = tmp[[1, 0]] + assert_array_almost_equal(evals, tmp) + else: + if alpha[i] == 0 and beta[i] == 0: + assert_equal(AA[i, i], 0) + assert_equal(BB[i, i], 0) + elif beta[i] == 0: + assert_equal(BB[i, i], 0) + else: + assert_almost_equal(AA[i, i]/BB[i, i], alpha[i]/beta[i]) + sortfun = _select_function(sort) + lastsort = True + for i in range(A.shape[0]): + cursort = sortfun(np.array([alpha[i]]), np.array([beta[i]])) + # once the sorting criterion was not matched all subsequent + # eigenvalues also shouldn't match + if not lastsort: + assert not cursort + lastsort = cursort + + def check_all(self, sort): + ret = self.qz_decomp(sort) + + for reti, Ai, Bi in zip(ret, self.A, self.B): + self.check(Ai, Bi, sort, *reti) + + def test_lhp(self): + self.check_all('lhp') + + def test_rhp(self): + self.check_all('rhp') + + def test_iuc(self): + self.check_all('iuc') + + def test_ouc(self): + self.check_all('ouc') + + def test_ref(self): + # real eigenvalues first (top-left corner) + def sort(x, y): + out = np.empty_like(x, dtype=bool) + nonzero = (y != 0) + out[~nonzero] = False + out[nonzero] = (x[nonzero]/y[nonzero]).imag == 0 + return out + + self.check_all(sort) + + def test_cef(self): + # complex eigenvalues first (top-left corner) + def sort(x, y): + out = np.empty_like(x, dtype=bool) + nonzero = (y != 0) + out[~nonzero] = False + out[nonzero] = (x[nonzero]/y[nonzero]).imag != 0 + return out + + self.check_all(sort) + + def test_diff_input_types(self): + ret = ordqz(self.A[1], self.B[2], sort='lhp') + self.check(self.A[1], self.B[2], 'lhp', *ret) + + ret = ordqz(self.B[2], self.A[1], sort='lhp') + self.check(self.B[2], self.A[1], 'lhp', *ret) + + def test_sort_explicit(self): + # Test order of the eigenvalues in the 2 x 2 case where we can + # explicitly compute the solution + A1 = np.eye(2) + B1 = np.diag([-2, 0.5]) + expected1 = [('lhp', [-0.5, 2]), + ('rhp', [2, -0.5]), + ('iuc', [-0.5, 2]), + ('ouc', [2, -0.5])] + A2 = np.eye(2) + B2 = np.diag([-2 + 1j, 0.5 + 0.5j]) + expected2 = [('lhp', [1/(-2 + 1j), 1/(0.5 + 0.5j)]), + ('rhp', [1/(0.5 + 0.5j), 1/(-2 + 1j)]), + ('iuc', [1/(-2 + 1j), 1/(0.5 + 0.5j)]), + ('ouc', [1/(0.5 + 0.5j), 1/(-2 + 1j)])] + # 'lhp' is ambiguous so don't test it + A3 = np.eye(2) + B3 = np.diag([2, 0]) + expected3 = [('rhp', [0.5, np.inf]), + ('iuc', [0.5, np.inf]), + ('ouc', [np.inf, 0.5])] + # 'rhp' is ambiguous so don't test it + A4 = np.eye(2) + B4 = np.diag([-2, 0]) + expected4 = [('lhp', [-0.5, np.inf]), + ('iuc', [-0.5, np.inf]), + ('ouc', [np.inf, -0.5])] + A5 = np.diag([0, 1]) + B5 = np.diag([0, 0.5]) + # 'lhp' and 'iuc' are ambiguous so don't test them + expected5 = [('rhp', [2, np.nan]), + ('ouc', [2, np.nan])] + + A = [A1, A2, A3, A4, A5] + B = [B1, B2, B3, B4, B5] + expected = [expected1, expected2, expected3, expected4, expected5] + for Ai, Bi, expectedi in zip(A, B, expected): + for sortstr, expected_eigvals in expectedi: + _, _, alpha, beta, _, _ = ordqz(Ai, Bi, sort=sortstr) + azero = (alpha == 0) + bzero = (beta == 0) + x = np.empty_like(alpha) + x[azero & bzero] = np.nan + x[~azero & bzero] = np.inf + x[~bzero] = alpha[~bzero]/beta[~bzero] + 
assert_allclose(expected_eigvals, x) + + +class TestOrdQZWorkspaceSize: + def test_decompose(self): + rng = np.random.RandomState(12345) + N = 202 + # raises error if lwork parameter to dtrsen is too small + for ddtype in [np.float32, np.float64]: + A = rng.random((N, N)).astype(ddtype) + B = rng.random((N, N)).astype(ddtype) + # sort = lambda ar, ai, b: ar**2 + ai**2 < b**2 + _ = ordqz(A, B, sort=lambda alpha, beta: alpha < beta, + output='real') + + for ddtype in [np.complex128, np.complex64]: + A = rng.random((N, N)).astype(ddtype) + B = rng.random((N, N)).astype(ddtype) + _ = ordqz(A, B, sort=lambda alpha, beta: alpha < beta, + output='complex') + + @pytest.mark.slow + def test_decompose_ouc(self): + rng = np.random.RandomState(12345) + N = 202 + # segfaults if lwork parameter to dtrsen is too small + for ddtype in [np.float32, np.float64, np.complex128, np.complex64]: + A = rng.random((N, N)).astype(ddtype) + B = rng.random((N, N)).astype(ddtype) + S, T, alpha, beta, U, V = ordqz(A, B, sort='ouc') + + +class TestDatacopied: + + def test_datacopied(self): + from scipy.linalg._decomp import _datacopied + + M = matrix([[0, 1], [2, 3]]) + A = asarray(M) + L = M.tolist() + M2 = M.copy() + + class Fake1: + def __array__(self, dtype=None, copy=None): + return A + + class Fake2: + __array_interface__ = A.__array_interface__ + + F1 = Fake1() + F2 = Fake2() + + for item, status in [(M, False), (A, False), (L, True), + (M2, False), (F1, False), (F2, False)]: + arr = asarray(item) + assert_equal(_datacopied(arr, item), status, + err_msg=repr(item)) + + +def test_aligned_mem_float(): + """Check linalg works with non-aligned memory (float32)""" + # Allocate 402 bytes of memory (allocated on boundary) + a = arange(402, dtype=np.uint8) + + # Create an array with boundary offset 4 + z = np.frombuffer(a.data, offset=2, count=100, dtype=float32) + z.shape = 10, 10 + + eig(z, overwrite_a=True) + eig(z.T, overwrite_a=True) + + +@pytest.mark.skipif(platform.machine() == 'ppc64le', + reason="crashes on ppc64le") +def test_aligned_mem(): + """Check linalg works with non-aligned memory (float64)""" + # Allocate 804 bytes of memory (allocated on boundary) + a = arange(804, dtype=np.uint8) + + # Create an array with boundary offset 4 + z = np.frombuffer(a.data, offset=4, count=100, dtype=float) + z.shape = 10, 10 + + eig(z, overwrite_a=True) + eig(z.T, overwrite_a=True) + + +def test_aligned_mem_complex(): + """Check that complex objects don't need to be completely aligned""" + # Allocate 1608 bytes of memory (allocated on boundary) + a = zeros(1608, dtype=np.uint8) + + # Create an array with boundary offset 8 + z = np.frombuffer(a.data, offset=8, count=100, dtype=complex) + z.shape = 10, 10 + + eig(z, overwrite_a=True) + # This does not need special handling + eig(z.T, overwrite_a=True) + + +def check_lapack_misaligned(func, args, kwargs): + args = list(args) + for i in range(len(args)): + a = args[:] + if isinstance(a[i], np.ndarray): + # Try misaligning a[i] + aa = np.zeros(a[i].size*a[i].dtype.itemsize+8, dtype=np.uint8) + aa = np.frombuffer(aa.data, offset=4, count=a[i].size, + dtype=a[i].dtype) + aa.shape = a[i].shape + aa[...] 
= a[i] + a[i] = aa + func(*a, **kwargs) + if len(a[i].shape) > 1: + a[i] = a[i].T + func(*a, **kwargs) + + +@pytest.mark.xfail(run=False, + reason="Ticket #1152, triggers a segfault in rare cases.") +def test_lapack_misaligned(): + M = np.eye(10, dtype=float) + R = np.arange(100) + R.shape = 10, 10 + S = np.arange(20000, dtype=np.uint8) + S = np.frombuffer(S.data, offset=4, count=100, dtype=float) + S.shape = 10, 10 + b = np.ones(10) + LU, piv = lu_factor(S) + for (func, args, kwargs) in [ + (eig, (S,), dict(overwrite_a=True)), # crash + (eigvals, (S,), dict(overwrite_a=True)), # no crash + (lu, (S,), dict(overwrite_a=True)), # no crash + (lu_factor, (S,), dict(overwrite_a=True)), # no crash + (lu_solve, ((LU, piv), b), dict(overwrite_b=True)), + (solve, (S, b), dict(overwrite_a=True, overwrite_b=True)), + (svd, (M,), dict(overwrite_a=True)), # no crash + (svd, (R,), dict(overwrite_a=True)), # no crash + (svd, (S,), dict(overwrite_a=True)), # crash + (svdvals, (S,), dict()), # no crash + (svdvals, (S,), dict(overwrite_a=True)), # crash + (cholesky, (M,), dict(overwrite_a=True)), # no crash + (qr, (S,), dict(overwrite_a=True)), # crash + (rq, (S,), dict(overwrite_a=True)), # crash + (hessenberg, (S,), dict(overwrite_a=True)), # crash + (schur, (S,), dict(overwrite_a=True)), # crash + ]: + check_lapack_misaligned(func, args, kwargs) +# not properly tested +# cholesky, rsf2csf, lu_solve, solve, eig_banded, eigvals_banded, eigh, diagsvd + + +class TestOverwrite: + def test_eig(self): + assert_no_overwrite(eig, [(3, 3)]) + assert_no_overwrite(eig, [(3, 3), (3, 3)]) + + def test_eigh(self): + assert_no_overwrite(eigh, [(3, 3)]) + assert_no_overwrite(eigh, [(3, 3), (3, 3)]) + + def test_eig_banded(self): + assert_no_overwrite(eig_banded, [(3, 2)]) + + def test_eigvals(self): + assert_no_overwrite(eigvals, [(3, 3)]) + + def test_eigvalsh(self): + assert_no_overwrite(eigvalsh, [(3, 3)]) + + def test_eigvals_banded(self): + assert_no_overwrite(eigvals_banded, [(3, 2)]) + + def test_hessenberg(self): + assert_no_overwrite(hessenberg, [(3, 3)]) + + def test_lu_factor(self): + assert_no_overwrite(lu_factor, [(3, 3)]) + + def test_lu_solve(self): + x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 8]]) + xlu = lu_factor(x) + assert_no_overwrite(lambda b: lu_solve(xlu, b), [(3,)]) + + def test_lu(self): + assert_no_overwrite(lu, [(3, 3)]) + + def test_qr(self): + assert_no_overwrite(qr, [(3, 3)]) + + def test_rq(self): + assert_no_overwrite(rq, [(3, 3)]) + + def test_schur(self): + assert_no_overwrite(schur, [(3, 3)]) + + def test_schur_complex(self): + assert_no_overwrite(lambda a: schur(a, 'complex'), [(3, 3)], + dtypes=[np.float32, np.float64]) + + def test_svd(self): + assert_no_overwrite(svd, [(3, 3)]) + assert_no_overwrite(lambda a: svd(a, lapack_driver='gesvd'), [(3, 3)]) + + def test_svdvals(self): + assert_no_overwrite(svdvals, [(3, 3)]) + + +def _check_orth(n, dtype, skip_big=False): + X = np.ones((n, 2), dtype=float).astype(dtype) + + eps = np.finfo(dtype).eps + tol = 1000 * eps + + Y = orth(X) + assert_equal(Y.shape, (n, 1)) + assert_allclose(Y, Y.mean(), atol=tol) + + Y = orth(X.T) + assert_equal(Y.shape, (2, 1)) + assert_allclose(Y, Y.mean(), atol=tol) + + if n > 5 and not skip_big: + np.random.seed(1) + X = np.random.rand(n, 5) @ np.random.rand(5, n) + X = X + 1e-4 * np.random.rand(n, 1) @ np.random.rand(1, n) + X = X.astype(dtype) + + Y = orth(X, rcond=1e-3) + assert_equal(Y.shape, (n, 5)) + + Y = orth(X, rcond=1e-6) + assert_equal(Y.shape, (n, 5 + 1)) + + +@pytest.mark.slow 
+@pytest.mark.skipif(np.dtype(np.intp).itemsize < 8, + reason="test only on 64-bit, else too slow") +def test_orth_memory_efficiency(): + # Pick n so that 16*n bytes is reasonable but 8*n*n bytes is unreasonable. + # Keep in mind that @pytest.mark.slow tests are likely to be running + # under configurations that support 4Gb+ memory for tests related to + # 32 bit overflow. + n = 10*1000*1000 + try: + _check_orth(n, np.float64, skip_big=True) + except MemoryError as e: + raise AssertionError( + 'memory error perhaps caused by orth regression' + ) from e + + +def test_orth(): + dtypes = [np.float32, np.float64, np.complex64, np.complex128] + sizes = [1, 2, 3, 10, 100] + for dt, n in itertools.product(dtypes, sizes): + _check_orth(n, dt) + + +def test_null_space(): + np.random.seed(1) + + dtypes = [np.float32, np.float64, np.complex64, np.complex128] + sizes = [1, 2, 3, 10, 100] + + for dt, n in itertools.product(dtypes, sizes): + X = np.ones((2, n), dtype=dt) + + eps = np.finfo(dt).eps + tol = 1000 * eps + + Y = null_space(X) + assert_equal(Y.shape, (n, n-1)) + assert_allclose(X @ Y, 0, atol=tol) + + Y = null_space(X.T) + assert_equal(Y.shape, (2, 1)) + assert_allclose(X.T @ Y, 0, atol=tol) + + X = np.random.randn(1 + n//2, n) + Y = null_space(X) + assert_equal(Y.shape, (n, n - 1 - n//2)) + assert_allclose(X @ Y, 0, atol=tol) + + if n > 5: + np.random.seed(1) + X = np.random.rand(n, 5) @ np.random.rand(5, n) + X = X + 1e-4 * np.random.rand(n, 1) @ np.random.rand(1, n) + X = X.astype(dt) + + Y = null_space(X, rcond=1e-3) + assert_equal(Y.shape, (n, n - 5)) + + Y = null_space(X, rcond=1e-6) + assert_equal(Y.shape, (n, n - 6)) + + +def test_subspace_angles(): + H = hadamard(8, float) + A = H[:, :3] + B = H[:, 3:] + assert_allclose(subspace_angles(A, B), [np.pi / 2.] * 3, atol=1e-14) + assert_allclose(subspace_angles(B, A), [np.pi / 2.] 
* 3, atol=1e-14) + for x in (A, B): + assert_allclose(subspace_angles(x, x), np.zeros(x.shape[1]), + atol=1e-14) + # From MATLAB function "subspace", which effectively only returns the + # last value that we calculate + x = np.array( + [[0.537667139546100, 0.318765239858981, 3.578396939725760, 0.725404224946106], # noqa: E501 + [1.833885014595086, -1.307688296305273, 2.769437029884877, -0.063054873189656], # noqa: E501 + [-2.258846861003648, -0.433592022305684, -1.349886940156521, 0.714742903826096], # noqa: E501 + [0.862173320368121, 0.342624466538650, 3.034923466331855, -0.204966058299775]]) # noqa: E501 + expected = 1.481454682101605 + assert_allclose(subspace_angles(x[:, :2], x[:, 2:])[0], expected, + rtol=1e-12) + assert_allclose(subspace_angles(x[:, 2:], x[:, :2])[0], expected, + rtol=1e-12) + expected = 0.746361174247302 + assert_allclose(subspace_angles(x[:, :2], x[:, [2]]), expected, rtol=1e-12) + assert_allclose(subspace_angles(x[:, [2]], x[:, :2]), expected, rtol=1e-12) + expected = 0.487163718534313 + assert_allclose(subspace_angles(x[:, :3], x[:, [3]]), expected, rtol=1e-12) + assert_allclose(subspace_angles(x[:, [3]], x[:, :3]), expected, rtol=1e-12) + expected = 0.328950515907756 + assert_allclose(subspace_angles(x[:, :2], x[:, 1:]), [expected, 0], + atol=1e-12) + # Degenerate conditions + assert_raises(ValueError, subspace_angles, x[0], x) + assert_raises(ValueError, subspace_angles, x, x[0]) + assert_raises(ValueError, subspace_angles, x[:-1], x) + + # Test branch if mask.any is True: + A = np.array([[1, 0, 0], + [0, 1, 0], + [0, 0, 1], + [0, 0, 0], + [0, 0, 0]]) + B = np.array([[1, 0, 0], + [0, 1, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 1]]) + expected = np.array([np.pi/2, 0, 0]) + assert_allclose(subspace_angles(A, B), expected, rtol=1e-12) + + # Complex + # second column in "b" does not affect result, just there so that + # b can have more cols than a, and vice-versa (both conditional code paths) + a = [[1 + 1j], [0]] + b = [[1 - 1j, 0], [0, 1]] + assert_allclose(subspace_angles(a, b), 0., atol=1e-14) + assert_allclose(subspace_angles(b, a), 0., atol=1e-14) + + +class TestCDF2RDF: + + def matmul(self, a, b): + return np.einsum('...ij,...jk->...ik', a, b) + + def assert_eig_valid(self, w, v, x): + assert_array_almost_equal( + self.matmul(v, w), + self.matmul(x, v) + ) + + def test_single_array0x0real(self): + # eig doesn't support 0x0 in old versions of numpy + X = np.empty((0, 0)) + w, v = np.empty(0), np.empty((0, 0)) + wr, vr = cdf2rdf(w, v) + self.assert_eig_valid(wr, vr, X) + + def test_single_array2x2_real(self): + X = np.array([[1, 2], [3, -1]]) + w, v = np.linalg.eig(X) + wr, vr = cdf2rdf(w, v) + self.assert_eig_valid(wr, vr, X) + + def test_single_array2x2_complex(self): + X = np.array([[1, 2], [-2, 1]]) + w, v = np.linalg.eig(X) + wr, vr = cdf2rdf(w, v) + self.assert_eig_valid(wr, vr, X) + + def test_single_array3x3_real(self): + X = np.array([[1, 2, 3], [1, 2, 3], [2, 5, 6]]) + w, v = np.linalg.eig(X) + wr, vr = cdf2rdf(w, v) + self.assert_eig_valid(wr, vr, X) + + def test_single_array3x3_complex(self): + X = np.array([[1, 2, 3], [0, 4, 5], [0, -5, 4]]) + w, v = np.linalg.eig(X) + wr, vr = cdf2rdf(w, v) + self.assert_eig_valid(wr, vr, X) + + def test_random_1d_stacked_arrays(self): + # cannot test M == 0 due to bug in old numpy + for M in range(1, 7): + np.random.seed(999999999) + X = np.random.rand(100, M, M) + w, v = np.linalg.eig(X) + wr, vr = cdf2rdf(w, v) + self.assert_eig_valid(wr, vr, X) + + def test_random_2d_stacked_arrays(self): + # cannot test M == 0 
due to bug in old numpy + for M in range(1, 7): + X = np.random.rand(10, 10, M, M) + w, v = np.linalg.eig(X) + wr, vr = cdf2rdf(w, v) + self.assert_eig_valid(wr, vr, X) + + def test_low_dimensionality_error(self): + w, v = np.empty(()), np.array((2,)) + assert_raises(ValueError, cdf2rdf, w, v) + + def test_not_square_error(self): + # Check that passing a non-square array raises a ValueError. + w, v = np.arange(3), np.arange(6).reshape(3, 2) + assert_raises(ValueError, cdf2rdf, w, v) + + def test_swapped_v_w_error(self): + # Check that exchanging places of w and v raises ValueError. + X = np.array([[1, 2, 3], [0, 4, 5], [0, -5, 4]]) + w, v = np.linalg.eig(X) + assert_raises(ValueError, cdf2rdf, v, w) + + def test_non_associated_error(self): + # Check that passing non-associated eigenvectors raises a ValueError. + w, v = np.arange(3), np.arange(16).reshape(4, 4) + assert_raises(ValueError, cdf2rdf, w, v) + + def test_not_conjugate_pairs(self): + # Check that passing non-conjugate pairs raises a ValueError. + X = np.array([[1, 2, 3], [1, 2, 3], [2, 5, 6+1j]]) + w, v = np.linalg.eig(X) + assert_raises(ValueError, cdf2rdf, w, v) + + # different arrays in the stack, so not conjugate + X = np.array([ + [[1, 2, 3], [1, 2, 3], [2, 5, 6+1j]], + [[1, 2, 3], [1, 2, 3], [2, 5, 6-1j]], + ]) + w, v = np.linalg.eig(X) + assert_raises(ValueError, cdf2rdf, w, v) diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_decomp_cholesky.py b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_decomp_cholesky.py new file mode 100644 index 0000000000000000000000000000000000000000..9354bf93a967954f8d563d43edcb8f909c747be2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_decomp_cholesky.py @@ -0,0 +1,219 @@ +import pytest +from numpy.testing import assert_array_almost_equal, assert_array_equal +from pytest import raises as assert_raises + +import numpy as np +from numpy import array, transpose, dot, conjugate, zeros_like, empty +from numpy.random import random +from scipy.linalg import cholesky, cholesky_banded, cho_solve_banded, \ + cho_factor, cho_solve + +from scipy.linalg._testutils import assert_no_overwrite + + +class TestCholesky: + + def test_simple(self): + a = [[8, 2, 3], [2, 9, 3], [3, 3, 6]] + c = cholesky(a) + assert_array_almost_equal(dot(transpose(c), c), a) + c = transpose(c) + a = dot(c, transpose(c)) + assert_array_almost_equal(cholesky(a, lower=1), c) + + def test_check_finite(self): + a = [[8, 2, 3], [2, 9, 3], [3, 3, 6]] + c = cholesky(a, check_finite=False) + assert_array_almost_equal(dot(transpose(c), c), a) + c = transpose(c) + a = dot(c, transpose(c)) + assert_array_almost_equal(cholesky(a, lower=1, check_finite=False), c) + + def test_simple_complex(self): + m = array([[3+1j, 3+4j, 5], [0, 2+2j, 2+7j], [0, 0, 7+4j]]) + a = dot(transpose(conjugate(m)), m) + c = cholesky(a) + a1 = dot(transpose(conjugate(c)), c) + assert_array_almost_equal(a, a1) + c = transpose(c) + a = dot(c, transpose(conjugate(c))) + assert_array_almost_equal(cholesky(a, lower=1), c) + + def test_random(self): + n = 20 + for k in range(2): + m = random([n, n]) + for i in range(n): + m[i, i] = 20*(.1+m[i, i]) + a = dot(transpose(m), m) + c = cholesky(a) + a1 = dot(transpose(c), c) + assert_array_almost_equal(a, a1) + c = transpose(c) + a = dot(c, transpose(c)) + assert_array_almost_equal(cholesky(a, lower=1), c) + + def test_random_complex(self): + n = 20 + for k in range(2): + m = random([n, n])+1j*random([n, n]) + for i in range(n): + m[i, i] = 20*(.1+abs(m[i, i])) + a = 
dot(transpose(conjugate(m)), m) + c = cholesky(a) + a1 = dot(transpose(conjugate(c)), c) + assert_array_almost_equal(a, a1) + c = transpose(c) + a = dot(c, transpose(conjugate(c))) + assert_array_almost_equal(cholesky(a, lower=1), c) + + @pytest.mark.xslow + def test_int_overflow(self): + # regression test for + # https://github.com/scipy/scipy/issues/17436 + # the problem was an int overflow in zeroing out + # the unused triangular part + n = 47_000 + x = np.eye(n, dtype=np.float64, order='F') + x[:4, :4] = np.array([[4, -2, 3, -1], + [-2, 4, -3, 1], + [3, -3, 5, 0], + [-1, 1, 0, 5]]) + + cholesky(x, check_finite=False, overwrite_a=True) # should not segfault + + +class TestCholeskyBanded: + """Tests for cholesky_banded() and cho_solve_banded.""" + + def test_check_finite(self): + # Symmetric positive definite banded matrix `a` + a = array([[4.0, 1.0, 0.0, 0.0], + [1.0, 4.0, 0.5, 0.0], + [0.0, 0.5, 4.0, 0.2], + [0.0, 0.0, 0.2, 4.0]]) + # Banded storage form of `a`. + ab = array([[-1.0, 1.0, 0.5, 0.2], + [4.0, 4.0, 4.0, 4.0]]) + c = cholesky_banded(ab, lower=False, check_finite=False) + ufac = zeros_like(a) + ufac[list(range(4)), list(range(4))] = c[-1] + ufac[(0, 1, 2), (1, 2, 3)] = c[0, 1:] + assert_array_almost_equal(a, dot(ufac.T, ufac)) + + b = array([0.0, 0.5, 4.2, 4.2]) + x = cho_solve_banded((c, False), b, check_finite=False) + assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0]) + + def test_upper_real(self): + # Symmetric positive definite banded matrix `a` + a = array([[4.0, 1.0, 0.0, 0.0], + [1.0, 4.0, 0.5, 0.0], + [0.0, 0.5, 4.0, 0.2], + [0.0, 0.0, 0.2, 4.0]]) + # Banded storage form of `a`. + ab = array([[-1.0, 1.0, 0.5, 0.2], + [4.0, 4.0, 4.0, 4.0]]) + c = cholesky_banded(ab, lower=False) + ufac = zeros_like(a) + ufac[list(range(4)), list(range(4))] = c[-1] + ufac[(0, 1, 2), (1, 2, 3)] = c[0, 1:] + assert_array_almost_equal(a, dot(ufac.T, ufac)) + + b = array([0.0, 0.5, 4.2, 4.2]) + x = cho_solve_banded((c, False), b) + assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0]) + + def test_upper_complex(self): + # Hermitian positive definite banded matrix `a` + a = array([[4.0, 1.0, 0.0, 0.0], + [1.0, 4.0, 0.5, 0.0], + [0.0, 0.5, 4.0, -0.2j], + [0.0, 0.0, 0.2j, 4.0]]) + # Banded storage form of `a`. + ab = array([[-1.0, 1.0, 0.5, -0.2j], + [4.0, 4.0, 4.0, 4.0]]) + c = cholesky_banded(ab, lower=False) + ufac = zeros_like(a) + ufac[list(range(4)), list(range(4))] = c[-1] + ufac[(0, 1, 2), (1, 2, 3)] = c[0, 1:] + assert_array_almost_equal(a, dot(ufac.conj().T, ufac)) + + b = array([0.0, 0.5, 4.0-0.2j, 0.2j + 4.0]) + x = cho_solve_banded((c, False), b) + assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0]) + + def test_lower_real(self): + # Symmetric positive definite banded matrix `a` + a = array([[4.0, 1.0, 0.0, 0.0], + [1.0, 4.0, 0.5, 0.0], + [0.0, 0.5, 4.0, 0.2], + [0.0, 0.0, 0.2, 4.0]]) + # Banded storage form of `a`. + ab = array([[4.0, 4.0, 4.0, 4.0], + [1.0, 0.5, 0.2, -1.0]]) + c = cholesky_banded(ab, lower=True) + lfac = zeros_like(a) + lfac[list(range(4)), list(range(4))] = c[0] + lfac[(1, 2, 3), (0, 1, 2)] = c[1, :3] + assert_array_almost_equal(a, dot(lfac, lfac.T)) + + b = array([0.0, 0.5, 4.2, 4.2]) + x = cho_solve_banded((c, True), b) + assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0]) + + def test_lower_complex(self): + # Hermitian positive definite banded matrix `a` + a = array([[4.0, 1.0, 0.0, 0.0], + [1.0, 4.0, 0.5, 0.0], + [0.0, 0.5, 4.0, -0.2j], + [0.0, 0.0, 0.2j, 4.0]]) + # Banded storage form of `a`. 
+ ab = array([[4.0, 4.0, 4.0, 4.0], + [1.0, 0.5, 0.2j, -1.0]]) + c = cholesky_banded(ab, lower=True) + lfac = zeros_like(a) + lfac[list(range(4)), list(range(4))] = c[0] + lfac[(1, 2, 3), (0, 1, 2)] = c[1, :3] + assert_array_almost_equal(a, dot(lfac, lfac.conj().T)) + + b = array([0.0, 0.5j, 3.8j, 3.8]) + x = cho_solve_banded((c, True), b) + assert_array_almost_equal(x, [0.0, 0.0, 1.0j, 1.0]) + + +class TestOverwrite: + def test_cholesky(self): + assert_no_overwrite(cholesky, [(3, 3)]) + + def test_cho_factor(self): + assert_no_overwrite(cho_factor, [(3, 3)]) + + def test_cho_solve(self): + x = array([[2, -1, 0], [-1, 2, -1], [0, -1, 2]]) + xcho = cho_factor(x) + assert_no_overwrite(lambda b: cho_solve(xcho, b), [(3,)]) + + def test_cholesky_banded(self): + assert_no_overwrite(cholesky_banded, [(2, 3)]) + + def test_cho_solve_banded(self): + x = array([[0, -1, -1], [2, 2, 2]]) + xcho = cholesky_banded(x) + assert_no_overwrite(lambda b: cho_solve_banded((xcho, False), b), + [(3,)]) + + +class TestEmptyArray: + def test_cho_factor_empty_square(self): + a = empty((0, 0)) + b = array([]) + c = array([[]]) + d = [] + e = [[]] + + x, _ = cho_factor(a) + assert_array_equal(x, a) + + for x in ([b, c, d, e]): + assert_raises(ValueError, cho_factor, x) diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_decomp_cossin.py b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_decomp_cossin.py new file mode 100644 index 0000000000000000000000000000000000000000..3302eaa5bfe618a8d980cd292c97858ed1e1ef0e --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_decomp_cossin.py @@ -0,0 +1,157 @@ +import pytest +import numpy as np +from numpy.random import default_rng +from numpy.testing import assert_allclose + +from scipy.linalg.lapack import _compute_lwork +from scipy.stats import ortho_group, unitary_group +from scipy.linalg import cossin, get_lapack_funcs + +REAL_DTYPES = (np.float32, np.float64) +COMPLEX_DTYPES = (np.complex64, np.complex128) +DTYPES = REAL_DTYPES + COMPLEX_DTYPES + + +@pytest.mark.parametrize('dtype_', DTYPES) +@pytest.mark.parametrize('m, p, q', + [ + (2, 1, 1), + (3, 2, 1), + (3, 1, 2), + (4, 2, 2), + (4, 1, 2), + (40, 12, 20), + (40, 30, 1), + (40, 1, 30), + (100, 50, 1), + (100, 50, 50), + ]) +@pytest.mark.parametrize('swap_sign', [True, False]) +def test_cossin(dtype_, m, p, q, swap_sign): + rng = default_rng(1708093570726217) + if dtype_ in COMPLEX_DTYPES: + x = np.array(unitary_group.rvs(m, random_state=rng), dtype=dtype_) + else: + x = np.array(ortho_group.rvs(m, random_state=rng), dtype=dtype_) + + u, cs, vh = cossin(x, p, q, + swap_sign=swap_sign) + assert_allclose(x, u @ cs @ vh, rtol=0., atol=m*1e3*np.finfo(dtype_).eps) + assert u.dtype == dtype_ + # Test for float32 or float 64 + assert cs.dtype == np.real(u).dtype + assert vh.dtype == dtype_ + + u, cs, vh = cossin([x[:p, :q], x[:p, q:], x[p:, :q], x[p:, q:]], + swap_sign=swap_sign) + assert_allclose(x, u @ cs @ vh, rtol=0., atol=m*1e3*np.finfo(dtype_).eps) + assert u.dtype == dtype_ + assert cs.dtype == np.real(u).dtype + assert vh.dtype == dtype_ + + _, cs2, vh2 = cossin(x, p, q, + compute_u=False, + swap_sign=swap_sign) + assert_allclose(cs, cs2, rtol=0., atol=10*np.finfo(dtype_).eps) + assert_allclose(vh, vh2, rtol=0., atol=10*np.finfo(dtype_).eps) + + u2, cs2, _ = cossin(x, p, q, + compute_vh=False, + swap_sign=swap_sign) + assert_allclose(u, u2, rtol=0., atol=10*np.finfo(dtype_).eps) + assert_allclose(cs, cs2, rtol=0., atol=10*np.finfo(dtype_).eps) + + _, cs2, _ = 
cossin(x, p, q, + compute_u=False, + compute_vh=False, + swap_sign=swap_sign) + assert_allclose(cs, cs2, rtol=0., atol=10*np.finfo(dtype_).eps) + + +def test_cossin_mixed_types(): + rng = default_rng(1708093736390459) + x = np.array(ortho_group.rvs(4, random_state=rng), dtype=np.float64) + u, cs, vh = cossin([x[:2, :2], + np.array(x[:2, 2:], dtype=np.complex128), + x[2:, :2], + x[2:, 2:]]) + + assert u.dtype == np.complex128 + assert cs.dtype == np.float64 + assert vh.dtype == np.complex128 + assert_allclose(x, u @ cs @ vh, rtol=0., + atol=1e4 * np.finfo(np.complex128).eps) + + +def test_cossin_error_incorrect_subblocks(): + with pytest.raises(ValueError, match="be due to missing p, q arguments."): + cossin(([1, 2], [3, 4, 5], [6, 7], [8, 9, 10])) + + +def test_cossin_error_empty_subblocks(): + with pytest.raises(ValueError, match="x11.*empty"): + cossin(([], [], [], [])) + with pytest.raises(ValueError, match="x12.*empty"): + cossin(([1, 2], [], [6, 7], [8, 9, 10])) + with pytest.raises(ValueError, match="x21.*empty"): + cossin(([1, 2], [3, 4, 5], [], [8, 9, 10])) + with pytest.raises(ValueError, match="x22.*empty"): + cossin(([1, 2], [3, 4, 5], [2], [])) + + +def test_cossin_error_missing_partitioning(): + with pytest.raises(ValueError, match=".*exactly four arrays.* got 2"): + cossin(unitary_group.rvs(2)) + + with pytest.raises(ValueError, match=".*might be due to missing p, q"): + cossin(unitary_group.rvs(4)) + + +def test_cossin_error_non_iterable(): + with pytest.raises(ValueError, match="containing the subblocks of X"): + cossin(12j) + + +def test_cossin_error_non_square(): + with pytest.raises(ValueError, match="only supports square"): + cossin(np.array([[1, 2]]), 1, 1) + + +def test_cossin_error_partitioning(): + x = np.array(ortho_group.rvs(4), dtype=np.float64) + with pytest.raises(ValueError, match="invalid p=0.*0
+ if m >= n:
+ assert_allclose(u.conj().T.dot(u), np.eye(n), atol=1e-15)
+ else:
+ assert_allclose(u.dot(u.conj().T), np.eye(m), atol=1e-15)
+ # p is Hermitian positive semidefinite.
+ assert_allclose(p.conj().T, p)
+ evals = eigh(p, eigvals_only=True)
+ nonzero_evals = evals[abs(evals) > 1e-14]
+ assert_((nonzero_evals >= 0).all())
+
+ u, p = polar(a, side='left')
+ assert_equal(u.shape, (m, n))
+ assert_equal(p.shape, (m, m))
+ # a = pu
+ assert_allclose(p.dot(u), a, atol=product_atol)
+ if m >= n:
+ assert_allclose(u.conj().T.dot(u), np.eye(n), atol=1e-15)
+ else:
+ assert_allclose(u.dot(u.conj().T), np.eye(m), atol=1e-15)
+ # p is Hermitian positive semidefinite.
+ assert_allclose(p.conj().T, p)
+ evals = eigh(p, eigvals_only=True)
+ nonzero_evals = evals[abs(evals) > 1e-14]
+ assert_((nonzero_evals >= 0).all())
+
+
+def test_precomputed_cases():
+ for a, side, expected_u, expected_p in precomputed_cases:
+ check_precomputed_polar(a, side, expected_u, expected_p)
+
+
+def test_verify_cases():
+ for a in verify_cases:
+ verify_polar(a)
+
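+# A minimal stand-alone sketch of the properties verify_polar() checks above:
+# polar() factors a as u @ p with u having orthonormal columns (or rows) and
+# p Hermitian positive semidefinite. The helper below is illustrative only
+# and is not called by the test suite.
+def _polar_property_sketch():
+ import numpy as np
+ from scipy.linalg import polar
+ rng = np.random.default_rng(0)
+ a = rng.standard_normal((5, 3))
+ u, p = polar(a, side='right')  # a = u @ p
+ assert np.allclose(u @ p, a)
+ assert np.allclose(u.T @ u, np.eye(3))  # orthonormal columns since m >= n
+ assert (np.linalg.eigvalsh(p) > -1e-12).all()  # p is PSD up to roundoff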
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_decomp_update.py b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_decomp_update.py
new file mode 100644
index 0000000000000000000000000000000000000000..2222c25ae6aa8f46ca35aa276e4fc4e85b4e7100
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_decomp_update.py
@@ -0,0 +1,1700 @@
+import itertools
+
+import numpy as np
+from numpy.testing import assert_, assert_allclose, assert_equal
+from pytest import raises as assert_raises
+from scipy import linalg
+import scipy.linalg._decomp_update as _decomp_update
+from scipy.linalg._decomp_update import qr_delete, qr_update, qr_insert
+
+def assert_unitary(a, rtol=None, atol=None, assert_sqr=True):
+ if rtol is None:
+ rtol = 10.0 ** -(np.finfo(a.dtype).precision-2)
+ if atol is None:
+ atol = 10*np.finfo(a.dtype).eps
+
+ if assert_sqr:
+ assert_(a.shape[0] == a.shape[1], 'unitary matrices must be square')
+ aTa = np.dot(a.T.conj(), a)
+ assert_allclose(aTa, np.eye(a.shape[1]), rtol=rtol, atol=atol)
+
+def assert_upper_tri(a, rtol=None, atol=None):
+ if rtol is None:
+ rtol = 10.0 ** -(np.finfo(a.dtype).precision-2)
+ if atol is None:
+ atol = 2*np.finfo(a.dtype).eps
+ mask = np.tri(a.shape[0], a.shape[1], -1, np.bool_)
+ assert_allclose(a[mask], 0.0, rtol=rtol, atol=atol)
+
+def check_qr(q, r, a, rtol, atol, assert_sqr=True):
+ assert_unitary(q, rtol, atol, assert_sqr)
+ assert_upper_tri(r, rtol, atol)
+ assert_allclose(q.dot(r), a, rtol=rtol, atol=atol)
+
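+# A minimal illustration of what check_qr() verifies: a freshly computed full
+# QR factorization from scipy satisfies all three properties. This sketch is
+# not exercised by the tests below.
+def _check_qr_sketch():
+ a = np.random.RandomState(0).random((6, 4))
+ q, r = linalg.qr(a)  # full mode: q is (6, 6) unitary, r is (6, 4)
+ check_qr(q, r, a, rtol=1e-12, atol=1e-12)
+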
+def make_strided(arrs):
+ strides = [(3, 7), (2, 2), (3, 4), (4, 2), (5, 4), (2, 3), (2, 1), (4, 5)]
+ kmax = len(strides)
+ k = 0
+ ret = []
+ for a in arrs:
+ if a.ndim == 1:
+ s = strides[k % kmax]
+ k += 1
+ base = np.zeros(s[0]*a.shape[0]+s[1], a.dtype)
+ view = base[s[1]::s[0]]
+ view[...] = a
+ elif a.ndim == 2:
+ s = strides[k % kmax]
+ t = strides[(k+1) % kmax]
+ k += 2
+ base = np.zeros((s[0]*a.shape[0]+s[1], t[0]*a.shape[1]+t[1]),
+ a.dtype)
+ view = base[s[1]::s[0], t[1]::t[0]]
+ view[...] = a
+ else:
+ raise ValueError('make_strided only works for ndim = 1 or'
+ ' 2 arrays')
+ ret.append(view)
+ return ret
+
+def negate_strides(arrs):
+ ret = []
+ for a in arrs:
+ b = np.zeros_like(a)
+ if b.ndim == 2:
+ b = b[::-1, ::-1]
+ elif b.ndim == 1:
+ b = b[::-1]
+ else:
+ raise ValueError('negate_strides only works for ndim = 1 or'
+ ' 2 arrays')
+ b[...] = a
+ ret.append(b)
+ return ret
+
+def nonitemsize_strides(arrs):
+ out = []
+ for a in arrs:
+ a_dtype = a.dtype
+ b = np.zeros(a.shape, [('a', a_dtype), ('junk', 'S1')])
+ c = b.getfield(a_dtype)
+ c[...] = a
+ out.append(c)
+ return out
+
+
+def make_nonnative(arrs):
+ return [a.astype(a.dtype.newbyteorder()) for a in arrs]
+
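+# A small sanity-check sketch (illustrative only, never called by the tests)
+# showing what make_strided, negate_strides and make_nonnative produce: the
+# values are unchanged, only the memory layout or byte order differs.
+def _layout_helpers_sketch():
+ a = np.arange(12.0).reshape(3, 4)
+ strided = make_strided([a])[0]
+ reversed_view = negate_strides([a])[0]
+ swapped = make_nonnative([a])[0]
+ assert np.array_equal(strided, a) and not strided.flags['C_CONTIGUOUS']
+ assert np.array_equal(reversed_view, a) and reversed_view.strides[0] < 0
+ assert np.array_equal(swapped, a) and not swapped.dtype.isnative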
+
+class BaseQRdeltas:
+ def setup_method(self):
+ self.rtol = 10.0 ** -(np.finfo(self.dtype).precision-2)
+ self.atol = 10 * np.finfo(self.dtype).eps
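+ # For example, float64 has precision 15, giving rtol = 1e-13 and
+ # atol of about 2.2e-15; float32 has precision 6, giving rtol = 1e-4
+ # and atol of about 1.2e-6.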
+
+ def generate(self, type, mode='full'):
+ np.random.seed(29382)
+ shape = {'sqr': (8, 8), 'tall': (12, 7), 'fat': (7, 12),
+ 'Mx1': (8, 1), '1xN': (1, 8), '1x1': (1, 1)}[type]
+ a = np.random.random(shape)
+ if np.iscomplexobj(self.dtype.type(1)):
+ b = np.random.random(shape)
+ a = a + 1j * b
+ a = a.astype(self.dtype)
+ q, r = linalg.qr(a, mode=mode)
+ return a, q, r
+
+class BaseQRdelete(BaseQRdeltas):
+ def test_sqr_1_row(self):
+ a, q, r = self.generate('sqr')
+ for row in range(r.shape[0]):
+ q1, r1 = qr_delete(q, r, row, overwrite_qr=False)
+ a1 = np.delete(a, row, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_sqr_p_row(self):
+ a, q, r = self.generate('sqr')
+ for ndel in range(2, 6):
+ for row in range(a.shape[0]-ndel):
+ q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False)
+ a1 = np.delete(a, slice(row, row+ndel), 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_sqr_1_col(self):
+ a, q, r = self.generate('sqr')
+ for col in range(r.shape[1]):
+ q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False)
+ a1 = np.delete(a, col, 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_sqr_p_col(self):
+ a, q, r = self.generate('sqr')
+ for ndel in range(2, 6):
+ for col in range(r.shape[1]-ndel):
+ q1, r1 = qr_delete(q, r, col, ndel, which='col',
+ overwrite_qr=False)
+ a1 = np.delete(a, slice(col, col+ndel), 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_tall_1_row(self):
+ a, q, r = self.generate('tall')
+ for row in range(r.shape[0]):
+ q1, r1 = qr_delete(q, r, row, overwrite_qr=False)
+ a1 = np.delete(a, row, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_tall_p_row(self):
+ a, q, r = self.generate('tall')
+ for ndel in range(2, 6):
+ for row in range(a.shape[0]-ndel):
+ q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False)
+ a1 = np.delete(a, slice(row, row+ndel), 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_tall_1_col(self):
+ a, q, r = self.generate('tall')
+ for col in range(r.shape[1]):
+ q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False)
+ a1 = np.delete(a, col, 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_tall_p_col(self):
+ a, q, r = self.generate('tall')
+ for ndel in range(2, 6):
+ for col in range(r.shape[1]-ndel):
+ q1, r1 = qr_delete(q, r, col, ndel, which='col',
+ overwrite_qr=False)
+ a1 = np.delete(a, slice(col, col+ndel), 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_fat_1_row(self):
+ a, q, r = self.generate('fat')
+ for row in range(r.shape[0]):
+ q1, r1 = qr_delete(q, r, row, overwrite_qr=False)
+ a1 = np.delete(a, row, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_fat_p_row(self):
+ a, q, r = self.generate('fat')
+ for ndel in range(2, 6):
+ for row in range(a.shape[0]-ndel):
+ q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False)
+ a1 = np.delete(a, slice(row, row+ndel), 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_fat_1_col(self):
+ a, q, r = self.generate('fat')
+ for col in range(r.shape[1]):
+ q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False)
+ a1 = np.delete(a, col, 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_fat_p_col(self):
+ a, q, r = self.generate('fat')
+ for ndel in range(2, 6):
+ for col in range(r.shape[1]-ndel):
+ q1, r1 = qr_delete(q, r, col, ndel, which='col',
+ overwrite_qr=False)
+ a1 = np.delete(a, slice(col, col+ndel), 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_economic_1_row(self):
+ # this test always starts and ends with an economic decomp.
+ a, q, r = self.generate('tall', 'economic')
+ for row in range(r.shape[0]):
+ q1, r1 = qr_delete(q, r, row, overwrite_qr=False)
+ a1 = np.delete(a, row, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+ # For economic row deletes, removing p rows can leave the factorization
+ # economic, square, or fat:
+ #   eco - p rows = eco
+ #   eco - p rows = sqr
+ #   eco - p rows = fat
+ def base_economic_p_row_xxx(self, ndel):
+ a, q, r = self.generate('tall', 'economic')
+ for row in range(a.shape[0]-ndel):
+ q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False)
+ a1 = np.delete(a, slice(row, row+ndel), 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+ def test_economic_p_row_economic(self):
+ # (12, 7) - (3, 7) = (9,7) --> stays economic
+ self.base_economic_p_row_xxx(3)
+
+ def test_economic_p_row_sqr(self):
+ # (12, 7) - (5, 7) = (7, 7) --> becomes square
+ self.base_economic_p_row_xxx(5)
+
+ def test_economic_p_row_fat(self):
+ # (12, 7) - (7,7) = (5, 7) --> becomes fat
+ self.base_economic_p_row_xxx(7)
+
+ def test_economic_1_col(self):
+ a, q, r = self.generate('tall', 'economic')
+ for col in range(r.shape[1]):
+ q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False)
+ a1 = np.delete(a, col, 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+ def test_economic_p_col(self):
+ a, q, r = self.generate('tall', 'economic')
+ for ndel in range(2, 6):
+ for col in range(r.shape[1]-ndel):
+ q1, r1 = qr_delete(q, r, col, ndel, which='col',
+ overwrite_qr=False)
+ a1 = np.delete(a, slice(col, col+ndel), 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+ def test_Mx1_1_row(self):
+ a, q, r = self.generate('Mx1')
+ for row in range(r.shape[0]):
+ q1, r1 = qr_delete(q, r, row, overwrite_qr=False)
+ a1 = np.delete(a, row, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_Mx1_p_row(self):
+ a, q, r = self.generate('Mx1')
+ for ndel in range(2, 6):
+ for row in range(a.shape[0]-ndel):
+ q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False)
+ a1 = np.delete(a, slice(row, row+ndel), 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_1xN_1_col(self):
+ a, q, r = self.generate('1xN')
+ for col in range(r.shape[1]):
+ q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False)
+ a1 = np.delete(a, col, 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_1xN_p_col(self):
+ a, q, r = self.generate('1xN')
+ for ndel in range(2, 6):
+ for col in range(r.shape[1]-ndel):
+ q1, r1 = qr_delete(q, r, col, ndel, which='col',
+ overwrite_qr=False)
+ a1 = np.delete(a, slice(col, col+ndel), 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_Mx1_economic_1_row(self):
+ a, q, r = self.generate('Mx1', 'economic')
+ for row in range(r.shape[0]):
+ q1, r1 = qr_delete(q, r, row, overwrite_qr=False)
+ a1 = np.delete(a, row, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+ def test_Mx1_economic_p_row(self):
+ a, q, r = self.generate('Mx1', 'economic')
+ for ndel in range(2, 6):
+ for row in range(a.shape[0]-ndel):
+ q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False)
+ a1 = np.delete(a, slice(row, row+ndel), 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+ def test_delete_last_1_row(self):
+ # full and eco are the same for 1xN
+ a, q, r = self.generate('1xN')
+ q1, r1 = qr_delete(q, r, 0, 1, 'row')
+ assert_equal(q1, np.ndarray(shape=(0, 0), dtype=q.dtype))
+ assert_equal(r1, np.ndarray(shape=(0, r.shape[1]), dtype=r.dtype))
+
+ def test_delete_last_p_row(self):
+ a, q, r = self.generate('tall', 'full')
+ q1, r1 = qr_delete(q, r, 0, a.shape[0], 'row')
+ assert_equal(q1, np.ndarray(shape=(0, 0), dtype=q.dtype))
+ assert_equal(r1, np.ndarray(shape=(0, r.shape[1]), dtype=r.dtype))
+
+ a, q, r = self.generate('tall', 'economic')
+ q1, r1 = qr_delete(q, r, 0, a.shape[0], 'row')
+ assert_equal(q1, np.ndarray(shape=(0, 0), dtype=q.dtype))
+ assert_equal(r1, np.ndarray(shape=(0, r.shape[1]), dtype=r.dtype))
+
+ def test_delete_last_1_col(self):
+ a, q, r = self.generate('Mx1', 'economic')
+ q1, r1 = qr_delete(q, r, 0, 1, 'col')
+ assert_equal(q1, np.ndarray(shape=(q.shape[0], 0), dtype=q.dtype))
+ assert_equal(r1, np.ndarray(shape=(0, 0), dtype=r.dtype))
+
+ a, q, r = self.generate('Mx1', 'full')
+ q1, r1 = qr_delete(q, r, 0, 1, 'col')
+ assert_unitary(q1)
+ assert_(q1.dtype == q.dtype)
+ assert_(q1.shape == q.shape)
+ assert_equal(r1, np.ndarray(shape=(r.shape[0], 0), dtype=r.dtype))
+
+ def test_delete_last_p_col(self):
+ a, q, r = self.generate('tall', 'full')
+ q1, r1 = qr_delete(q, r, 0, a.shape[1], 'col')
+ assert_unitary(q1)
+ assert_(q1.dtype == q.dtype)
+ assert_(q1.shape == q.shape)
+ assert_equal(r1, np.ndarray(shape=(r.shape[0], 0), dtype=r.dtype))
+
+ a, q, r = self.generate('tall', 'economic')
+ q1, r1 = qr_delete(q, r, 0, a.shape[1], 'col')
+ assert_equal(q1, np.ndarray(shape=(q.shape[0], 0), dtype=q.dtype))
+ assert_equal(r1, np.ndarray(shape=(0, 0), dtype=r.dtype))
+
+ def test_delete_1x1_row_col(self):
+ a, q, r = self.generate('1x1')
+ q1, r1 = qr_delete(q, r, 0, 1, 'row')
+ assert_equal(q1, np.ndarray(shape=(0, 0), dtype=q.dtype))
+ assert_equal(r1, np.ndarray(shape=(0, r.shape[1]), dtype=r.dtype))
+
+ a, q, r = self.generate('1x1')
+ q1, r1 = qr_delete(q, r, 0, 1, 'col')
+ assert_unitary(q1)
+ assert_(q1.dtype == q.dtype)
+ assert_(q1.shape == q.shape)
+ assert_equal(r1, np.ndarray(shape=(r.shape[0], 0), dtype=r.dtype))
+
+ # For a full QR, row deletes and single-column deletes should be able to
+ # handle any non-negative strides (only row and column vector
+ # operations are used). Deleting p columns requires Fortran-ordered
+ # Q and R and will make a copy as necessary. Economic QR row deletes
+ # require a contiguous q.
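+ # Concretely, "overwritten" means the results come back as views into the
+ # inputs; a sketch of the expectation (illustrative, not a test) is:
+ #     q1, r1 = qr_delete(q, r, 3, 1, 'row', overwrite_qr=True)
+ #     assert np.shares_memory(q1, q) and np.shares_memory(r1, r)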
+
+ def base_non_simple_strides(self, adjust_strides, ks, p, which,
+ overwriteable):
+ if which == 'row':
+ qind = (slice(p,None), slice(p,None))
+ rind = (slice(p,None), slice(None))
+ else:
+ qind = (slice(None), slice(None))
+ rind = (slice(None), slice(None,-p))
+
+ for type, k in itertools.product(['sqr', 'tall', 'fat'], ks):
+ a, q0, r0 = self.generate(type)
+ qs, rs = adjust_strides((q0, r0))
+ if p == 1:
+ a1 = np.delete(a, k, 0 if which == 'row' else 1)
+ else:
+ s = slice(k,k+p)
+ if k < 0:
+ s = slice(k, k + p +
+ (a.shape[0] if which == 'row' else a.shape[1]))
+ a1 = np.delete(a, s, 0 if which == 'row' else 1)
+
+ # For each of q and r in turn, we call qr_delete with that array strided
+ # and overwrite_qr=False, then with overwrite_qr=True, and make sure
+ # that q and r are still overwritten when overwriting is requested.
+
+ q = q0.copy('F')
+ r = r0.copy('F')
+ q1, r1 = qr_delete(qs, r, k, p, which, False)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+ q1o, r1o = qr_delete(qs, r, k, p, which, True)
+ check_qr(q1o, r1o, a1, self.rtol, self.atol)
+ if overwriteable:
+ assert_allclose(q1o, qs[qind], rtol=self.rtol, atol=self.atol)
+ assert_allclose(r1o, r[rind], rtol=self.rtol, atol=self.atol)
+
+ q = q0.copy('F')
+ r = r0.copy('F')
+ q2, r2 = qr_delete(q, rs, k, p, which, False)
+ check_qr(q2, r2, a1, self.rtol, self.atol)
+ q2o, r2o = qr_delete(q, rs, k, p, which, True)
+ check_qr(q2o, r2o, a1, self.rtol, self.atol)
+ if overwriteable:
+ assert_allclose(q2o, q[qind], rtol=self.rtol, atol=self.atol)
+ assert_allclose(r2o, rs[rind], rtol=self.rtol, atol=self.atol)
+
+ q = q0.copy('F')
+ r = r0.copy('F')
+ # since some of these were consumed above
+ qs, rs = adjust_strides((q, r))
+ q3, r3 = qr_delete(qs, rs, k, p, which, False)
+ check_qr(q3, r3, a1, self.rtol, self.atol)
+ q3o, r3o = qr_delete(qs, rs, k, p, which, True)
+ check_qr(q3o, r3o, a1, self.rtol, self.atol)
+ if overwriteable:
+ assert_allclose(q3o, qs[qind], rtol=self.rtol, atol=self.atol)
+ assert_allclose(r3o, rs[rind], rtol=self.rtol, atol=self.atol)
+
+ def test_non_unit_strides_1_row(self):
+ self.base_non_simple_strides(make_strided, [0], 1, 'row', True)
+
+ def test_non_unit_strides_p_row(self):
+ self.base_non_simple_strides(make_strided, [0], 3, 'row', True)
+
+ def test_non_unit_strides_1_col(self):
+ self.base_non_simple_strides(make_strided, [0], 1, 'col', True)
+
+ def test_non_unit_strides_p_col(self):
+ self.base_non_simple_strides(make_strided, [0], 3, 'col', False)
+
+ def test_neg_strides_1_row(self):
+ self.base_non_simple_strides(negate_strides, [0], 1, 'row', False)
+
+ def test_neg_strides_p_row(self):
+ self.base_non_simple_strides(negate_strides, [0], 3, 'row', False)
+
+ def test_neg_strides_1_col(self):
+ self.base_non_simple_strides(negate_strides, [0], 1, 'col', False)
+
+ def test_neg_strides_p_col(self):
+ self.base_non_simple_strides(negate_strides, [0], 3, 'col', False)
+
+ def test_non_itemize_strides_1_row(self):
+ self.base_non_simple_strides(nonitemsize_strides, [0], 1, 'row', False)
+
+ def test_non_itemize_strides_p_row(self):
+ self.base_non_simple_strides(nonitemsize_strides, [0], 3, 'row', False)
+
+ def test_non_itemize_strides_1_col(self):
+ self.base_non_simple_strides(nonitemsize_strides, [0], 1, 'col', False)
+
+ def test_non_itemize_strides_p_col(self):
+ self.base_non_simple_strides(nonitemsize_strides, [0], 3, 'col', False)
+
+ def test_non_native_byte_order_1_row(self):
+ self.base_non_simple_strides(make_nonnative, [0], 1, 'row', False)
+
+ def test_non_native_byte_order_p_row(self):
+ self.base_non_simple_strides(make_nonnative, [0], 3, 'row', False)
+
+ def test_non_native_byte_order_1_col(self):
+ self.base_non_simple_strides(make_nonnative, [0], 1, 'col', False)
+
+ def test_non_native_byte_order_p_col(self):
+ self.base_non_simple_strides(make_nonnative, [0], 3, 'col', False)
+
+ def test_neg_k(self):
+ a, q, r = self.generate('sqr')
+ for k, p, w in itertools.product([-3, -7], [1, 3], ['row', 'col']):
+ q1, r1 = qr_delete(q, r, k, p, w, overwrite_qr=False)
+ if w == 'row':
+ a1 = np.delete(a, slice(k+a.shape[0], k+p+a.shape[0]), 0)
+ else:
+ a1 = np.delete(a, slice(k+a.shape[1], k+p+a.shape[1]), 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
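+ # For the 8x8 'sqr' matrix used here, a negative k counts from the end:
+ # k = -3 refers to row/column 5 and k = -7 to row/column 1, so the
+ # expected result is built by shifting k into range before slicing,
+ # e.g. slice(-3 + 8, -3 + 8 + p).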
+
+ def base_overwrite_qr(self, which, p, test_C, test_F, mode='full'):
+ assert_sqr = (mode == 'full')
+ if which == 'row':
+ qind = (slice(p,None), slice(p,None))
+ rind = (slice(p,None), slice(None))
+ else:
+ qind = (slice(None), slice(None))
+ rind = (slice(None), slice(None,-p))
+ a, q0, r0 = self.generate('sqr', mode)
+ if p == 1:
+ a1 = np.delete(a, 3, 0 if which == 'row' else 1)
+ else:
+ a1 = np.delete(a, slice(3, 3+p), 0 if which == 'row' else 1)
+
+ # don't overwrite
+ q = q0.copy('F')
+ r = r0.copy('F')
+ q1, r1 = qr_delete(q, r, 3, p, which, False)
+ check_qr(q1, r1, a1, self.rtol, self.atol, assert_sqr)
+ check_qr(q, r, a, self.rtol, self.atol, assert_sqr)
+
+ if test_F:
+ q = q0.copy('F')
+ r = r0.copy('F')
+ q2, r2 = qr_delete(q, r, 3, p, which, True)
+ check_qr(q2, r2, a1, self.rtol, self.atol, assert_sqr)
+ # verify the overwriting
+ assert_allclose(q2, q[qind], rtol=self.rtol, atol=self.atol)
+ assert_allclose(r2, r[rind], rtol=self.rtol, atol=self.atol)
+
+ if test_C:
+ q = q0.copy('C')
+ r = r0.copy('C')
+ q3, r3 = qr_delete(q, r, 3, p, which, True)
+ check_qr(q3, r3, a1, self.rtol, self.atol, assert_sqr)
+ assert_allclose(q3, q[qind], rtol=self.rtol, atol=self.atol)
+ assert_allclose(r3, r[rind], rtol=self.rtol, atol=self.atol)
+
+ def test_overwrite_qr_1_row(self):
+ # any positively strided q and r.
+ self.base_overwrite_qr('row', 1, True, True)
+
+ def test_overwrite_economic_qr_1_row(self):
+ # Any contiguous q and positively strided r.
+ self.base_overwrite_qr('row', 1, True, True, 'economic')
+
+ def test_overwrite_qr_1_col(self):
+ # any positively strided q and r.
+ # full and eco share code paths
+ self.base_overwrite_qr('col', 1, True, True)
+
+ def test_overwrite_qr_p_row(self):
+ # any positively strided q and r.
+ self.base_overwrite_qr('row', 3, True, True)
+
+ def test_overwrite_economic_qr_p_row(self):
+ # any contiguous q and positively strided r
+ self.base_overwrite_qr('row', 3, True, True, 'economic')
+
+ def test_overwrite_qr_p_col(self):
+ # only F ordered q and r can be overwritten for cols
+ # full and eco share code paths
+ self.base_overwrite_qr('col', 3, False, True)
+
+ def test_bad_which(self):
+ a, q, r = self.generate('sqr')
+ assert_raises(ValueError, qr_delete, q, r, 0, which='foo')
+
+ def test_bad_k(self):
+ a, q, r = self.generate('tall')
+ assert_raises(ValueError, qr_delete, q, r, q.shape[0], 1)
+ assert_raises(ValueError, qr_delete, q, r, -q.shape[0]-1, 1)
+ assert_raises(ValueError, qr_delete, q, r, r.shape[0], 1, 'col')
+ assert_raises(ValueError, qr_delete, q, r, -r.shape[0]-1, 1, 'col')
+
+ def test_bad_p(self):
+ a, q, r = self.generate('tall')
+ # p must be positive
+ assert_raises(ValueError, qr_delete, q, r, 0, -1)
+ assert_raises(ValueError, qr_delete, q, r, 0, -1, 'col')
+
+ # and nonzero
+ assert_raises(ValueError, qr_delete, q, r, 0, 0)
+ assert_raises(ValueError, qr_delete, q, r, 0, 0, 'col')
+
+ # must have at least k+p rows or cols, depending.
+ assert_raises(ValueError, qr_delete, q, r, 3, q.shape[0]-2)
+ assert_raises(ValueError, qr_delete, q, r, 3, r.shape[1]-2, 'col')
+
+ def test_empty_q(self):
+ a, q, r = self.generate('tall')
+ # same code path for 'row' and 'col'
+ assert_raises(ValueError, qr_delete, np.array([]), r, 0, 1)
+
+ def test_empty_r(self):
+ a, q, r = self.generate('tall')
+ # same code path for 'row' and 'col'
+ assert_raises(ValueError, qr_delete, q, np.array([]), 0, 1)
+
+ def test_mismatched_q_and_r(self):
+ a, q, r = self.generate('tall')
+ r = r[1:]
+ assert_raises(ValueError, qr_delete, q, r, 0, 1)
+
+ def test_unsupported_dtypes(self):
+ dts = ['int8', 'int16', 'int32', 'int64',
+ 'uint8', 'uint16', 'uint32', 'uint64',
+ 'float16', 'longdouble', 'clongdouble',
+ 'bool']
+ a, q0, r0 = self.generate('tall')
+ for dtype in dts:
+ q = q0.real.astype(dtype)
+ with np.errstate(invalid="ignore"):
+ r = r0.real.astype(dtype)
+ assert_raises(ValueError, qr_delete, q, r0, 0, 1, 'row')
+ assert_raises(ValueError, qr_delete, q, r0, 0, 2, 'row')
+ assert_raises(ValueError, qr_delete, q, r0, 0, 1, 'col')
+ assert_raises(ValueError, qr_delete, q, r0, 0, 2, 'col')
+
+ assert_raises(ValueError, qr_delete, q0, r, 0, 1, 'row')
+ assert_raises(ValueError, qr_delete, q0, r, 0, 2, 'row')
+ assert_raises(ValueError, qr_delete, q0, r, 0, 1, 'col')
+ assert_raises(ValueError, qr_delete, q0, r, 0, 2, 'col')
+
+ def test_check_finite(self):
+ a0, q0, r0 = self.generate('tall')
+
+ q = q0.copy('F')
+ q[1,1] = np.nan
+ assert_raises(ValueError, qr_delete, q, r0, 0, 1, 'row')
+ assert_raises(ValueError, qr_delete, q, r0, 0, 3, 'row')
+ assert_raises(ValueError, qr_delete, q, r0, 0, 1, 'col')
+ assert_raises(ValueError, qr_delete, q, r0, 0, 3, 'col')
+
+ r = r0.copy('F')
+ r[1,1] = np.nan
+ assert_raises(ValueError, qr_delete, q0, r, 0, 1, 'row')
+ assert_raises(ValueError, qr_delete, q0, r, 0, 3, 'row')
+ assert_raises(ValueError, qr_delete, q0, r, 0, 1, 'col')
+ assert_raises(ValueError, qr_delete, q0, r, 0, 3, 'col')
+
+ def test_qr_scalar(self):
+ a, q, r = self.generate('1x1')
+ assert_raises(ValueError, qr_delete, q[0, 0], r, 0, 1, 'row')
+ assert_raises(ValueError, qr_delete, q, r[0, 0], 0, 1, 'row')
+ assert_raises(ValueError, qr_delete, q[0, 0], r, 0, 1, 'col')
+ assert_raises(ValueError, qr_delete, q, r[0, 0], 0, 1, 'col')
+
+class TestQRdelete_f(BaseQRdelete):
+ dtype = np.dtype('f')
+
+class TestQRdelete_F(BaseQRdelete):
+ dtype = np.dtype('F')
+
+class TestQRdelete_d(BaseQRdelete):
+ dtype = np.dtype('d')
+
+class TestQRdelete_D(BaseQRdelete):
+ dtype = np.dtype('D')
+
+class BaseQRinsert(BaseQRdeltas):
+ def generate(self, type, mode='full', which='row', p=1):
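+ # build a, q, r as usual, plus a block u to insert: shape (N,) or (p, N)
+ # for row inserts and (M,) or (M, p) for column inserts.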
+ a, q, r = super().generate(type, mode)
+
+ assert_(p > 0)
+
+ # super call set the seed...
+ if which == 'row':
+ if p == 1:
+ u = np.random.random(a.shape[1])
+ else:
+ u = np.random.random((p, a.shape[1]))
+ elif which == 'col':
+ if p == 1:
+ u = np.random.random(a.shape[0])
+ else:
+ u = np.random.random((a.shape[0], p))
+ else:
+ raise ValueError('which should be either "row" or "col"')
+
+ if np.iscomplexobj(self.dtype.type(1)):
+ b = np.random.random(u.shape)
+ u = u + 1j * b
+
+ u = u.astype(self.dtype)
+ return a, q, r, u
+
+ def test_sqr_1_row(self):
+ a, q, r, u = self.generate('sqr', which='row')
+ for row in range(r.shape[0] + 1):
+ q1, r1 = qr_insert(q, r, u, row)
+ a1 = np.insert(a, row, u, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_sqr_p_row(self):
+ # sqr + rows --> fat always
+ a, q, r, u = self.generate('sqr', which='row', p=3)
+ for row in range(r.shape[0] + 1):
+ q1, r1 = qr_insert(q, r, u, row)
+ a1 = np.insert(a, np.full(3, row, np.intp), u, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_sqr_1_col(self):
+ a, q, r, u = self.generate('sqr', which='col')
+ for col in range(r.shape[1] + 1):
+ q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+ a1 = np.insert(a, col, u, 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_sqr_p_col(self):
+ # sqr + cols --> fat always
+ a, q, r, u = self.generate('sqr', which='col', p=3)
+ for col in range(r.shape[1] + 1):
+ q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+ a1 = np.insert(a, np.full(3, col, np.intp), u, 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_tall_1_row(self):
+ a, q, r, u = self.generate('tall', which='row')
+ for row in range(r.shape[0] + 1):
+ q1, r1 = qr_insert(q, r, u, row)
+ a1 = np.insert(a, row, u, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_tall_p_row(self):
+ # tall + rows --> tall always
+ a, q, r, u = self.generate('tall', which='row', p=3)
+ for row in range(r.shape[0] + 1):
+ q1, r1 = qr_insert(q, r, u, row)
+ a1 = np.insert(a, np.full(3, row, np.intp), u, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_tall_1_col(self):
+ a, q, r, u = self.generate('tall', which='col')
+ for col in range(r.shape[1] + 1):
+ q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+ a1 = np.insert(a, col, u, 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ # for column adds to tall matrices there are three cases to test
+ # tall + pcol --> tall
+ # tall + pcol --> sqr
+ # tall + pcol --> fat
+ def base_tall_p_col_xxx(self, p):
+ a, q, r, u = self.generate('tall', which='col', p=p)
+ for col in range(r.shape[1] + 1):
+ q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+ a1 = np.insert(a, np.full(p, col, np.intp), u, 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_tall_p_col_tall(self):
+ # 12x7 + 12x3 = 12x10 --> stays tall
+ self.base_tall_p_col_xxx(3)
+
+ def test_tall_p_col_sqr(self):
+ # 12x7 + 12x5 = 12x12 --> becomes sqr
+ self.base_tall_p_col_xxx(5)
+
+ def test_tall_p_col_fat(self):
+ # 12x7 + 12x7 = 12x14 --> becomes fat
+ self.base_tall_p_col_xxx(7)
+
+ def test_fat_1_row(self):
+ a, q, r, u = self.generate('fat', which='row')
+ for row in range(r.shape[0] + 1):
+ q1, r1 = qr_insert(q, r, u, row)
+ a1 = np.insert(a, row, u, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ # for row adds to fat matrices there are three cases to test
+ # fat + prow --> fat
+ # fat + prow --> sqr
+ # fat + prow --> tall
+ def base_fat_p_row_xxx(self, p):
+ a, q, r, u = self.generate('fat', which='row', p=p)
+ for row in range(r.shape[0] + 1):
+ q1, r1 = qr_insert(q, r, u, row)
+ a1 = np.insert(a, np.full(p, row, np.intp), u, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_fat_p_row_fat(self):
+ # 7x12 + 3x12 = 10x12 --> stays fat
+ self.base_fat_p_row_xxx(3)
+
+ def test_fat_p_row_sqr(self):
+ # 7x12 + 5x12 = 12x12 --> becomes sqr
+ self.base_fat_p_row_xxx(5)
+
+ def test_fat_p_row_tall(self):
+ # 7x12 + 7x12 = 14x12 --> becomes tall
+ self.base_fat_p_row_xxx(7)
+
+ def test_fat_1_col(self):
+ a, q, r, u = self.generate('fat', which='col')
+ for col in range(r.shape[1] + 1):
+ q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+ a1 = np.insert(a, col, u, 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_fat_p_col(self):
+ # fat + cols --> fat always
+ a, q, r, u = self.generate('fat', which='col', p=3)
+ for col in range(r.shape[1] + 1):
+ q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+ a1 = np.insert(a, np.full(3, col, np.intp), u, 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_economic_1_row(self):
+ a, q, r, u = self.generate('tall', 'economic', 'row')
+ for row in range(r.shape[0] + 1):
+ q1, r1 = qr_insert(q, r, u, row, overwrite_qru=False)
+ a1 = np.insert(a, row, u, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+ def test_economic_p_row(self):
+ # tall + rows --> tall always
+ a, q, r, u = self.generate('tall', 'economic', 'row', 3)
+ for row in range(r.shape[0] + 1):
+ q1, r1 = qr_insert(q, r, u, row, overwrite_qru=False)
+ a1 = np.insert(a, np.full(3, row, np.intp), u, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+ def test_economic_1_col(self):
+ a, q, r, u = self.generate('tall', 'economic', which='col')
+ for col in range(r.shape[1] + 1):
+ q1, r1 = qr_insert(q, r, u.copy(), col, 'col', overwrite_qru=False)
+ a1 = np.insert(a, col, u, 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+ def test_economic_1_col_bad_update(self):
+ # When the column to be added lies in the span of Q, the update is
+ # not meaningful. This is detected, and a LinAlgError is issued.
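+ # (In economic mode the insert needs a new orthonormal column for Q; if
+ # u already lies in span(Q) the projection residual is zero and no such
+ # column exists.)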
+ q = np.eye(5, 3, dtype=self.dtype)
+ r = np.eye(3, dtype=self.dtype)
+ u = np.array([1, 0, 0, 0, 0], self.dtype)
+ assert_raises(linalg.LinAlgError, qr_insert, q, r, u, 0, 'col')
+
+ # for column adds to economic matrices there are three cases to test
+ # eco + pcol --> eco
+ # eco + pcol --> sqr
+ # eco + pcol --> fat
+ def base_economic_p_col_xxx(self, p):
+ a, q, r, u = self.generate('tall', 'economic', which='col', p=p)
+ for col in range(r.shape[1] + 1):
+ q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+ a1 = np.insert(a, np.full(p, col, np.intp), u, 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+ def test_economic_p_col_eco(self):
+ # 12x7 + 12x3 = 12x10 --> stays eco
+ self.base_economic_p_col_xxx(3)
+
+ def test_economic_p_col_sqr(self):
+ # 12x7 + 12x5 = 12x12 --> becomes sqr
+ self.base_economic_p_col_xxx(5)
+
+ def test_economic_p_col_fat(self):
+ # 12x7 + 12x7 = 12x14 --> becomes fat
+ self.base_economic_p_col_xxx(7)
+
+ def test_Mx1_1_row(self):
+ a, q, r, u = self.generate('Mx1', which='row')
+ for row in range(r.shape[0] + 1):
+ q1, r1 = qr_insert(q, r, u, row)
+ a1 = np.insert(a, row, u, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_Mx1_p_row(self):
+ a, q, r, u = self.generate('Mx1', which='row', p=3)
+ for row in range(r.shape[0] + 1):
+ q1, r1 = qr_insert(q, r, u, row)
+ a1 = np.insert(a, np.full(3, row, np.intp), u, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_Mx1_1_col(self):
+ a, q, r, u = self.generate('Mx1', which='col')
+ for col in range(r.shape[1] + 1):
+ q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+ a1 = np.insert(a, col, u, 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_Mx1_p_col(self):
+ a, q, r, u = self.generate('Mx1', which='col', p=3)
+ for col in range(r.shape[1] + 1):
+ q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+ a1 = np.insert(a, np.full(3, col, np.intp), u, 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_Mx1_economic_1_row(self):
+ a, q, r, u = self.generate('Mx1', 'economic', 'row')
+ for row in range(r.shape[0] + 1):
+ q1, r1 = qr_insert(q, r, u, row)
+ a1 = np.insert(a, row, u, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+ def test_Mx1_economic_p_row(self):
+ a, q, r, u = self.generate('Mx1', 'economic', 'row', 3)
+ for row in range(r.shape[0] + 1):
+ q1, r1 = qr_insert(q, r, u, row)
+ a1 = np.insert(a, np.full(3, row, np.intp), u, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+ def test_Mx1_economic_1_col(self):
+ a, q, r, u = self.generate('Mx1', 'economic', 'col')
+ for col in range(r.shape[1] + 1):
+ q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+ a1 = np.insert(a, col, u, 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+ def test_Mx1_economic_p_col(self):
+ a, q, r, u = self.generate('Mx1', 'economic', 'col', 3)
+ for col in range(r.shape[1] + 1):
+ q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+ a1 = np.insert(a, np.full(3, col, np.intp), u, 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+ def test_1xN_1_row(self):
+ a, q, r, u = self.generate('1xN', which='row')
+ for row in range(r.shape[0] + 1):
+ q1, r1 = qr_insert(q, r, u, row)
+ a1 = np.insert(a, row, u, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_1xN_p_row(self):
+ a, q, r, u = self.generate('1xN', which='row', p=3)
+ for row in range(r.shape[0] + 1):
+ q1, r1 = qr_insert(q, r, u, row)
+ a1 = np.insert(a, np.full(3, row, np.intp), u, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_1xN_1_col(self):
+ a, q, r, u = self.generate('1xN', which='col')
+ for col in range(r.shape[1] + 1):
+ q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+ a1 = np.insert(a, col, u, 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_1xN_p_col(self):
+ a, q, r, u = self.generate('1xN', which='col', p=3)
+ for col in range(r.shape[1] + 1):
+ q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+ a1 = np.insert(a, np.full(3, col, np.intp), u, 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_1x1_1_row(self):
+ a, q, r, u = self.generate('1x1', which='row')
+ for row in range(r.shape[0] + 1):
+ q1, r1 = qr_insert(q, r, u, row)
+ a1 = np.insert(a, row, u, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_1x1_p_row(self):
+ a, q, r, u = self.generate('1x1', which='row', p=3)
+ for row in range(r.shape[0] + 1):
+ q1, r1 = qr_insert(q, r, u, row)
+ a1 = np.insert(a, np.full(3, row, np.intp), u, 0)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_1x1_1_col(self):
+ a, q, r, u = self.generate('1x1', which='col')
+ for col in range(r.shape[1] + 1):
+ q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+ a1 = np.insert(a, col, u, 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_1x1_p_col(self):
+ a, q, r, u = self.generate('1x1', which='col', p=3)
+ for col in range(r.shape[1] + 1):
+ q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False)
+ a1 = np.insert(a, np.full(3, col, np.intp), u, 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_1x1_1_scalar(self):
+ a, q, r, u = self.generate('1x1', which='row')
+ assert_raises(ValueError, qr_insert, q[0, 0], r, u, 0, 'row')
+ assert_raises(ValueError, qr_insert, q, r[0, 0], u, 0, 'row')
+ assert_raises(ValueError, qr_insert, q, r, u[0], 0, 'row')
+
+ assert_raises(ValueError, qr_insert, q[0, 0], r, u, 0, 'col')
+ assert_raises(ValueError, qr_insert, q, r[0, 0], u, 0, 'col')
+ assert_raises(ValueError, qr_insert, q, r, u[0], 0, 'col')
+
+ def base_non_simple_strides(self, adjust_strides, k, p, which):
+ for type in ['sqr', 'tall', 'fat']:
+ a, q0, r0, u0 = self.generate(type, which=which, p=p)
+ qs, rs, us = adjust_strides((q0, r0, u0))
+ if p == 1:
+ ai = np.insert(a, k, u0, 0 if which == 'row' else 1)
+ else:
+ ai = np.insert(a, np.full(p, k, np.intp), u0,
+ 0 if which == 'row' else 1)
+
+ # for each variable, q, r, u we try with it strided and
+ # overwrite=False. Then we try with overwrite=True. Nothing
+ # is checked to see if it can be overwritten, since only
+ # F ordered Q can be overwritten when adding columns.
+
+ q = q0.copy('F')
+ r = r0.copy('F')
+ u = u0.copy('F')
+ q1, r1 = qr_insert(qs, r, u, k, which, overwrite_qru=False)
+ check_qr(q1, r1, ai, self.rtol, self.atol)
+ q1o, r1o = qr_insert(qs, r, u, k, which, overwrite_qru=True)
+ check_qr(q1o, r1o, ai, self.rtol, self.atol)
+
+ q = q0.copy('F')
+ r = r0.copy('F')
+ u = u0.copy('F')
+ q2, r2 = qr_insert(q, rs, u, k, which, overwrite_qru=False)
+ check_qr(q2, r2, ai, self.rtol, self.atol)
+ q2o, r2o = qr_insert(q, rs, u, k, which, overwrite_qru=True)
+ check_qr(q2o, r2o, ai, self.rtol, self.atol)
+
+ q = q0.copy('F')
+ r = r0.copy('F')
+ u = u0.copy('F')
+ q3, r3 = qr_insert(q, r, us, k, which, overwrite_qru=False)
+ check_qr(q3, r3, ai, self.rtol, self.atol)
+ q3o, r3o = qr_insert(q, r, us, k, which, overwrite_qru=True)
+ check_qr(q3o, r3o, ai, self.rtol, self.atol)
+
+ q = q0.copy('F')
+ r = r0.copy('F')
+ u = u0.copy('F')
+ # since some of these were consumed above
+ qs, rs, us = adjust_strides((q, r, u))
+ q5, r5 = qr_insert(qs, rs, us, k, which, overwrite_qru=False)
+ check_qr(q5, r5, ai, self.rtol, self.atol)
+ q5o, r5o = qr_insert(qs, rs, us, k, which, overwrite_qru=True)
+ check_qr(q5o, r5o, ai, self.rtol, self.atol)
+
+ def test_non_unit_strides_1_row(self):
+ self.base_non_simple_strides(make_strided, 0, 1, 'row')
+
+ def test_non_unit_strides_p_row(self):
+ self.base_non_simple_strides(make_strided, 0, 3, 'row')
+
+ def test_non_unit_strides_1_col(self):
+ self.base_non_simple_strides(make_strided, 0, 1, 'col')
+
+ def test_non_unit_strides_p_col(self):
+ self.base_non_simple_strides(make_strided, 0, 3, 'col')
+
+ def test_neg_strides_1_row(self):
+ self.base_non_simple_strides(negate_strides, 0, 1, 'row')
+
+ def test_neg_strides_p_row(self):
+ self.base_non_simple_strides(negate_strides, 0, 3, 'row')
+
+ def test_neg_strides_1_col(self):
+ self.base_non_simple_strides(negate_strides, 0, 1, 'col')
+
+ def test_neg_strides_p_col(self):
+ self.base_non_simple_strides(negate_strides, 0, 3, 'col')
+
+ def test_non_itemsize_strides_1_row(self):
+ self.base_non_simple_strides(nonitemsize_strides, 0, 1, 'row')
+
+ def test_non_itemsize_strides_p_row(self):
+ self.base_non_simple_strides(nonitemsize_strides, 0, 3, 'row')
+
+ def test_non_itemsize_strides_1_col(self):
+ self.base_non_simple_strides(nonitemsize_strides, 0, 1, 'col')
+
+ def test_non_itemsize_strides_p_col(self):
+ self.base_non_simple_strides(nonitemsize_strides, 0, 3, 'col')
+
+ def test_non_native_byte_order_1_row(self):
+ self.base_non_simple_strides(make_nonnative, 0, 1, 'row')
+
+ def test_non_native_byte_order_p_row(self):
+ self.base_non_simple_strides(make_nonnative, 0, 3, 'row')
+
+ def test_non_native_byte_order_1_col(self):
+ self.base_non_simple_strides(make_nonnative, 0, 1, 'col')
+
+ def test_non_native_byte_order_p_col(self):
+ self.base_non_simple_strides(make_nonnative, 0, 3, 'col')
+
+ def test_overwrite_qu_rank_1(self):
+ # when inserting rows, the sizes of both Q and R change, so only
+ # column inserts can overwrite q. Only complex column inserts
+ # with C ordered Q overwrite u. Any contiguous Q is overwritten
+ # when inserting 1 column
+ a, q0, r, u = self.generate('sqr', which='col', p=1)
+ q = q0.copy('C')
+ u0 = u.copy()
+ # don't overwrite
+ q1, r1 = qr_insert(q, r, u, 0, 'col', overwrite_qru=False)
+ a1 = np.insert(a, 0, u0, 1)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+ check_qr(q, r, a, self.rtol, self.atol)
+
+ # try overwriting
+ q2, r2 = qr_insert(q, r, u, 0, 'col', overwrite_qru=True)
+ check_qr(q2, r2, a1, self.rtol, self.atol)
+ # verify the overwriting
+ assert_allclose(q2, q, rtol=self.rtol, atol=self.atol)
+ assert_allclose(u, u0.conj(), rtol=self.rtol, atol=self.atol)
+
+ # now try with a fortran ordered Q
+ qF = q0.copy('F')
+ u1 = u0.copy()
+ q3, r3 = qr_insert(qF, r, u1, 0, 'col', overwrite_qru=False)
+ check_qr(q3, r3, a1, self.rtol, self.atol)
+ check_qr(qF, r, a, self.rtol, self.atol)
+
+ # try overwriting
+ q4, r4 = qr_insert(qF, r, u1, 0, 'col', overwrite_qru=True)
+ check_qr(q4, r4, a1, self.rtol, self.atol)
+ assert_allclose(q4, qF, rtol=self.rtol, atol=self.atol)
+
+ def test_overwrite_qu_rank_p(self):
+ # when inserting rows, the sizes of both Q and R change, so only
+ # column inserts can potentially overwrite Q. In practice, only
+ # F ordered Q are overwritten with a rank p update.
+ a, q0, r, u = self.generate('sqr', which='col', p=3)
+ q = q0.copy('F')
+ a1 = np.insert(a, np.zeros(3, np.intp), u, 1)
+
+ # don't overwrite
+ q1, r1 = qr_insert(q, r, u, 0, 'col', overwrite_qru=False)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+ check_qr(q, r, a, self.rtol, self.atol)
+
+ # try overwriting
+ q2, r2 = qr_insert(q, r, u, 0, 'col', overwrite_qru=True)
+ check_qr(q2, r2, a1, self.rtol, self.atol)
+ assert_allclose(q2, q, rtol=self.rtol, atol=self.atol)
+
+ def test_empty_inputs(self):
+ a, q, r, u = self.generate('sqr', which='row')
+ assert_raises(ValueError, qr_insert, np.array([]), r, u, 0, 'row')
+ assert_raises(ValueError, qr_insert, q, np.array([]), u, 0, 'row')
+ assert_raises(ValueError, qr_insert, q, r, np.array([]), 0, 'row')
+ assert_raises(ValueError, qr_insert, np.array([]), r, u, 0, 'col')
+ assert_raises(ValueError, qr_insert, q, np.array([]), u, 0, 'col')
+ assert_raises(ValueError, qr_insert, q, r, np.array([]), 0, 'col')
+
+ def test_mismatched_shapes(self):
+ a, q, r, u = self.generate('tall', which='row')
+ assert_raises(ValueError, qr_insert, q, r[1:], u, 0, 'row')
+ assert_raises(ValueError, qr_insert, q[:-2], r, u, 0, 'row')
+ assert_raises(ValueError, qr_insert, q, r, u[1:], 0, 'row')
+ assert_raises(ValueError, qr_insert, q, r[1:], u, 0, 'col')
+ assert_raises(ValueError, qr_insert, q[:-2], r, u, 0, 'col')
+ assert_raises(ValueError, qr_insert, q, r, u[1:], 0, 'col')
+
+ def test_unsupported_dtypes(self):
+ dts = ['int8', 'int16', 'int32', 'int64',
+ 'uint8', 'uint16', 'uint32', 'uint64',
+ 'float16', 'longdouble', 'clongdouble',
+ 'bool']
+ a, q0, r0, u0 = self.generate('sqr', which='row')
+ for dtype in dts:
+ q = q0.real.astype(dtype)
+ with np.errstate(invalid="ignore"):
+ r = r0.real.astype(dtype)
+ u = u0.real.astype(dtype)
+ assert_raises(ValueError, qr_insert, q, r0, u0, 0, 'row')
+ assert_raises(ValueError, qr_insert, q, r0, u0, 0, 'col')
+ assert_raises(ValueError, qr_insert, q0, r, u0, 0, 'row')
+ assert_raises(ValueError, qr_insert, q0, r, u0, 0, 'col')
+ assert_raises(ValueError, qr_insert, q0, r0, u, 0, 'row')
+ assert_raises(ValueError, qr_insert, q0, r0, u, 0, 'col')
+
+ def test_check_finite(self):
+ a0, q0, r0, u0 = self.generate('sqr', which='row', p=3)
+
+ q = q0.copy('F')
+ q[1,1] = np.nan
+ assert_raises(ValueError, qr_insert, q, r0, u0[:,0], 0, 'row')
+ assert_raises(ValueError, qr_insert, q, r0, u0, 0, 'row')
+ assert_raises(ValueError, qr_insert, q, r0, u0[:,0], 0, 'col')
+ assert_raises(ValueError, qr_insert, q, r0, u0, 0, 'col')
+
+ r = r0.copy('F')
+ r[1,1] = np.nan
+ assert_raises(ValueError, qr_insert, q0, r, u0[:,0], 0, 'row')
+ assert_raises(ValueError, qr_insert, q0, r, u0, 0, 'row')
+ assert_raises(ValueError, qr_insert, q0, r, u0[:,0], 0, 'col')
+ assert_raises(ValueError, qr_insert, q0, r, u0, 0, 'col')
+
+ u = u0.copy('F')
+ u[0,0] = np.nan
+ assert_raises(ValueError, qr_insert, q0, r0, u[:,0], 0, 'row')
+ assert_raises(ValueError, qr_insert, q0, r0, u, 0, 'row')
+ assert_raises(ValueError, qr_insert, q0, r0, u[:,0], 0, 'col')
+ assert_raises(ValueError, qr_insert, q0, r0, u, 0, 'col')
+
+class TestQRinsert_f(BaseQRinsert):
+ dtype = np.dtype('f')
+
+class TestQRinsert_F(BaseQRinsert):
+ dtype = np.dtype('F')
+
+class TestQRinsert_d(BaseQRinsert):
+ dtype = np.dtype('d')
+
+class TestQRinsert_D(BaseQRinsert):
+ dtype = np.dtype('D')
+
+class BaseQRupdate(BaseQRdeltas):
+ def generate(self, type, mode='full', p=1):
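+ # build a, q, r plus rank-1 or rank-p update factors: u matches the rows
+ # of q (shape (M,) or (M, p)) and v matches the columns of r (shape (N,)
+ # or (N, p)); the updated matrix is a + np.outer(u, v.conj()) for p == 1
+ # or a + np.dot(u, v.T.conj()) for p > 1.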
+ a, q, r = super().generate(type, mode)
+
+ # super call set the seed...
+ if p == 1:
+ u = np.random.random(q.shape[0])
+ v = np.random.random(r.shape[1])
+ else:
+ u = np.random.random((q.shape[0], p))
+ v = np.random.random((r.shape[1], p))
+
+ if np.iscomplexobj(self.dtype.type(1)):
+ b = np.random.random(u.shape)
+ u = u + 1j * b
+
+ c = np.random.random(v.shape)
+ v = v + 1j * c
+
+ u = u.astype(self.dtype)
+ v = v.astype(self.dtype)
+ return a, q, r, u, v
+
+ def test_sqr_rank_1(self):
+ a, q, r, u, v = self.generate('sqr')
+ q1, r1 = qr_update(q, r, u, v, False)
+ a1 = a + np.outer(u, v.conj())
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_sqr_rank_p(self):
+ # test ndim = 2, rank 1 updates here too
+ for p in [1, 2, 3, 5]:
+ a, q, r, u, v = self.generate('sqr', p=p)
+ if p == 1:
+ u = u.reshape(u.size, 1)
+ v = v.reshape(v.size, 1)
+ q1, r1 = qr_update(q, r, u, v, False)
+ a1 = a + np.dot(u, v.T.conj())
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_tall_rank_1(self):
+ a, q, r, u, v = self.generate('tall')
+ q1, r1 = qr_update(q, r, u, v, False)
+ a1 = a + np.outer(u, v.conj())
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_tall_rank_p(self):
+ for p in [1, 2, 3, 5]:
+ a, q, r, u, v = self.generate('tall', p=p)
+ if p == 1:
+ u = u.reshape(u.size, 1)
+ v = v.reshape(v.size, 1)
+ q1, r1 = qr_update(q, r, u, v, False)
+ a1 = a + np.dot(u, v.T.conj())
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_fat_rank_1(self):
+ a, q, r, u, v = self.generate('fat')
+ q1, r1 = qr_update(q, r, u, v, False)
+ a1 = a + np.outer(u, v.conj())
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_fat_rank_p(self):
+ for p in [1, 2, 3, 5]:
+ a, q, r, u, v = self.generate('fat', p=p)
+ if p == 1:
+ u = u.reshape(u.size, 1)
+ v = v.reshape(v.size, 1)
+ q1, r1 = qr_update(q, r, u, v, False)
+ a1 = a + np.dot(u, v.T.conj())
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_economic_rank_1(self):
+ a, q, r, u, v = self.generate('tall', 'economic')
+ q1, r1 = qr_update(q, r, u, v, False)
+ a1 = a + np.outer(u, v.conj())
+ check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+ def test_economic_rank_p(self):
+ for p in [1, 2, 3, 5]:
+ a, q, r, u, v = self.generate('tall', 'economic', p)
+ if p == 1:
+ u = u.reshape(u.size, 1)
+ v = v.reshape(v.size, 1)
+ q1, r1 = qr_update(q, r, u, v, False)
+ a1 = a + np.dot(u, v.T.conj())
+ check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+ def test_Mx1_rank_1(self):
+ a, q, r, u, v = self.generate('Mx1')
+ q1, r1 = qr_update(q, r, u, v, False)
+ a1 = a + np.outer(u, v.conj())
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_Mx1_rank_p(self):
+ # when M or N == 1, only a rank 1 update is allowed. This isn't a
+ # fundamental limitation, but the code does not support it.
+ a, q, r, u, v = self.generate('Mx1', p=1)
+ u = u.reshape(u.size, 1)
+ v = v.reshape(v.size, 1)
+ q1, r1 = qr_update(q, r, u, v, False)
+ a1 = a + np.dot(u, v.T.conj())
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_Mx1_economic_rank_1(self):
+ a, q, r, u, v = self.generate('Mx1', 'economic')
+ q1, r1 = qr_update(q, r, u, v, False)
+ a1 = a + np.outer(u, v.conj())
+ check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+ def test_Mx1_economic_rank_p(self):
+ # when M or N == 1, only a rank 1 update is allowed. This isn't a
+ # fundamental limitation, but the code does not support it.
+ a, q, r, u, v = self.generate('Mx1', 'economic', p=1)
+ u = u.reshape(u.size, 1)
+ v = v.reshape(v.size, 1)
+ q1, r1 = qr_update(q, r, u, v, False)
+ a1 = a + np.dot(u, v.T.conj())
+ check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+ def test_1xN_rank_1(self):
+ a, q, r, u, v = self.generate('1xN')
+ q1, r1 = qr_update(q, r, u, v, False)
+ a1 = a + np.outer(u, v.conj())
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_1xN_rank_p(self):
+ # when M or N == 1, only a rank 1 update is allowed. This isn't a
+ # fundamental limitation, but the code does not support it.
+ a, q, r, u, v = self.generate('1xN', p=1)
+ u = u.reshape(u.size, 1)
+ v = v.reshape(v.size, 1)
+ q1, r1 = qr_update(q, r, u, v, False)
+ a1 = a + np.dot(u, v.T.conj())
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_1x1_rank_1(self):
+ a, q, r, u, v = self.generate('1x1')
+ q1, r1 = qr_update(q, r, u, v, False)
+ a1 = a + np.outer(u, v.conj())
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_1x1_rank_p(self):
+ # when M or N == 1, only a rank 1 update is allowed. This isn't a
+ # fundamental limitation, but the code does not support it.
+ a, q, r, u, v = self.generate('1x1', p=1)
+ u = u.reshape(u.size, 1)
+ v = v.reshape(v.size, 1)
+ q1, r1 = qr_update(q, r, u, v, False)
+ a1 = a + np.dot(u, v.T.conj())
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+
+ def test_1x1_rank_1_scalar(self):
+ a, q, r, u, v = self.generate('1x1')
+ assert_raises(ValueError, qr_update, q[0, 0], r, u, v)
+ assert_raises(ValueError, qr_update, q, r[0, 0], u, v)
+ assert_raises(ValueError, qr_update, q, r, u[0], v)
+ assert_raises(ValueError, qr_update, q, r, u, v[0])
+
+ def base_non_simple_strides(self, adjust_strides, mode, p, overwriteable):
+ assert_sqr = False if mode == 'economic' else True
+ for type in ['sqr', 'tall', 'fat']:
+ a, q0, r0, u0, v0 = self.generate(type, mode, p)
+ qs, rs, us, vs = adjust_strides((q0, r0, u0, v0))
+ if p == 1:
+ aup = a + np.outer(u0, v0.conj())
+ else:
+ aup = a + np.dot(u0, v0.T.conj())
+
+ # for each variable, q, r, u, v we try with it strided and
+ # overwrite=False. Then we try with overwrite=True, and make
+ # sure that if p == 1, r and v are still overwritten.
+ # a strided q and u must always be copied.
+
+ q = q0.copy('F')
+ r = r0.copy('F')
+ u = u0.copy('F')
+ v = v0.copy('C')
+ q1, r1 = qr_update(qs, r, u, v, False)
+ check_qr(q1, r1, aup, self.rtol, self.atol, assert_sqr)
+ q1o, r1o = qr_update(qs, r, u, v, True)
+ check_qr(q1o, r1o, aup, self.rtol, self.atol, assert_sqr)
+ if overwriteable:
+ assert_allclose(r1o, r, rtol=self.rtol, atol=self.atol)
+ assert_allclose(v, v0.conj(), rtol=self.rtol, atol=self.atol)
+
+ q = q0.copy('F')
+ r = r0.copy('F')
+ u = u0.copy('F')
+ v = v0.copy('C')
+ q2, r2 = qr_update(q, rs, u, v, False)
+ check_qr(q2, r2, aup, self.rtol, self.atol, assert_sqr)
+ q2o, r2o = qr_update(q, rs, u, v, True)
+ check_qr(q2o, r2o, aup, self.rtol, self.atol, assert_sqr)
+ if overwriteable:
+ assert_allclose(r2o, rs, rtol=self.rtol, atol=self.atol)
+ assert_allclose(v, v0.conj(), rtol=self.rtol, atol=self.atol)
+
+ q = q0.copy('F')
+ r = r0.copy('F')
+ u = u0.copy('F')
+ v = v0.copy('C')
+ q3, r3 = qr_update(q, r, us, v, False)
+ check_qr(q3, r3, aup, self.rtol, self.atol, assert_sqr)
+ q3o, r3o = qr_update(q, r, us, v, True)
+ check_qr(q3o, r3o, aup, self.rtol, self.atol, assert_sqr)
+ if overwriteable:
+ assert_allclose(r3o, r, rtol=self.rtol, atol=self.atol)
+ assert_allclose(v, v0.conj(), rtol=self.rtol, atol=self.atol)
+
+ q = q0.copy('F')
+ r = r0.copy('F')
+ u = u0.copy('F')
+ v = v0.copy('C')
+ q4, r4 = qr_update(q, r, u, vs, False)
+ check_qr(q4, r4, aup, self.rtol, self.atol, assert_sqr)
+ q4o, r4o = qr_update(q, r, u, vs, True)
+ check_qr(q4o, r4o, aup, self.rtol, self.atol, assert_sqr)
+ if overwriteable:
+ assert_allclose(r4o, r, rtol=self.rtol, atol=self.atol)
+ assert_allclose(vs, v0.conj(), rtol=self.rtol, atol=self.atol)
+
+ q = q0.copy('F')
+ r = r0.copy('F')
+ u = u0.copy('F')
+ v = v0.copy('C')
+ # since some of these were consumed above
+ qs, rs, us, vs = adjust_strides((q, r, u, v))
+ q5, r5 = qr_update(qs, rs, us, vs, False)
+ check_qr(q5, r5, aup, self.rtol, self.atol, assert_sqr)
+ q5o, r5o = qr_update(qs, rs, us, vs, True)
+ check_qr(q5o, r5o, aup, self.rtol, self.atol, assert_sqr)
+ if overwriteable:
+ assert_allclose(r5o, rs, rtol=self.rtol, atol=self.atol)
+ assert_allclose(vs, v0.conj(), rtol=self.rtol, atol=self.atol)
+
+ def test_non_unit_strides_rank_1(self):
+ self.base_non_simple_strides(make_strided, 'full', 1, True)
+
+ def test_non_unit_strides_economic_rank_1(self):
+ self.base_non_simple_strides(make_strided, 'economic', 1, True)
+
+ def test_non_unit_strides_rank_p(self):
+ self.base_non_simple_strides(make_strided, 'full', 3, False)
+
+ def test_non_unit_strides_economic_rank_p(self):
+ self.base_non_simple_strides(make_strided, 'economic', 3, False)
+
+ def test_neg_strides_rank_1(self):
+ self.base_non_simple_strides(negate_strides, 'full', 1, False)
+
+ def test_neg_strides_economic_rank_1(self):
+ self.base_non_simple_strides(negate_strides, 'economic', 1, False)
+
+ def test_neg_strides_rank_p(self):
+ self.base_non_simple_strides(negate_strides, 'full', 3, False)
+
+ def test_neg_strides_economic_rank_p(self):
+ self.base_non_simple_strides(negate_strides, 'economic', 3, False)
+
+ def test_non_itemsize_strides_rank_1(self):
+ self.base_non_simple_strides(nonitemsize_strides, 'full', 1, False)
+
+ def test_non_itemsize_strides_economic_rank_1(self):
+ self.base_non_simple_strides(nonitemsize_strides, 'economic', 1, False)
+
+ def test_non_itemsize_strides_rank_p(self):
+ self.base_non_simple_strides(nonitemsize_strides, 'full', 3, False)
+
+ def test_non_itemsize_strides_economic_rank_p(self):
+ self.base_non_simple_strides(nonitemsize_strides, 'economic', 3, False)
+
+ def test_non_native_byte_order_rank_1(self):
+ self.base_non_simple_strides(make_nonnative, 'full', 1, False)
+
+ def test_non_native_byte_order_economic_rank_1(self):
+ self.base_non_simple_strides(make_nonnative, 'economic', 1, False)
+
+ def test_non_native_byte_order_rank_p(self):
+ self.base_non_simple_strides(make_nonnative, 'full', 3, False)
+
+ def test_non_native_byte_order_economic_rank_p(self):
+ self.base_non_simple_strides(make_nonnative, 'economic', 3, False)
+
+ def test_overwrite_qruv_rank_1(self):
+ # Any positive strided q, r, u, and v can be overwritten for a rank 1
+ # update, only checking C and F contiguous.
+ a, q0, r0, u0, v0 = self.generate('sqr')
+ a1 = a + np.outer(u0, v0.conj())
+ q = q0.copy('F')
+ r = r0.copy('F')
+ u = u0.copy('F')
+ v = v0.copy('F')
+
+ # don't overwrite
+ q1, r1 = qr_update(q, r, u, v, False)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+ check_qr(q, r, a, self.rtol, self.atol)
+
+ q2, r2 = qr_update(q, r, u, v, True)
+ check_qr(q2, r2, a1, self.rtol, self.atol)
+ # verify the overwriting, no good way to check u and v.
+ assert_allclose(q2, q, rtol=self.rtol, atol=self.atol)
+ assert_allclose(r2, r, rtol=self.rtol, atol=self.atol)
+
+ q = q0.copy('C')
+ r = r0.copy('C')
+ u = u0.copy('C')
+ v = v0.copy('C')
+ q3, r3 = qr_update(q, r, u, v, True)
+ check_qr(q3, r3, a1, self.rtol, self.atol)
+ assert_allclose(q3, q, rtol=self.rtol, atol=self.atol)
+ assert_allclose(r3, r, rtol=self.rtol, atol=self.atol)
+
+ def test_overwrite_qruv_rank_1_economic(self):
+ # updating economic decompositions can overwrite any contiguous q,
+ # and positively strided r and u. V is only ever read.
+ # only checking C and F contiguous.
+ a, q0, r0, u0, v0 = self.generate('tall', 'economic')
+ a1 = a + np.outer(u0, v0.conj())
+ q = q0.copy('F')
+ r = r0.copy('F')
+ u = u0.copy('F')
+ v = v0.copy('F')
+
+ # don't overwrite
+ q1, r1 = qr_update(q, r, u, v, False)
+ check_qr(q1, r1, a1, self.rtol, self.atol, False)
+ check_qr(q, r, a, self.rtol, self.atol, False)
+
+ q2, r2 = qr_update(q, r, u, v, True)
+ check_qr(q2, r2, a1, self.rtol, self.atol, False)
+ # verify the overwriting, no good way to check u and v.
+ assert_allclose(q2, q, rtol=self.rtol, atol=self.atol)
+ assert_allclose(r2, r, rtol=self.rtol, atol=self.atol)
+
+ q = q0.copy('C')
+ r = r0.copy('C')
+ u = u0.copy('C')
+ v = v0.copy('C')
+ q3, r3 = qr_update(q, r, u, v, True)
+ check_qr(q3, r3, a1, self.rtol, self.atol, False)
+ assert_allclose(q3, q, rtol=self.rtol, atol=self.atol)
+ assert_allclose(r3, r, rtol=self.rtol, atol=self.atol)
+
+ def test_overwrite_qruv_rank_p(self):
+ # for rank p updates, q and r must be F contiguous, v must be C (v.T --> F)
+ # and u can be C or F, but is only overwritten if Q is C and complex
+ a, q0, r0, u0, v0 = self.generate('sqr', p=3)
+ a1 = a + np.dot(u0, v0.T.conj())
+ q = q0.copy('F')
+ r = r0.copy('F')
+ u = u0.copy('F')
+ v = v0.copy('C')
+
+ # don't overwrite
+ q1, r1 = qr_update(q, r, u, v, False)
+ check_qr(q1, r1, a1, self.rtol, self.atol)
+ check_qr(q, r, a, self.rtol, self.atol)
+
+ q2, r2 = qr_update(q, r, u, v, True)
+ check_qr(q2, r2, a1, self.rtol, self.atol)
+ # verify the overwriting, no good way to check u and v.
+ assert_allclose(q2, q, rtol=self.rtol, atol=self.atol)
+ assert_allclose(r2, r, rtol=self.rtol, atol=self.atol)
+
+ def test_empty_inputs(self):
+ a, q, r, u, v = self.generate('tall')
+ assert_raises(ValueError, qr_update, np.array([]), r, u, v)
+ assert_raises(ValueError, qr_update, q, np.array([]), u, v)
+ assert_raises(ValueError, qr_update, q, r, np.array([]), v)
+ assert_raises(ValueError, qr_update, q, r, u, np.array([]))
+
+ def test_mismatched_shapes(self):
+ a, q, r, u, v = self.generate('tall')
+ assert_raises(ValueError, qr_update, q, r[1:], u, v)
+ assert_raises(ValueError, qr_update, q[:-2], r, u, v)
+ assert_raises(ValueError, qr_update, q, r, u[1:], v)
+ assert_raises(ValueError, qr_update, q, r, u, v[1:])
+
+ def test_unsupported_dtypes(self):
+ dts = ['int8', 'int16', 'int32', 'int64',
+ 'uint8', 'uint16', 'uint32', 'uint64',
+ 'float16', 'longdouble', 'clongdouble',
+ 'bool']
+ a, q0, r0, u0, v0 = self.generate('tall')
+ for dtype in dts:
+ q = q0.real.astype(dtype)
+ with np.errstate(invalid="ignore"):
+ r = r0.real.astype(dtype)
+ u = u0.real.astype(dtype)
+ v = v0.real.astype(dtype)
+ assert_raises(ValueError, qr_update, q, r0, u0, v0)
+ assert_raises(ValueError, qr_update, q0, r, u0, v0)
+ assert_raises(ValueError, qr_update, q0, r0, u, v0)
+ assert_raises(ValueError, qr_update, q0, r0, u0, v)
+
+ def test_integer_input(self):
+ q = np.arange(16).reshape(4, 4)
+ r = q.copy() # doesn't matter
+ u = q[:, 0].copy()
+ v = r[0, :].copy()
+ assert_raises(ValueError, qr_update, q, r, u, v)
+
+ def test_check_finite(self):
+ a0, q0, r0, u0, v0 = self.generate('tall', p=3)
+
+ q = q0.copy('F')
+ q[1,1] = np.nan
+ assert_raises(ValueError, qr_update, q, r0, u0[:,0], v0[:,0])
+ assert_raises(ValueError, qr_update, q, r0, u0, v0)
+
+ r = r0.copy('F')
+ r[1,1] = np.nan
+ assert_raises(ValueError, qr_update, q0, r, u0[:,0], v0[:,0])
+ assert_raises(ValueError, qr_update, q0, r, u0, v0)
+
+ u = u0.copy('F')
+ u[0,0] = np.nan
+ assert_raises(ValueError, qr_update, q0, r0, u[:,0], v0[:,0])
+ assert_raises(ValueError, qr_update, q0, r0, u, v0)
+
+ v = v0.copy('F')
+ v[0,0] = np.nan
+ assert_raises(ValueError, qr_update, q0, r0, u[:,0], v[:,0])
+ assert_raises(ValueError, qr_update, q0, r0, u, v)
+
+ def test_economic_check_finite(self):
+ a0, q0, r0, u0, v0 = self.generate('tall', mode='economic', p=3)
+
+ q = q0.copy('F')
+ q[1,1] = np.nan
+ assert_raises(ValueError, qr_update, q, r0, u0[:,0], v0[:,0])
+ assert_raises(ValueError, qr_update, q, r0, u0, v0)
+
+ r = r0.copy('F')
+ r[1,1] = np.nan
+ assert_raises(ValueError, qr_update, q0, r, u0[:,0], v0[:,0])
+ assert_raises(ValueError, qr_update, q0, r, u0, v0)
+
+ u = u0.copy('F')
+ u[0,0] = np.nan
+ assert_raises(ValueError, qr_update, q0, r0, u[:,0], v0[:,0])
+ assert_raises(ValueError, qr_update, q0, r0, u, v0)
+
+ v = v0.copy('F')
+ v[0,0] = np.nan
+ assert_raises(ValueError, qr_update, q0, r0, u[:,0], v[:,0])
+ assert_raises(ValueError, qr_update, q0, r0, u, v)
+
+ def test_u_exactly_in_span_q(self):
+ q = np.array([[0, 0], [0, 0], [1, 0], [0, 1]], self.dtype)
+ r = np.array([[1, 0], [0, 1]], self.dtype)
+ u = np.array([0, 0, 0, -1], self.dtype)
+ v = np.array([1, 2], self.dtype)
+ q1, r1 = qr_update(q, r, u, v)
+ a1 = np.dot(q, r) + np.outer(u, v.conj())
+ check_qr(q1, r1, a1, self.rtol, self.atol, False)
+
+class TestQRupdate_f(BaseQRupdate):
+ dtype = np.dtype('f')
+
+class TestQRupdate_F(BaseQRupdate):
+ dtype = np.dtype('F')
+
+class TestQRupdate_d(BaseQRupdate):
+ dtype = np.dtype('d')
+
+class TestQRupdate_D(BaseQRupdate):
+ dtype = np.dtype('D')
+
+def test_form_qTu():
+ # We want to ensure that all of the code paths through this function are
+ # tested. Most of them should be hit with the rest of test suite, but
+ # explicit tests make clear precisely what is being tested.
+ #
+ # This function expects that Q is either C or F contiguous and square.
+ # Economic mode decompositions (Q is (M, N), M != N) do not go through this
+ # function. U may have any positive strides.
+ #
+ # Some of these tests are duplicates, since contiguous 1d arrays are both C
+ # and F.
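+ #
+ # check_form_qTu below builds Q and u with the requested layout and dtype
+ # and compares _form_qTu(q, u) against the reference np.dot(q.T.conj(), u).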
+
+ q_order = ['F', 'C']
+ q_shape = [(8, 8), ]
+ u_order = ['F', 'C', 'A'] # here A means neither F nor C contiguous
+ u_shape = [1, 3]
+ dtype = ['f', 'd', 'F', 'D']
+
+ for qo, qs, uo, us, d in \
+ itertools.product(q_order, q_shape, u_order, u_shape, dtype):
+ if us == 1:
+ check_form_qTu(qo, qs, uo, us, 1, d)
+ check_form_qTu(qo, qs, uo, us, 2, d)
+ else:
+ check_form_qTu(qo, qs, uo, us, 2, d)
+
+def check_form_qTu(q_order, q_shape, u_order, u_shape, u_ndim, dtype):
+ np.random.seed(47)
+ if u_shape == 1 and u_ndim == 1:
+ u_shape = (q_shape[0],)
+ else:
+ u_shape = (q_shape[0], u_shape)
+ dtype = np.dtype(dtype)
+
+ if dtype.char in 'fd':
+ q = np.random.random(q_shape)
+ u = np.random.random(u_shape)
+ elif dtype.char in 'FD':
+ q = np.random.random(q_shape) + 1j*np.random.random(q_shape)
+ u = np.random.random(u_shape) + 1j*np.random.random(u_shape)
+ else:
+ ValueError("form_qTu doesn't support this dtype")
+
+ q = np.require(q, dtype, q_order)
+ if u_order != 'A':
+ u = np.require(u, dtype, u_order)
+ else:
+ u, = make_strided((u.astype(dtype),))
+
+ rtol = 10.0 ** -(np.finfo(dtype).precision-2)
+ atol = 2*np.finfo(dtype).eps
+
+ expected = np.dot(q.T.conj(), u)
+ res = _decomp_update._form_qTu(q, u)
+ assert_allclose(res, expected, rtol=rtol, atol=atol)
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_fblas.py b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_fblas.py
new file mode 100644
index 0000000000000000000000000000000000000000..7c5ada830043af0eecb6d04bf39aef13d29d777c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_fblas.py
@@ -0,0 +1,607 @@
+# Test interfaces to fortran blas.
+#
+# The tests exercise the interface more than the underlying BLAS routines.
+# Only very small matrices checked -- N=3 or so.
+#
+# !! Complex calculations really aren't checked that carefully.
+# !! Only real valued complex numbers are used in tests.
+
+from numpy import float32, float64, complex64, complex128, arange, array, \
+ zeros, shape, transpose, newaxis, common_type, conjugate
+
+from scipy.linalg import _fblas as fblas
+
+from numpy.testing import assert_array_equal, \
+ assert_allclose, assert_array_almost_equal, assert_
+
+import pytest
+
+# decimal accuracy to require between Python and LAPACK/BLAS calculations
+accuracy = 5
+
+# Since numpy.dot likely uses the same blas, use this routine
+# to check.
+
+
+def matrixmultiply(a, b):
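+ # reference matrix multiply written with explicit Python loops; b may be
+ # a 1d vector, in which case the result is flattened back to 1d.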
+ if len(b.shape) == 1:
+ b_is_vector = True
+ b = b[:, newaxis]
+ else:
+ b_is_vector = False
+ assert_(a.shape[1] == b.shape[0])
+ c = zeros((a.shape[0], b.shape[1]), common_type(a, b))
+ for i in range(a.shape[0]):
+ for j in range(b.shape[1]):
+ s = 0
+ for k in range(a.shape[1]):
+ s += a[i, k] * b[k, j]
+ c[i, j] = s
+ if b_is_vector:
+ c = c.reshape((a.shape[0],))
+ return c
+
+##################################################
+# Test blas ?axpy
+
+
+class BaseAxpy:
+ ''' Mixin class for axpy tests '''
+
+ def test_default_a(self):
+ x = arange(3., dtype=self.dtype)
+ y = arange(3., dtype=x.dtype)
+ real_y = x*1.+y
+ y = self.blas_func(x, y)
+ assert_array_equal(real_y, y)
+
+ def test_simple(self):
+ x = arange(3., dtype=self.dtype)
+ y = arange(3., dtype=x.dtype)
+ real_y = x*3.+y
+ y = self.blas_func(x, y, a=3.)
+ assert_array_equal(real_y, y)
+
+ def test_x_stride(self):
+ x = arange(6., dtype=self.dtype)
+ y = arange(3., dtype=x.dtype)
+ real_y = x[::2]*3.+y
+ y = self.blas_func(x, y, a=3., n=3, incx=2)
+ assert_array_equal(real_y, y)
+
+ def test_y_stride(self):
+ x = arange(3., dtype=self.dtype)
+ y = zeros(6, x.dtype)
+ real_y = x*3.+y[::2]
+ y = self.blas_func(x, y, a=3., n=3, incy=2)
+ assert_array_equal(real_y, y[::2])
+
+ def test_x_and_y_stride(self):
+ x = arange(12., dtype=self.dtype)
+ y = zeros(6, x.dtype)
+ real_y = x[::4]*3.+y[::2]
+ y = self.blas_func(x, y, a=3., n=3, incx=4, incy=2)
+ assert_array_equal(real_y, y[::2])
+
+ def test_x_bad_size(self):
+ x = arange(12., dtype=self.dtype)
+ y = zeros(6, x.dtype)
+ with pytest.raises(Exception, match='failed for 1st keyword'):
+ self.blas_func(x, y, n=4, incx=5)
+
+ def test_y_bad_size(self):
+ x = arange(12., dtype=self.dtype)
+ y = zeros(6, x.dtype)
+ with pytest.raises(Exception, match='failed for 1st keyword'):
+ self.blas_func(x, y, n=3, incy=5)
+
+
+try:
+ class TestSaxpy(BaseAxpy):
+ blas_func = fblas.saxpy
+ dtype = float32
+except AttributeError:
+ class TestSaxpy:
+ pass
+
+
+class TestDaxpy(BaseAxpy):
+ blas_func = fblas.daxpy
+ dtype = float64
+
+
+try:
+ class TestCaxpy(BaseAxpy):
+ blas_func = fblas.caxpy
+ dtype = complex64
+except AttributeError:
+ class TestCaxpy:
+ pass
+
+
+class TestZaxpy(BaseAxpy):
+ blas_func = fblas.zaxpy
+ dtype = complex128
+
+
+##################################################
+# Test blas ?scal
+
+class BaseScal:
+ ''' Mixin class for scal testing '''
+
+ def test_simple(self):
+ x = arange(3., dtype=self.dtype)
+ real_x = x*3.
+ x = self.blas_func(3., x)
+ assert_array_equal(real_x, x)
+
+ def test_x_stride(self):
+ x = arange(6., dtype=self.dtype)
+ real_x = x.copy()
+ real_x[::2] = x[::2]*array(3., self.dtype)
+ x = self.blas_func(3., x, n=3, incx=2)
+ assert_array_equal(real_x, x)
+
+ def test_x_bad_size(self):
+ x = arange(12., dtype=self.dtype)
+ with pytest.raises(Exception, match='failed for 1st keyword'):
+ self.blas_func(2., x, n=4, incx=5)
+
+
+try:
+ class TestSscal(BaseScal):
+ blas_func = fblas.sscal
+ dtype = float32
+except AttributeError:
+ class TestSscal:
+ pass
+
+
+class TestDscal(BaseScal):
+ blas_func = fblas.dscal
+ dtype = float64
+
+
+try:
+ class TestCscal(BaseScal):
+ blas_func = fblas.cscal
+ dtype = complex64
+except AttributeError:
+ class TestCscal:
+ pass
+
+
+class TestZscal(BaseScal):
+ blas_func = fblas.zscal
+ dtype = complex128
+
+
+##################################################
+# Test blas ?copy
+
+class BaseCopy:
+ ''' Mixin class for copy testing '''
+
+ def test_simple(self):
+ x = arange(3., dtype=self.dtype)
+ y = zeros(shape(x), x.dtype)
+ y = self.blas_func(x, y)
+ assert_array_equal(x, y)
+
+ def test_x_stride(self):
+ x = arange(6., dtype=self.dtype)
+ y = zeros(3, x.dtype)
+ y = self.blas_func(x, y, n=3, incx=2)
+ assert_array_equal(x[::2], y)
+
+ def test_y_stride(self):
+ x = arange(3., dtype=self.dtype)
+ y = zeros(6, x.dtype)
+ y = self.blas_func(x, y, n=3, incy=2)
+ assert_array_equal(x, y[::2])
+
+ def test_x_and_y_stride(self):
+ x = arange(12., dtype=self.dtype)
+ y = zeros(6, x.dtype)
+ y = self.blas_func(x, y, n=3, incx=4, incy=2)
+ assert_array_equal(x[::4], y[::2])
+
+ def test_x_bad_size(self):
+ x = arange(12., dtype=self.dtype)
+ y = zeros(6, x.dtype)
+ with pytest.raises(Exception, match='failed for 1st keyword'):
+ self.blas_func(x, y, n=4, incx=5)
+
+ def test_y_bad_size(self):
+ x = arange(12., dtype=self.dtype)
+ y = zeros(6, x.dtype)
+ with pytest.raises(Exception, match='failed for 1st keyword'):
+ self.blas_func(x, y, n=3, incy=5)
+
+ # def test_y_bad_type(self):
+ ## Hmmm. Should this work? What should be the output.
+ # x = arange(3.,dtype=self.dtype)
+ # y = zeros(shape(x))
+ # self.blas_func(x,y)
+ # assert_array_equal(x,y)
+
+
+try:
+ class TestScopy(BaseCopy):
+ blas_func = fblas.scopy
+ dtype = float32
+except AttributeError:
+ class TestScopy:
+ pass
+
+
+class TestDcopy(BaseCopy):
+ blas_func = fblas.dcopy
+ dtype = float64
+
+
+try:
+ class TestCcopy(BaseCopy):
+ blas_func = fblas.ccopy
+ dtype = complex64
+except AttributeError:
+ class TestCcopy:
+ pass
+
+
+class TestZcopy(BaseCopy):
+ blas_func = fblas.zcopy
+ dtype = complex128
+
+
+##################################################
+# Test blas ?swap
+
+class BaseSwap:
+ ''' Mixin class for swap tests '''
+
+ def test_simple(self):
+ x = arange(3., dtype=self.dtype)
+ y = zeros(shape(x), x.dtype)
+ desired_x = y.copy()
+ desired_y = x.copy()
+ x, y = self.blas_func(x, y)
+ assert_array_equal(desired_x, x)
+ assert_array_equal(desired_y, y)
+
+ def test_x_stride(self):
+ x = arange(6., dtype=self.dtype)
+ y = zeros(3, x.dtype)
+ desired_x = y.copy()
+ desired_y = x.copy()[::2]
+ x, y = self.blas_func(x, y, n=3, incx=2)
+ assert_array_equal(desired_x, x[::2])
+ assert_array_equal(desired_y, y)
+
+ def test_y_stride(self):
+ x = arange(3., dtype=self.dtype)
+ y = zeros(6, x.dtype)
+ desired_x = y.copy()[::2]
+ desired_y = x.copy()
+ x, y = self.blas_func(x, y, n=3, incy=2)
+ assert_array_equal(desired_x, x)
+ assert_array_equal(desired_y, y[::2])
+
+ def test_x_and_y_stride(self):
+ x = arange(12., dtype=self.dtype)
+ y = zeros(6, x.dtype)
+ desired_x = y.copy()[::2]
+ desired_y = x.copy()[::4]
+ x, y = self.blas_func(x, y, n=3, incx=4, incy=2)
+ assert_array_equal(desired_x, x[::4])
+ assert_array_equal(desired_y, y[::2])
+
+ def test_x_bad_size(self):
+ x = arange(12., dtype=self.dtype)
+ y = zeros(6, x.dtype)
+ with pytest.raises(Exception, match='failed for 1st keyword'):
+ self.blas_func(x, y, n=4, incx=5)
+
+ def test_y_bad_size(self):
+ x = arange(12., dtype=self.dtype)
+ y = zeros(6, x.dtype)
+ with pytest.raises(Exception, match='failed for 1st keyword'):
+ self.blas_func(x, y, n=3, incy=5)
+
+
+try:
+ class TestSswap(BaseSwap):
+ blas_func = fblas.sswap
+ dtype = float32
+except AttributeError:
+ class TestSswap:
+ pass
+
+
+class TestDswap(BaseSwap):
+ blas_func = fblas.dswap
+ dtype = float64
+
+
+try:
+ class TestCswap(BaseSwap):
+ blas_func = fblas.cswap
+ dtype = complex64
+except AttributeError:
+ class TestCswap:
+ pass
+
+
+class TestZswap(BaseSwap):
+ blas_func = fblas.zswap
+ dtype = complex128
+
+##################################################
+# Test blas ?gemv
+# This will be a mess to test all cases.
+
+
+class BaseGemv:
+ ''' Mixin class for gemv tests '''
+
+ def get_data(self, x_stride=1, y_stride=1):
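+ # returns scalars alpha and beta, a 3x3 matrix a, and vectors x and y
+ # sized for the requested strides; complex dtypes are scaled by (1 + 1j)
+ # so that both real and imaginary parts are exercised.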
+ mult = array(1, dtype=self.dtype)
+ if self.dtype in [complex64, complex128]:
+ mult = array(1+1j, dtype=self.dtype)
+ from numpy.random import normal, seed
+ seed(1234)
+ alpha = array(1., dtype=self.dtype) * mult
+ beta = array(1., dtype=self.dtype) * mult
+ a = normal(0., 1., (3, 3)).astype(self.dtype) * mult
+ x = arange(shape(a)[0]*x_stride, dtype=self.dtype) * mult
+ y = arange(shape(a)[1]*y_stride, dtype=self.dtype) * mult
+ return alpha, beta, a, x, y
+
+ def test_simple(self):
+ alpha, beta, a, x, y = self.get_data()
+ desired_y = alpha*matrixmultiply(a, x)+beta*y
+ y = self.blas_func(alpha, a, x, beta, y)
+ assert_array_almost_equal(desired_y, y)
+
+ def test_default_beta_y(self):
+ alpha, beta, a, x, y = self.get_data()
+ desired_y = matrixmultiply(a, x)
+ y = self.blas_func(1, a, x)
+ assert_array_almost_equal(desired_y, y)
+
+ def test_simple_transpose(self):
+ alpha, beta, a, x, y = self.get_data()
+ desired_y = alpha*matrixmultiply(transpose(a), x)+beta*y
+ y = self.blas_func(alpha, a, x, beta, y, trans=1)
+ assert_array_almost_equal(desired_y, y)
+
+ def test_simple_transpose_conj(self):
+ alpha, beta, a, x, y = self.get_data()
+ desired_y = alpha*matrixmultiply(transpose(conjugate(a)), x)+beta*y
+ y = self.blas_func(alpha, a, x, beta, y, trans=2)
+ assert_array_almost_equal(desired_y, y)
+
+ def test_x_stride(self):
+ alpha, beta, a, x, y = self.get_data(x_stride=2)
+ desired_y = alpha*matrixmultiply(a, x[::2])+beta*y
+ y = self.blas_func(alpha, a, x, beta, y, incx=2)
+ assert_array_almost_equal(desired_y, y)
+
+ def test_x_stride_transpose(self):
+ alpha, beta, a, x, y = self.get_data(x_stride=2)
+ desired_y = alpha*matrixmultiply(transpose(a), x[::2])+beta*y
+ y = self.blas_func(alpha, a, x, beta, y, trans=1, incx=2)
+ assert_array_almost_equal(desired_y, y)
+
+ def test_x_stride_assert(self):
+ # What is the use of this test?
+ alpha, beta, a, x, y = self.get_data(x_stride=2)
+ with pytest.raises(Exception, match='failed for 3rd argument'):
+ y = self.blas_func(1, a, x, 1, y, trans=0, incx=3)
+ with pytest.raises(Exception, match='failed for 3rd argument'):
+ y = self.blas_func(1, a, x, 1, y, trans=1, incx=3)
+
+ def test_y_stride(self):
+ alpha, beta, a, x, y = self.get_data(y_stride=2)
+ desired_y = y.copy()
+ desired_y[::2] = alpha*matrixmultiply(a, x)+beta*y[::2]
+ y = self.blas_func(alpha, a, x, beta, y, incy=2)
+ assert_array_almost_equal(desired_y, y)
+
+ def test_y_stride_transpose(self):
+ alpha, beta, a, x, y = self.get_data(y_stride=2)
+ desired_y = y.copy()
+ desired_y[::2] = alpha*matrixmultiply(transpose(a), x)+beta*y[::2]
+ y = self.blas_func(alpha, a, x, beta, y, trans=1, incy=2)
+ assert_array_almost_equal(desired_y, y)
+
+ def test_y_stride_assert(self):
+ # What is the use of this test?
+ alpha, beta, a, x, y = self.get_data(y_stride=2)
+ with pytest.raises(Exception, match='failed for 2nd keyword'):
+ y = self.blas_func(1, a, x, 1, y, trans=0, incy=3)
+ with pytest.raises(Exception, match='failed for 2nd keyword'):
+ y = self.blas_func(1, a, x, 1, y, trans=1, incy=3)
+
+
+try:
+ class TestSgemv(BaseGemv):
+ blas_func = fblas.sgemv
+ dtype = float32
+
+ def test_sgemv_on_osx(self):
+ from itertools import product
+ import sys
+ import numpy as np
+
+ if sys.platform != 'darwin':
+ return
+
+ def aligned_array(shape, align, dtype, order='C'):
+ # Make array shape `shape` with aligned at `align` bytes
+ d = dtype()
+ # Make array of correct size with `align` extra bytes
+ N = np.prod(shape)
+ tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8)
+ address = tmp.__array_interface__["data"][0]
+ # Find offset into array giving desired alignment
+ for offset in range(align):
+ if (address + offset) % align == 0:
+ break
+ tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype)
+ return tmp.reshape(shape, order=order)
+
+ def as_aligned(arr, align, dtype, order='C'):
+ # Copy `arr` into an aligned array with same shape
+ aligned = aligned_array(arr.shape, align, dtype, order)
+ aligned[:] = arr[:]
+ return aligned
+
+ def assert_dot_close(A, X, desired):
+ assert_allclose(self.blas_func(1.0, A, X), desired,
+ rtol=1e-5, atol=1e-7)
+
+ testdata = product((15, 32), (10000,), (200, 89), ('C', 'F'))
+ for align, m, n, a_order in testdata:
+ A_d = np.random.rand(m, n)
+ X_d = np.random.rand(n)
+ desired = np.dot(A_d, X_d)
+ # Calculation with aligned single precision
+ A_f = as_aligned(A_d, align, np.float32, order=a_order)
+ X_f = as_aligned(X_d, align, np.float32, order=a_order)
+ assert_dot_close(A_f, X_f, desired)
+
+except AttributeError:
+ class TestSgemv:
+ pass
+
+
+class TestDgemv(BaseGemv):
+ blas_func = fblas.dgemv
+ dtype = float64
+
+
+try:
+ class TestCgemv(BaseGemv):
+ blas_func = fblas.cgemv
+ dtype = complex64
+except AttributeError:
+ class TestCgemv:
+ pass
+
+
+class TestZgemv(BaseGemv):
+ blas_func = fblas.zgemv
+ dtype = complex128
+
+
+"""
+##################################################
+### Test blas ?ger
+### This will be a mess to test all cases.
+
+class BaseGer:
+ def get_data(self,x_stride=1,y_stride=1):
+ from numpy.random import normal, seed
+ seed(1234)
+ alpha = array(1., dtype = self.dtype)
+ a = normal(0.,1.,(3,3)).astype(self.dtype)
+ x = arange(shape(a)[0]*x_stride,dtype=self.dtype)
+ y = arange(shape(a)[1]*y_stride,dtype=self.dtype)
+ return alpha,a,x,y
+ def test_simple(self):
+ alpha,a,x,y = self.get_data()
+ # transpose takes care of Fortran vs. C(and Python) memory layout
+ desired_a = alpha*transpose(x[:,newaxis]*y) + a
+ self.blas_func(x,y,a)
+ assert_array_almost_equal(desired_a,a)
+ def test_x_stride(self):
+ alpha,a,x,y = self.get_data(x_stride=2)
+ desired_a = alpha*transpose(x[::2,newaxis]*y) + a
+ self.blas_func(x,y,a,incx=2)
+ assert_array_almost_equal(desired_a,a)
+ def test_x_stride_assert(self):
+ alpha,a,x,y = self.get_data(x_stride=2)
+ with pytest.raises(ValueError, match='foo'):
+ self.blas_func(x,y,a,incx=3)
+ def test_y_stride(self):
+ alpha,a,x,y = self.get_data(y_stride=2)
+ desired_a = alpha*transpose(x[:,newaxis]*y[::2]) + a
+ self.blas_func(x,y,a,incy=2)
+ assert_array_almost_equal(desired_a,a)
+
+ def test_y_stride_assert(self):
+ alpha,a,x,y = self.get_data(y_stride=2)
+ with pytest.raises(ValueError, match='foo'):
+ self.blas_func(a,x,y,incy=3)
+
+class TestSger(BaseGer):
+ blas_func = fblas.sger
+ dtype = float32
+class TestDger(BaseGer):
+ blas_func = fblas.dger
+ dtype = float64
+"""
+##################################################
+# Test blas ?gerc
+# This will be a mess to test all cases.
+
+"""
+class BaseGerComplex(BaseGer):
+ def get_data(self,x_stride=1,y_stride=1):
+ from numpy.random import normal, seed
+ seed(1234)
+ alpha = array(1+1j, dtype = self.dtype)
+ a = normal(0.,1.,(3,3)).astype(self.dtype)
+ a = a + normal(0.,1.,(3,3)) * array(1j, dtype = self.dtype)
+ x = normal(0.,1.,shape(a)[0]*x_stride).astype(self.dtype)
+ x = x + x * array(1j, dtype = self.dtype)
+ y = normal(0.,1.,shape(a)[1]*y_stride).astype(self.dtype)
+ y = y + y * array(1j, dtype = self.dtype)
+ return alpha,a,x,y
+ def test_simple(self):
+ alpha,a,x,y = self.get_data()
+ # transpose takes care of Fortran vs. C(and Python) memory layout
+ a = a * array(0.,dtype = self.dtype)
+ #desired_a = alpha*transpose(x[:,newaxis]*self.transform(y)) + a
+ desired_a = alpha*transpose(x[:,newaxis]*y) + a
+ #self.blas_func(x,y,a,alpha = alpha)
+ fblas.cgeru(x,y,a,alpha = alpha)
+ assert_array_almost_equal(desired_a,a)
+
+ #def test_x_stride(self):
+ # alpha,a,x,y = self.get_data(x_stride=2)
+ # desired_a = alpha*transpose(x[::2,newaxis]*self.transform(y)) + a
+ # self.blas_func(x,y,a,incx=2)
+ # assert_array_almost_equal(desired_a,a)
+ #def test_y_stride(self):
+ # alpha,a,x,y = self.get_data(y_stride=2)
+ # desired_a = alpha*transpose(x[:,newaxis]*self.transform(y[::2])) + a
+ # self.blas_func(x,y,a,incy=2)
+ # assert_array_almost_equal(desired_a,a)
+
+class TestCgeru(BaseGerComplex):
+ blas_func = fblas.cgeru
+ dtype = complex64
+ def transform(self,x):
+ return x
+class TestZgeru(BaseGerComplex):
+ blas_func = fblas.zgeru
+ dtype = complex128
+ def transform(self,x):
+ return x
+
+class TestCgerc(BaseGerComplex):
+ blas_func = fblas.cgerc
+ dtype = complex64
+ def transform(self,x):
+ return conjugate(x)
+
+class TestZgerc(BaseGerComplex):
+ blas_func = fblas.zgerc
+ dtype = complex128
+ def transform(self,x):
+ return conjugate(x)
+"""
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_interpolative.py b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_interpolative.py
new file mode 100644
index 0000000000000000000000000000000000000000..ddc56f7c7366fe2bf8967a722bcdd0f32dd19036
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_interpolative.py
@@ -0,0 +1,241 @@
+#******************************************************************************
+# Copyright (C) 2013 Kenneth L. Ho
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer. Redistributions in binary
+# form must reproduce the above copyright notice, this list of conditions and
+# the following disclaimer in the documentation and/or other materials
+# provided with the distribution.
+#
+# None of the names of the copyright holders may be used to endorse or
+# promote products derived from this software without specific prior written
+# permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#******************************************************************************
+
+import scipy.linalg.interpolative as pymatrixid
+import numpy as np
+from scipy.linalg import hilbert, svdvals, norm
+from scipy.sparse.linalg import aslinearoperator
+from scipy.linalg.interpolative import interp_decomp
+
+from numpy.testing import (assert_, assert_allclose, assert_equal,
+ assert_array_equal)
+import pytest
+from pytest import raises as assert_raises
+import sys
+_IS_32BIT = (sys.maxsize < 2**32)
+
+
+@pytest.fixture()
+def eps():
+ yield 1e-12
+
+
+@pytest.fixture(params=[np.float64, np.complex128])
+def A(request):
+ # construct Hilbert matrix
+ # set parameters
+ n = 300
+ yield hilbert(n).astype(request.param)
+
+
+@pytest.fixture()
+def L(A):
+ yield aslinearoperator(A)
+
+
+@pytest.fixture()
+def rank(A, eps):
+ S = np.linalg.svd(A, compute_uv=False)
+ try:
+ rank = np.nonzero(S < eps)[0][0]
+ except IndexError:
+ rank = A.shape[0]
+ return rank
+
+
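+# Illustrative sketch (not part of the upstream suite; _demo_numerical_rank is
+# an added helper for exposition): the `rank` fixture above takes the numerical
+# rank to be the index of the first singular value that drops below `eps`,
+# falling back to the full dimension when none does. The same idea on a small
+# rank-1 matrix:
+def _demo_numerical_rank(eps=1e-12):
+    M = np.outer(np.arange(1., 4.), np.arange(1., 5.))  # rank-1 3x4 matrix
+    S = np.linalg.svd(M, compute_uv=False)
+    small = np.nonzero(S < eps)[0]
+    return small[0] if small.size else min(M.shape)
+
+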
+class TestInterpolativeDecomposition:
+
+ @pytest.mark.parametrize(
+ "rand,lin_op",
+ [(False, False), (True, False), (True, True)])
+ def test_real_id_fixed_precision(self, A, L, eps, rand, lin_op):
+ if _IS_32BIT and A.dtype == np.complex128 and rand:
+ pytest.xfail("bug in external fortran code")
+ # Test ID routines on a Hilbert matrix.
+ A_or_L = A if not lin_op else L
+
+ k, idx, proj = pymatrixid.interp_decomp(A_or_L, eps, rand=rand)
+ B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj)
+ assert_allclose(A, B, rtol=eps, atol=1e-08)
+
+ @pytest.mark.parametrize(
+ "rand,lin_op",
+ [(False, False), (True, False), (True, True)])
+ def test_real_id_fixed_rank(self, A, L, eps, rank, rand, lin_op):
+ if _IS_32BIT and A.dtype == np.complex128 and rand:
+ pytest.xfail("bug in external fortran code")
+ k = rank
+ A_or_L = A if not lin_op else L
+
+ idx, proj = pymatrixid.interp_decomp(A_or_L, k, rand=rand)
+ B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj)
+ assert_allclose(A, B, rtol=eps, atol=1e-08)
+
+ @pytest.mark.parametrize("rand,lin_op", [(False, False)])
+ def test_real_id_skel_and_interp_matrices(
+ self, A, L, eps, rank, rand, lin_op):
+ k = rank
+ A_or_L = A if not lin_op else L
+
+ idx, proj = pymatrixid.interp_decomp(A_or_L, k, rand=rand)
+ P = pymatrixid.reconstruct_interp_matrix(idx, proj)
+ B = pymatrixid.reconstruct_skel_matrix(A, k, idx)
+ assert_allclose(B, A[:, idx[:k]], rtol=eps, atol=1e-08)
+ assert_allclose(B @ P, A, rtol=eps, atol=1e-08)
+
+ @pytest.mark.parametrize(
+ "rand,lin_op",
+ [(False, False), (True, False), (True, True)])
+    def test_svd_fixed_precision(self, A, L, eps, rand, lin_op):
+ if _IS_32BIT and A.dtype == np.complex128 and rand:
+ pytest.xfail("bug in external fortran code")
+ A_or_L = A if not lin_op else L
+
+ U, S, V = pymatrixid.svd(A_or_L, eps, rand=rand)
+ B = U * S @ V.T.conj()
+ assert_allclose(A, B, rtol=eps, atol=1e-08)
+
+ @pytest.mark.parametrize(
+ "rand,lin_op",
+ [(False, False), (True, False), (True, True)])
+ def test_svd_fixed_rank(self, A, L, eps, rank, rand, lin_op):
+ if _IS_32BIT and A.dtype == np.complex128 and rand:
+ pytest.xfail("bug in external fortran code")
+ k = rank
+ A_or_L = A if not lin_op else L
+
+ U, S, V = pymatrixid.svd(A_or_L, k, rand=rand)
+ B = U * S @ V.T.conj()
+ assert_allclose(A, B, rtol=eps, atol=1e-08)
+
+ def test_id_to_svd(self, A, eps, rank):
+ k = rank
+
+ idx, proj = pymatrixid.interp_decomp(A, k, rand=False)
+ U, S, V = pymatrixid.id_to_svd(A[:, idx[:k]], idx, proj)
+ B = U * S @ V.T.conj()
+ assert_allclose(A, B, rtol=eps, atol=1e-08)
+
+ def test_estimate_spectral_norm(self, A):
+ s = svdvals(A)
+ norm_2_est = pymatrixid.estimate_spectral_norm(A)
+ assert_allclose(norm_2_est, s[0], rtol=1e-6, atol=1e-8)
+
+ def test_estimate_spectral_norm_diff(self, A):
+ B = A.copy()
+ B[:, 0] *= 1.2
+ s = svdvals(A - B)
+ norm_2_est = pymatrixid.estimate_spectral_norm_diff(A, B)
+ assert_allclose(norm_2_est, s[0], rtol=1e-6, atol=1e-8)
+
+ def test_rank_estimates_array(self, A):
+ B = np.array([[1, 1, 0], [0, 0, 1], [0, 0, 1]], dtype=A.dtype)
+
+ for M in [A, B]:
+ rank_tol = 1e-9
+ rank_np = np.linalg.matrix_rank(M, norm(M, 2) * rank_tol)
+ rank_est = pymatrixid.estimate_rank(M, rank_tol)
+ assert_(rank_est >= rank_np)
+ assert_(rank_est <= rank_np + 10)
+
+ def test_rank_estimates_lin_op(self, A):
+ B = np.array([[1, 1, 0], [0, 0, 1], [0, 0, 1]], dtype=A.dtype)
+
+ for M in [A, B]:
+ ML = aslinearoperator(M)
+ rank_tol = 1e-9
+ rank_np = np.linalg.matrix_rank(M, norm(M, 2) * rank_tol)
+ rank_est = pymatrixid.estimate_rank(ML, rank_tol)
+ assert_(rank_est >= rank_np - 4)
+ assert_(rank_est <= rank_np + 4)
+
+ def test_rand(self):
+ pymatrixid.seed('default')
+ assert_allclose(pymatrixid.rand(2), [0.8932059, 0.64500803],
+ rtol=1e-4, atol=1e-8)
+
+ pymatrixid.seed(1234)
+ x1 = pymatrixid.rand(2)
+ assert_allclose(x1, [0.7513823, 0.06861718], rtol=1e-4, atol=1e-8)
+
+ np.random.seed(1234)
+ pymatrixid.seed()
+ x2 = pymatrixid.rand(2)
+
+ np.random.seed(1234)
+ pymatrixid.seed(np.random.rand(55))
+ x3 = pymatrixid.rand(2)
+
+ assert_allclose(x1, x2)
+ assert_allclose(x1, x3)
+
+ def test_badcall(self):
+ A = hilbert(5).astype(np.float32)
+ with assert_raises(ValueError):
+ pymatrixid.interp_decomp(A, 1e-6, rand=False)
+
+ def test_rank_too_large(self):
+ # svd(array, k) should not segfault
+ a = np.ones((4, 3))
+ with assert_raises(ValueError):
+ pymatrixid.svd(a, 4)
+
+ def test_full_rank(self):
+ eps = 1.0e-12
+
+ # fixed precision
+ A = np.random.rand(16, 8)
+ k, idx, proj = pymatrixid.interp_decomp(A, eps)
+ assert_equal(k, A.shape[1])
+
+ P = pymatrixid.reconstruct_interp_matrix(idx, proj)
+ B = pymatrixid.reconstruct_skel_matrix(A, k, idx)
+ assert_allclose(A, B @ P)
+
+ # fixed rank
+ idx, proj = pymatrixid.interp_decomp(A, k)
+
+ P = pymatrixid.reconstruct_interp_matrix(idx, proj)
+ B = pymatrixid.reconstruct_skel_matrix(A, k, idx)
+ assert_allclose(A, B @ P)
+
+ @pytest.mark.parametrize("dtype", [np.float64, np.complex128])
+ @pytest.mark.parametrize("rand", [True, False])
+ @pytest.mark.parametrize("eps", [1, 0.1])
+ def test_bug_9793(self, dtype, rand, eps):
+ if _IS_32BIT and dtype == np.complex128 and rand:
+ pytest.xfail("bug in external fortran code")
+ A = np.array([[-1, -1, -1, 0, 0, 0],
+ [0, 0, 0, 1, 1, 1],
+ [1, 0, 0, 1, 0, 0],
+ [0, 1, 0, 0, 1, 0],
+ [0, 0, 1, 0, 0, 1]],
+ dtype=dtype, order="C")
+ B = A.copy()
+ interp_decomp(A.T, eps, rand=rand)
+ assert_array_equal(A, B)
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_lapack.py b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_lapack.py
new file mode 100644
index 0000000000000000000000000000000000000000..4792a86066b46b56ba28c87427fc288b78049a8c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_lapack.py
@@ -0,0 +1,3399 @@
+#
+# Created by: Pearu Peterson, September 2002
+#
+
+import sys
+from functools import reduce
+
+from numpy.testing import (assert_equal, assert_array_almost_equal, assert_,
+ assert_allclose, assert_almost_equal,
+ assert_array_equal)
+import pytest
+from pytest import raises as assert_raises
+
+import numpy as np
+from numpy import (eye, ones, zeros, zeros_like, triu, tril, tril_indices,
+ triu_indices)
+
+from numpy.random import rand, randint, seed
+
+from scipy.linalg import (_flapack as flapack, lapack, inv, svd, cholesky,
+ solve, ldl, norm, block_diag, qr, eigh, qz)
+
+from scipy.linalg.lapack import _compute_lwork
+from scipy.stats import ortho_group, unitary_group
+
+import scipy.sparse as sps
+try:
+ from scipy.__config__ import CONFIG
+except ImportError:
+ CONFIG = None
+
+try:
+ from scipy.linalg import _clapack as clapack
+except ImportError:
+ clapack = None
+from scipy.linalg.lapack import get_lapack_funcs
+from scipy.linalg.blas import get_blas_funcs
+
+REAL_DTYPES = [np.float32, np.float64]
+COMPLEX_DTYPES = [np.complex64, np.complex128]
+DTYPES = REAL_DTYPES + COMPLEX_DTYPES
+
+blas_provider = blas_version = None
+if CONFIG is not None:
+ blas_provider = CONFIG['Build Dependencies']['blas']['name']
+ blas_version = CONFIG['Build Dependencies']['blas']['version']
+
+
+def generate_random_dtype_array(shape, dtype):
+ # generates a random matrix of desired data type of shape
+ if dtype in COMPLEX_DTYPES:
+ return (np.random.rand(*shape)
+ + np.random.rand(*shape)*1.0j).astype(dtype)
+ return np.random.rand(*shape).astype(dtype)
+
+
+def test_lapack_documented():
+ """Test that all entries are in the doc."""
+ if lapack.__doc__ is None: # just in case there is a python -OO
+ pytest.skip('lapack.__doc__ is None')
+ names = set(lapack.__doc__.split())
+ ignore_list = {
+ 'absolute_import', 'clapack', 'division', 'find_best_lapack_type',
+ 'flapack', 'print_function', 'HAS_ILP64',
+ }
+ missing = list()
+ for name in dir(lapack):
+ if (not name.startswith('_') and name not in ignore_list and
+ name not in names):
+ missing.append(name)
+ assert missing == [], 'Name(s) missing from lapack.__doc__ or ignore_list'
+
+
+class TestFlapackSimple:
+
+ def test_gebal(self):
+ a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
+ a1 = [[1, 0, 0, 3e-4],
+ [4, 0, 0, 2e-3],
+ [7, 1, 0, 0],
+ [0, 1, 0, 0]]
+ for p in 'sdzc':
+ f = getattr(flapack, p+'gebal', None)
+ if f is None:
+ continue
+ ba, lo, hi, pivscale, info = f(a)
+ assert_(not info, repr(info))
+ assert_array_almost_equal(ba, a)
+ assert_equal((lo, hi), (0, len(a[0])-1))
+ assert_array_almost_equal(pivscale, np.ones(len(a)))
+
+ ba, lo, hi, pivscale, info = f(a1, permute=1, scale=1)
+ assert_(not info, repr(info))
+ # print(a1)
+ # print(ba, lo, hi, pivscale)
+
+ def test_gehrd(self):
+ a = [[-149, -50, -154],
+ [537, 180, 546],
+ [-27, -9, -25]]
+ for p in 'd':
+ f = getattr(flapack, p+'gehrd', None)
+ if f is None:
+ continue
+ ht, tau, info = f(a)
+ assert_(not info, repr(info))
+
+ def test_trsyl(self):
+ a = np.array([[1, 2], [0, 4]])
+ b = np.array([[5, 6], [0, 8]])
+ c = np.array([[9, 10], [11, 12]])
+ trans = 'T'
+
+ # Test single and double implementations, including most
+ # of the options
+ for dtype in 'fdFD':
+ a1, b1, c1 = a.astype(dtype), b.astype(dtype), c.astype(dtype)
+ trsyl, = get_lapack_funcs(('trsyl',), (a1,))
+ if dtype.isupper(): # is complex dtype
+ a1[0] += 1j
+ trans = 'C'
+
+ x, scale, info = trsyl(a1, b1, c1)
+ assert_array_almost_equal(np.dot(a1, x) + np.dot(x, b1),
+ scale * c1)
+
+ x, scale, info = trsyl(a1, b1, c1, trana=trans, tranb=trans)
+ assert_array_almost_equal(
+ np.dot(a1.conjugate().T, x) + np.dot(x, b1.conjugate().T),
+ scale * c1, decimal=4)
+
+ x, scale, info = trsyl(a1, b1, c1, isgn=-1)
+ assert_array_almost_equal(np.dot(a1, x) - np.dot(x, b1),
+ scale * c1, decimal=4)
+
+ def test_lange(self):
+ a = np.array([
+ [-149, -50, -154],
+ [537, 180, 546],
+ [-27, -9, -25]])
+
+ for dtype in 'fdFD':
+ for norm_str in 'Mm1OoIiFfEe':
+ a1 = a.astype(dtype)
+ if dtype.isupper():
+ # is complex dtype
+ a1[0, 0] += 1j
+
+ lange, = get_lapack_funcs(('lange',), (a1,))
+ value = lange(norm_str, a1)
+
+ if norm_str in 'FfEe':
+ if dtype in 'Ff':
+ decimal = 3
+ else:
+ decimal = 7
+ ref = np.sqrt(np.sum(np.square(np.abs(a1))))
+ assert_almost_equal(value, ref, decimal)
+ else:
+ if norm_str in 'Mm':
+ ref = np.max(np.abs(a1))
+ elif norm_str in '1Oo':
+ ref = np.max(np.sum(np.abs(a1), axis=0))
+ elif norm_str in 'Ii':
+ ref = np.max(np.sum(np.abs(a1), axis=1))
+
+ assert_equal(value, ref)
+
+
+class TestLapack:
+
+ def test_flapack(self):
+ if hasattr(flapack, 'empty_module'):
+ # flapack module is empty
+ pass
+
+ def test_clapack(self):
+ if hasattr(clapack, 'empty_module'):
+ # clapack module is empty
+ pass
+
+
+class TestLeastSquaresSolvers:
+
+ def test_gels(self):
+ seed(1234)
+ # Test fat/tall matrix argument handling - gh-issue #8329
+ for ind, dtype in enumerate(DTYPES):
+ m = 10
+ n = 20
+ nrhs = 1
+ a1 = rand(m, n).astype(dtype)
+ b1 = rand(n).astype(dtype)
+ gls, glslw = get_lapack_funcs(('gels', 'gels_lwork'), dtype=dtype)
+
+ # Request of sizes
+ lwork = _compute_lwork(glslw, m, n, nrhs)
+ _, _, info = gls(a1, b1, lwork=lwork)
+ assert_(info >= 0)
+ _, _, info = gls(a1, b1, trans='TTCC'[ind], lwork=lwork)
+ assert_(info >= 0)
+
+ for dtype in REAL_DTYPES:
+ a1 = np.array([[1.0, 2.0],
+ [4.0, 5.0],
+ [7.0, 8.0]], dtype=dtype)
+ b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
+ gels, gels_lwork, geqrf = get_lapack_funcs(
+ ('gels', 'gels_lwork', 'geqrf'), (a1, b1))
+
+ m, n = a1.shape
+ if len(b1.shape) == 2:
+ nrhs = b1.shape[1]
+ else:
+ nrhs = 1
+
+ # Request of sizes
+ lwork = _compute_lwork(gels_lwork, m, n, nrhs)
+
+ lqr, x, info = gels(a1, b1, lwork=lwork)
+ assert_allclose(x[:-1], np.array([-14.333333333333323,
+ 14.999999999999991],
+ dtype=dtype),
+ rtol=25*np.finfo(dtype).eps)
+ lqr_truth, _, _, _ = geqrf(a1)
+ assert_array_equal(lqr, lqr_truth)
+
+ for dtype in COMPLEX_DTYPES:
+ a1 = np.array([[1.0+4.0j, 2.0],
+ [4.0+0.5j, 5.0-3.0j],
+ [7.0-2.0j, 8.0+0.7j]], dtype=dtype)
+ b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
+ gels, gels_lwork, geqrf = get_lapack_funcs(
+ ('gels', 'gels_lwork', 'geqrf'), (a1, b1))
+
+ m, n = a1.shape
+ if len(b1.shape) == 2:
+ nrhs = b1.shape[1]
+ else:
+ nrhs = 1
+
+ # Request of sizes
+ lwork = _compute_lwork(gels_lwork, m, n, nrhs)
+
+ lqr, x, info = gels(a1, b1, lwork=lwork)
+ assert_allclose(x[:-1],
+ np.array([1.161753632288328-1.901075709391912j,
+ 1.735882340522193+1.521240901196909j],
+ dtype=dtype), rtol=25*np.finfo(dtype).eps)
+ lqr_truth, _, _, _ = geqrf(a1)
+ assert_array_equal(lqr, lqr_truth)
+
+ def test_gelsd(self):
+ for dtype in REAL_DTYPES:
+ a1 = np.array([[1.0, 2.0],
+ [4.0, 5.0],
+ [7.0, 8.0]], dtype=dtype)
+ b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
+ gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'),
+ (a1, b1))
+
+ m, n = a1.shape
+ if len(b1.shape) == 2:
+ nrhs = b1.shape[1]
+ else:
+ nrhs = 1
+
+ # Request of sizes
+ work, iwork, info = gelsd_lwork(m, n, nrhs, -1)
+ lwork = int(np.real(work))
+ iwork_size = iwork
+
+ x, s, rank, info = gelsd(a1, b1, lwork, iwork_size,
+ -1, False, False)
+ assert_allclose(x[:-1], np.array([-14.333333333333323,
+ 14.999999999999991],
+ dtype=dtype),
+ rtol=25*np.finfo(dtype).eps)
+ assert_allclose(s, np.array([12.596017180511966,
+ 0.583396253199685], dtype=dtype),
+ rtol=25*np.finfo(dtype).eps)
+
+ for dtype in COMPLEX_DTYPES:
+ a1 = np.array([[1.0+4.0j, 2.0],
+ [4.0+0.5j, 5.0-3.0j],
+ [7.0-2.0j, 8.0+0.7j]], dtype=dtype)
+ b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
+ gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'),
+ (a1, b1))
+
+ m, n = a1.shape
+ if len(b1.shape) == 2:
+ nrhs = b1.shape[1]
+ else:
+ nrhs = 1
+
+ # Request of sizes
+ work, rwork, iwork, info = gelsd_lwork(m, n, nrhs, -1)
+ lwork = int(np.real(work))
+ rwork_size = int(rwork)
+ iwork_size = iwork
+
+ x, s, rank, info = gelsd(a1, b1, lwork, rwork_size, iwork_size,
+ -1, False, False)
+ assert_allclose(x[:-1],
+ np.array([1.161753632288328-1.901075709391912j,
+ 1.735882340522193+1.521240901196909j],
+ dtype=dtype), rtol=25*np.finfo(dtype).eps)
+ assert_allclose(s,
+ np.array([13.035514762572043, 4.337666985231382],
+ dtype=dtype), rtol=25*np.finfo(dtype).eps)
+
+ def test_gelss(self):
+
+ for dtype in REAL_DTYPES:
+ a1 = np.array([[1.0, 2.0],
+ [4.0, 5.0],
+ [7.0, 8.0]], dtype=dtype)
+ b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
+ gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
+ (a1, b1))
+
+ m, n = a1.shape
+ if len(b1.shape) == 2:
+ nrhs = b1.shape[1]
+ else:
+ nrhs = 1
+
+ # Request of sizes
+ work, info = gelss_lwork(m, n, nrhs, -1)
+ lwork = int(np.real(work))
+
+ v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False)
+ assert_allclose(x[:-1], np.array([-14.333333333333323,
+ 14.999999999999991],
+ dtype=dtype),
+ rtol=25*np.finfo(dtype).eps)
+ assert_allclose(s, np.array([12.596017180511966,
+ 0.583396253199685], dtype=dtype),
+ rtol=25*np.finfo(dtype).eps)
+
+ for dtype in COMPLEX_DTYPES:
+ a1 = np.array([[1.0+4.0j, 2.0],
+ [4.0+0.5j, 5.0-3.0j],
+ [7.0-2.0j, 8.0+0.7j]], dtype=dtype)
+ b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
+ gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'),
+ (a1, b1))
+
+ m, n = a1.shape
+ if len(b1.shape) == 2:
+ nrhs = b1.shape[1]
+ else:
+ nrhs = 1
+
+ # Request of sizes
+ work, info = gelss_lwork(m, n, nrhs, -1)
+ lwork = int(np.real(work))
+
+ v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False)
+ assert_allclose(x[:-1],
+ np.array([1.161753632288328-1.901075709391912j,
+ 1.735882340522193+1.521240901196909j],
+ dtype=dtype),
+ rtol=25*np.finfo(dtype).eps)
+ assert_allclose(s, np.array([13.035514762572043,
+ 4.337666985231382], dtype=dtype),
+ rtol=25*np.finfo(dtype).eps)
+
+ def test_gelsy(self):
+
+ for dtype in REAL_DTYPES:
+ a1 = np.array([[1.0, 2.0],
+ [4.0, 5.0],
+ [7.0, 8.0]], dtype=dtype)
+ b1 = np.array([16.0, 17.0, 20.0], dtype=dtype)
+ gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'),
+ (a1, b1))
+
+ m, n = a1.shape
+ if len(b1.shape) == 2:
+ nrhs = b1.shape[1]
+ else:
+ nrhs = 1
+
+ # Request of sizes
+ work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps)
+ lwork = int(np.real(work))
+
+ jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
+ v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
+ lwork, False, False)
+ assert_allclose(x[:-1], np.array([-14.333333333333323,
+ 14.999999999999991],
+ dtype=dtype),
+ rtol=25*np.finfo(dtype).eps)
+
+ for dtype in COMPLEX_DTYPES:
+ a1 = np.array([[1.0+4.0j, 2.0],
+ [4.0+0.5j, 5.0-3.0j],
+ [7.0-2.0j, 8.0+0.7j]], dtype=dtype)
+ b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype)
+ gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'),
+ (a1, b1))
+
+ m, n = a1.shape
+ if len(b1.shape) == 2:
+ nrhs = b1.shape[1]
+ else:
+ nrhs = 1
+
+ # Request of sizes
+ work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps)
+ lwork = int(np.real(work))
+
+ jptv = np.zeros((a1.shape[1], 1), dtype=np.int32)
+ v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps,
+ lwork, False, False)
+ assert_allclose(x[:-1],
+ np.array([1.161753632288328-1.901075709391912j,
+ 1.735882340522193+1.521240901196909j],
+ dtype=dtype),
+ rtol=25*np.finfo(dtype).eps)
+
+
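+# Illustrative sketch (not part of the upstream tests; _demo_gels_lwork_pattern
+# is an added helper): the "Request of sizes" steps above all follow the same
+# workspace-query pattern -- fetch the paired ?gels/?gels_lwork wrappers,
+# compute an adequate LWORK via _compute_lwork, then call the solver.
+def _demo_gels_lwork_pattern(dtype=np.float64):
+    a = np.array([[1., 2.], [4., 5.], [7., 8.]], dtype=dtype)
+    b = np.array([16., 17., 20.], dtype=dtype)
+    gels, gels_lwork = get_lapack_funcs(('gels', 'gels_lwork'), (a, b))
+    lwork = _compute_lwork(gels_lwork, a.shape[0], a.shape[1], 1)
+    _, x, info = gels(a, b, lwork=lwork)
+    # first n entries of x hold the least-squares solution
+    return x[:a.shape[1]], info
+
+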
+@pytest.mark.parametrize('dtype', DTYPES)
+@pytest.mark.parametrize('shape', [(3, 4), (5, 2), (2**18, 2**18)])
+def test_geqrf_lwork(dtype, shape):
+ geqrf_lwork = get_lapack_funcs(('geqrf_lwork'), dtype=dtype)
+ m, n = shape
+ lwork, info = geqrf_lwork(m=m, n=n)
+ assert_equal(info, 0)
+
+
+class TestRegression:
+
+ def test_ticket_1645(self):
+ # Check that RQ routines have correct lwork
+ for dtype in DTYPES:
+ a = np.zeros((300, 2), dtype=dtype)
+
+ gerqf, = get_lapack_funcs(['gerqf'], [a])
+ assert_raises(Exception, gerqf, a, lwork=2)
+ rq, tau, work, info = gerqf(a)
+
+ if dtype in REAL_DTYPES:
+ orgrq, = get_lapack_funcs(['orgrq'], [a])
+ assert_raises(Exception, orgrq, rq[-2:], tau, lwork=1)
+ orgrq(rq[-2:], tau, lwork=2)
+ elif dtype in COMPLEX_DTYPES:
+ ungrq, = get_lapack_funcs(['ungrq'], [a])
+ assert_raises(Exception, ungrq, rq[-2:], tau, lwork=1)
+ ungrq(rq[-2:], tau, lwork=2)
+
+
+class TestDpotr:
+ def test_gh_2691(self):
+        # 'lower' argument of dpotrf/dpotri
+ for lower in [True, False]:
+ for clean in [True, False]:
+ np.random.seed(42)
+ x = np.random.normal(size=(3, 3))
+ a = x.dot(x.T)
+
+ dpotrf, dpotri = get_lapack_funcs(("potrf", "potri"), (a, ))
+
+ c, info = dpotrf(a, lower, clean=clean)
+ dpt = dpotri(c, lower)[0]
+
+ if lower:
+ assert_allclose(np.tril(dpt), np.tril(inv(a)))
+ else:
+ assert_allclose(np.triu(dpt), np.triu(inv(a)))
+
+
+class TestDlasd4:
+ def test_sing_val_update(self):
+
+ sigmas = np.array([4., 3., 2., 0])
+ m_vec = np.array([3.12, 5.7, -4.8, -2.2])
+
+ M = np.hstack((np.vstack((np.diag(sigmas[0:-1]),
+ np.zeros((1, len(m_vec) - 1)))),
+ m_vec[:, np.newaxis]))
+ SM = svd(M, full_matrices=False, compute_uv=False, overwrite_a=False,
+ check_finite=False)
+
+ it_len = len(sigmas)
+ sgm = np.concatenate((sigmas[::-1], [sigmas[0] + it_len*norm(m_vec)]))
+ mvc = np.concatenate((m_vec[::-1], (0,)))
+
+ lasd4 = get_lapack_funcs('lasd4', (sigmas,))
+
+ roots = []
+ for i in range(0, it_len):
+ res = lasd4(i, sgm, mvc)
+ roots.append(res[1])
+
+            assert_(res[3] <= 0,
+                    "LAPACK root finding dlasd4 failed to find "
+                    "the singular value %i" % i)
+ roots = np.array(roots)[::-1]
+
+        assert_(not np.any(np.isnan(roots)), "There are NaN roots")
+ assert_allclose(SM, roots, atol=100*np.finfo(np.float64).eps,
+ rtol=100*np.finfo(np.float64).eps)
+
+
+class TestTbtrs:
+
+ @pytest.mark.parametrize('dtype', DTYPES)
+ def test_nag_example_f07vef_f07vsf(self, dtype):
+ """Test real (f07vef) and complex (f07vsf) examples from NAG
+
+ Examples available from:
+ * https://www.nag.com/numeric/fl/nagdoc_latest/html/f07/f07vef.html
+ * https://www.nag.com/numeric/fl/nagdoc_latest/html/f07/f07vsf.html
+
+ """
+ if dtype in REAL_DTYPES:
+ ab = np.array([[-4.16, 4.78, 6.32, 0.16],
+ [-2.25, 5.86, -4.82, 0]],
+ dtype=dtype)
+ b = np.array([[-16.64, -4.16],
+ [-13.78, -16.59],
+ [13.10, -4.94],
+ [-14.14, -9.96]],
+ dtype=dtype)
+ x_out = np.array([[4, 1],
+ [-1, -3],
+ [3, 2],
+ [2, -2]],
+ dtype=dtype)
+ elif dtype in COMPLEX_DTYPES:
+ ab = np.array([[-1.94+4.43j, 4.12-4.27j, 0.43-2.66j, 0.44+0.1j],
+ [-3.39+3.44j, -1.84+5.52j, 1.74 - 0.04j, 0],
+ [1.62+3.68j, -2.77-1.93j, 0, 0]],
+ dtype=dtype)
+ b = np.array([[-8.86 - 3.88j, -24.09 - 5.27j],
+ [-15.57 - 23.41j, -57.97 + 8.14j],
+ [-7.63 + 22.78j, 19.09 - 29.51j],
+ [-14.74 - 2.40j, 19.17 + 21.33j]],
+ dtype=dtype)
+ x_out = np.array([[2j, 1 + 5j],
+ [1 - 3j, -7 - 2j],
+ [-4.001887 - 4.988417j, 3.026830 + 4.003182j],
+ [1.996158 - 1.045105j, -6.103357 - 8.986653j]],
+ dtype=dtype)
+ else:
+ raise ValueError(f"Datatype {dtype} not understood.")
+
+ tbtrs = get_lapack_funcs(('tbtrs'), dtype=dtype)
+ x, info = tbtrs(ab=ab, b=b, uplo='L')
+ assert_equal(info, 0)
+ assert_allclose(x, x_out, rtol=0, atol=1e-5)
+
+ @pytest.mark.parametrize('dtype,trans',
+ [(dtype, trans)
+ for dtype in DTYPES for trans in ['N', 'T', 'C']
+ if not (trans == 'C' and dtype in REAL_DTYPES)])
+ @pytest.mark.parametrize('uplo', ['U', 'L'])
+ @pytest.mark.parametrize('diag', ['N', 'U'])
+ def test_random_matrices(self, dtype, trans, uplo, diag):
+ seed(1724)
+ # n, nrhs, kd are used to specify A and b.
+ # A is of shape n x n with kd super/sub-diagonals
+ # b is of shape n x nrhs matrix
+ n, nrhs, kd = 4, 3, 2
+ tbtrs = get_lapack_funcs('tbtrs', dtype=dtype)
+
+ is_upper = (uplo == 'U')
+ ku = kd * is_upper
+ kl = kd - ku
+
+ # Construct the diagonal and kd super/sub diagonals of A with
+ # the corresponding offsets.
+ band_offsets = range(ku, -kl - 1, -1)
+ band_widths = [n - abs(x) for x in band_offsets]
+ bands = [generate_random_dtype_array((width,), dtype)
+ for width in band_widths]
+
+ if diag == 'U': # A must be unit triangular
+ bands[ku] = np.ones(n, dtype=dtype)
+
+ # Construct the diagonal banded matrix A from the bands and offsets.
+ a = sps.diags(bands, band_offsets, format='dia')
+
+ # Convert A into banded storage form
+ ab = np.zeros((kd + 1, n), dtype)
+ for row, k in enumerate(band_offsets):
+ ab[row, max(k, 0):min(n+k, n)] = a.diagonal(k)
+
+ # The RHS values.
+ b = generate_random_dtype_array((n, nrhs), dtype)
+
+ x, info = tbtrs(ab=ab, b=b, uplo=uplo, trans=trans, diag=diag)
+ assert_equal(info, 0)
+
+ if trans == 'N':
+ assert_allclose(a @ x, b, rtol=5e-5)
+ elif trans == 'T':
+ assert_allclose(a.T @ x, b, rtol=5e-5)
+ elif trans == 'C':
+ assert_allclose(a.H @ x, b, rtol=5e-5)
+ else:
+ raise ValueError('Invalid trans argument')
+
+ @pytest.mark.parametrize('uplo,trans,diag',
+ [['U', 'N', 'Invalid'],
+ ['U', 'Invalid', 'N'],
+ ['Invalid', 'N', 'N']])
+ def test_invalid_argument_raises_exception(self, uplo, trans, diag):
+ """Test if invalid values of uplo, trans and diag raise exceptions"""
+        # Argument checks occur independently of the used datatype.
+        # This means we do not need to parameterize over all available
+        # datatypes.
+ tbtrs = get_lapack_funcs('tbtrs', dtype=np.float64)
+ ab = rand(4, 2)
+ b = rand(2, 4)
+ assert_raises(Exception, tbtrs, ab, b, uplo, trans, diag)
+
+ def test_zero_element_in_diagonal(self):
+ """Test if a matrix with a zero diagonal element is singular
+
+        If the i-th diagonal element of A is zero, ?tbtrs should return `i`
+        in `info`, indicating that the provided matrix is singular.
+
+ Note that ?tbtrs requires the matrix A to be stored in banded form.
+ In this form the diagonal corresponds to the last row."""
+ ab = np.ones((3, 4), dtype=float)
+ b = np.ones(4, dtype=float)
+ tbtrs = get_lapack_funcs('tbtrs', dtype=float)
+
+ ab[-1, 3] = 0
+ _, info = tbtrs(ab=ab, b=b, uplo='U')
+ assert_equal(info, 4)
+
+ @pytest.mark.parametrize('ldab,n,ldb,nrhs', [
+ (5, 5, 0, 5),
+ (5, 5, 3, 5)
+ ])
+ def test_invalid_matrix_shapes(self, ldab, n, ldb, nrhs):
+ """Test ?tbtrs fails correctly if shapes are invalid."""
+ ab = np.ones((ldab, n), dtype=float)
+ b = np.ones((ldb, nrhs), dtype=float)
+ tbtrs = get_lapack_funcs('tbtrs', dtype=float)
+ assert_raises(Exception, tbtrs, ab, b)
+
+
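+# Illustrative sketch (not part of the upstream tests; _demo_banded_storage is
+# an added helper): the banded storage used by ?tbtrs above packs each diagonal
+# of the triangular band into a row of `ab`. For an upper triangular matrix
+# with kd=1 superdiagonal the main diagonal ends up in the last row, which is
+# why test_zero_element_in_diagonal zeroes ab[-1, 3] to make column 4 singular.
+def _demo_banded_storage():
+    n, kd = 4, 1
+    a = np.diag(np.arange(1., 5.)) + np.diag(np.full(3, 9.), 1)
+    ab = np.zeros((kd + 1, n))
+    for row, k in enumerate(range(kd, -1, -1)):
+        ab[row, max(k, 0):] = a.diagonal(k)
+    return ab  # ab[-1] holds the main diagonal [1, 2, 3, 4]
+
+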
+def test_lartg():
+ for dtype in 'fdFD':
+ lartg = get_lapack_funcs('lartg', dtype=dtype)
+
+ f = np.array(3, dtype)
+ g = np.array(4, dtype)
+
+ if np.iscomplexobj(g):
+ g *= 1j
+
+ cs, sn, r = lartg(f, g)
+
+ assert_allclose(cs, 3.0/5.0)
+ assert_allclose(r, 5.0)
+
+ if np.iscomplexobj(g):
+ assert_allclose(sn, -4.0j/5.0)
+ assert_(isinstance(r, complex))
+ assert_(isinstance(cs, float))
+ else:
+ assert_allclose(sn, 4.0/5.0)
+
+
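+# Illustrative sketch (not part of the upstream tests; _demo_lartg_identity is
+# an added helper): ?lartg returns the Givens rotation that maps (f, g) onto
+# (r, 0), i.e.
+#     [ cs        sn ] [f]   [r]
+#     [-conj(sn)  cs ] [g] = [0]
+# which is what the 3-4-5 values checked above encode.
+def _demo_lartg_identity(dtype=np.float64):
+    lartg = get_lapack_funcs('lartg', dtype=dtype)
+    cs, sn, r = lartg(3.0, 4.0)
+    rotated = np.array([[cs, sn], [-np.conj(sn), cs]]) @ np.array([3.0, 4.0])
+    return np.allclose(rotated, [r, 0.0])
+
+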
+def test_rot():
+ # srot, drot from blas and crot and zrot from lapack.
+
+ for dtype in 'fdFD':
+ c = 0.6
+ s = 0.8
+
+ u = np.full(4, 3, dtype)
+ v = np.full(4, 4, dtype)
+ atol = 10**-(np.finfo(dtype).precision-1)
+
+ if dtype in 'fd':
+ rot = get_blas_funcs('rot', dtype=dtype)
+ f = 4
+ else:
+ rot = get_lapack_funcs('rot', dtype=dtype)
+ s *= -1j
+ v *= 1j
+ f = 4j
+
+ assert_allclose(rot(u, v, c, s), [[5, 5, 5, 5],
+ [0, 0, 0, 0]], atol=atol)
+ assert_allclose(rot(u, v, c, s, n=2), [[5, 5, 3, 3],
+ [0, 0, f, f]], atol=atol)
+ assert_allclose(rot(u, v, c, s, offx=2, offy=2),
+ [[3, 3, 5, 5], [f, f, 0, 0]], atol=atol)
+ assert_allclose(rot(u, v, c, s, incx=2, offy=2, n=2),
+ [[5, 3, 5, 3], [f, f, 0, 0]], atol=atol)
+ assert_allclose(rot(u, v, c, s, offx=2, incy=2, n=2),
+ [[3, 3, 5, 5], [0, f, 0, f]], atol=atol)
+ assert_allclose(rot(u, v, c, s, offx=2, incx=2, offy=2, incy=2, n=1),
+ [[3, 3, 5, 3], [f, f, 0, f]], atol=atol)
+ assert_allclose(rot(u, v, c, s, incx=-2, incy=-2, n=2),
+ [[5, 3, 5, 3], [0, f, 0, f]], atol=atol)
+
+ a, b = rot(u, v, c, s, overwrite_x=1, overwrite_y=1)
+ assert_(a is u)
+ assert_(b is v)
+ assert_allclose(a, [5, 5, 5, 5], atol=atol)
+ assert_allclose(b, [0, 0, 0, 0], atol=atol)
+
+
+def test_larfg_larf():
+ np.random.seed(1234)
+ a0 = np.random.random((4, 4))
+ a0 = a0.T.dot(a0)
+
+ a0j = np.random.random((4, 4)) + 1j*np.random.random((4, 4))
+ a0j = a0j.T.conj().dot(a0j)
+
+    # The test below performs one step of reducing a Hermitian matrix to
+    # tridiagonal form using Householder transforms.
+
+ for dtype in 'fdFD':
+ larfg, larf = get_lapack_funcs(['larfg', 'larf'], dtype=dtype)
+
+ if dtype in 'FD':
+ a = a0j.copy()
+ else:
+ a = a0.copy()
+
+ # generate a householder transform to clear a[2:,0]
+ alpha, x, tau = larfg(a.shape[0]-1, a[1, 0], a[2:, 0])
+
+ # create expected output
+ expected = np.zeros_like(a[:, 0])
+ expected[0] = a[0, 0]
+ expected[1] = alpha
+
+ # assemble householder vector
+ v = np.zeros_like(a[1:, 0])
+ v[0] = 1.0
+ v[1:] = x
+
+ # apply transform from the left
+ a[1:, :] = larf(v, tau.conjugate(), a[1:, :], np.zeros(a.shape[1]))
+
+ # apply transform from the right
+ a[:, 1:] = larf(v, tau, a[:, 1:], np.zeros(a.shape[0]), side='R')
+
+ assert_allclose(a[:, 0], expected, atol=1e-5)
+ assert_allclose(a[0, :], expected, atol=1e-5)
+
+
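+# Illustrative sketch (not part of the upstream tests; _demo_larfg_reflector is
+# an added helper): ?larfg builds the elementary reflector H = I - tau*v*v^H
+# with v = [1, x] that maps its input vector onto a multiple of e_1; applying H
+# explicitly reproduces `alpha` followed by zeros, which is what the test above
+# relies on (real case shown here).
+def _demo_larfg_reflector(dtype=np.float64):
+    larfg, = get_lapack_funcs(['larfg'], dtype=dtype)
+    a = np.array([3.0, 4.0, 12.0], dtype=dtype)
+    alpha, x, tau = larfg(a.size, a[0], a[1:])
+    v = np.r_[1.0, x]
+    H = np.eye(a.size) - tau * np.outer(v, np.conj(v))
+    return np.allclose(H @ a, np.r_[alpha, 0.0, 0.0])
+
+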
+def test_sgesdd_lwork_bug_workaround():
+ # Test that SGESDD lwork is sufficiently large for LAPACK.
+ #
+ # This checks that _compute_lwork() correctly works around a bug in
+ # LAPACK versions older than 3.10.1.
+
+ sgesdd_lwork = get_lapack_funcs('gesdd_lwork', dtype=np.float32,
+ ilp64='preferred')
+ n = 9537
+ lwork = _compute_lwork(sgesdd_lwork, n, n,
+ compute_uv=True, full_matrices=True)
+ # If we called the Fortran function SGESDD directly with IWORK=-1, the
+ # LAPACK bug would result in lwork being 272929856, which was too small.
+ # (The result was returned in a single precision float, which does not
+ # have sufficient precision to represent the exact integer value that it
+ # computed internally.) The work-around implemented in _compute_lwork()
+ # will convert that to 272929888. If we are using LAPACK 3.10.1 or later
+ # (such as in OpenBLAS 0.3.21 or later), the work-around will return
+ # 272929920, because it does not know which version of LAPACK is being
+ # used, so it always applies the correction to whatever it is given. We
+ # will accept either 272929888 or 272929920.
+ # Note that the acceptable values are a LAPACK implementation detail.
+ # If a future version of LAPACK changes how SGESDD works, and therefore
+ # changes the required LWORK size, the acceptable values might have to
+ # be updated.
+ assert lwork == 272929888 or lwork == 272929920
+
+
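+# Illustrative sketch (not part of the upstream tests; the helper is an added
+# demo): the root cause of the SGESDD workspace bug described above is that the
+# optimal LWORK is reported through a single-precision float, which cannot
+# represent every integer of that size, so the value can round down below what
+# LAPACK actually needs.
+def _demo_float32_integer_rounding():
+    # float32 has a 24-bit significand; near 2.7e8 its spacing is 32, so
+    # nearby integers collapse onto the same representable value.
+    return int(np.float32(272929857)) == 272929856
+
+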
+class TestSytrd:
+ @pytest.mark.parametrize('dtype', REAL_DTYPES)
+ def test_sytrd_with_zero_dim_array(self, dtype):
+ # Assert that a 0x0 matrix raises an error
+ A = np.zeros((0, 0), dtype=dtype)
+ sytrd = get_lapack_funcs('sytrd', (A,))
+ assert_raises(ValueError, sytrd, A)
+
+ @pytest.mark.parametrize('dtype', REAL_DTYPES)
+ @pytest.mark.parametrize('n', (1, 3))
+ def test_sytrd(self, dtype, n):
+ A = np.zeros((n, n), dtype=dtype)
+
+ sytrd, sytrd_lwork = \
+ get_lapack_funcs(('sytrd', 'sytrd_lwork'), (A,))
+
+ # some upper triangular array
+ A[np.triu_indices_from(A)] = \
+ np.arange(1, n*(n+1)//2+1, dtype=dtype)
+
+ # query lwork
+ lwork, info = sytrd_lwork(n)
+ assert_equal(info, 0)
+
+ # check lower=1 behavior (shouldn't do much since the matrix is
+ # upper triangular)
+ data, d, e, tau, info = sytrd(A, lower=1, lwork=lwork)
+ assert_equal(info, 0)
+
+ assert_allclose(data, A, atol=5*np.finfo(dtype).eps, rtol=1.0)
+ assert_allclose(d, np.diag(A))
+ assert_allclose(e, 0.0)
+ assert_allclose(tau, 0.0)
+
+ # and now for the proper test (lower=0 is the default)
+ data, d, e, tau, info = sytrd(A, lwork=lwork)
+ assert_equal(info, 0)
+
+ # assert Q^T*A*Q = tridiag(e, d, e)
+
+ # build tridiagonal matrix
+ T = np.zeros_like(A, dtype=dtype)
+ k = np.arange(A.shape[0])
+ T[k, k] = d
+ k2 = np.arange(A.shape[0]-1)
+ T[k2+1, k2] = e
+ T[k2, k2+1] = e
+
+ # build Q
+ Q = np.eye(n, n, dtype=dtype)
+ for i in range(n-1):
+ v = np.zeros(n, dtype=dtype)
+ v[:i] = data[:i, i+1]
+ v[i] = 1.0
+ H = np.eye(n, n, dtype=dtype) - tau[i] * np.outer(v, v)
+ Q = np.dot(H, Q)
+
+ # Make matrix fully symmetric
+ i_lower = np.tril_indices(n, -1)
+ A[i_lower] = A.T[i_lower]
+
+ QTAQ = np.dot(Q.T, np.dot(A, Q))
+
+ # disable rtol here since some values in QTAQ and T are very close
+ # to 0.
+ assert_allclose(QTAQ, T, atol=5*np.finfo(dtype).eps, rtol=1.0)
+
+
+class TestHetrd:
+ @pytest.mark.parametrize('complex_dtype', COMPLEX_DTYPES)
+ def test_hetrd_with_zero_dim_array(self, complex_dtype):
+ # Assert that a 0x0 matrix raises an error
+ A = np.zeros((0, 0), dtype=complex_dtype)
+ hetrd = get_lapack_funcs('hetrd', (A,))
+ assert_raises(ValueError, hetrd, A)
+
+ @pytest.mark.parametrize('real_dtype,complex_dtype',
+ zip(REAL_DTYPES, COMPLEX_DTYPES))
+ @pytest.mark.parametrize('n', (1, 3))
+ def test_hetrd(self, n, real_dtype, complex_dtype):
+ A = np.zeros((n, n), dtype=complex_dtype)
+ hetrd, hetrd_lwork = \
+ get_lapack_funcs(('hetrd', 'hetrd_lwork'), (A,))
+
+ # some upper triangular array
+ A[np.triu_indices_from(A)] = (
+ np.arange(1, n*(n+1)//2+1, dtype=real_dtype)
+ + 1j * np.arange(1, n*(n+1)//2+1, dtype=real_dtype)
+ )
+ np.fill_diagonal(A, np.real(np.diag(A)))
+
+ # test query lwork
+ for x in [0, 1]:
+ _, info = hetrd_lwork(n, lower=x)
+ assert_equal(info, 0)
+ # lwork returns complex which segfaults hetrd call (gh-10388)
+ # use the safe and recommended option
+ lwork = _compute_lwork(hetrd_lwork, n)
+
+ # check lower=1 behavior (shouldn't do much since the matrix is
+ # upper triangular)
+ data, d, e, tau, info = hetrd(A, lower=1, lwork=lwork)
+ assert_equal(info, 0)
+
+ assert_allclose(data, A, atol=5*np.finfo(real_dtype).eps, rtol=1.0)
+
+ assert_allclose(d, np.real(np.diag(A)))
+ assert_allclose(e, 0.0)
+ assert_allclose(tau, 0.0)
+
+ # and now for the proper test (lower=0 is the default)
+ data, d, e, tau, info = hetrd(A, lwork=lwork)
+ assert_equal(info, 0)
+
+ # assert Q^T*A*Q = tridiag(e, d, e)
+
+ # build tridiagonal matrix
+ T = np.zeros_like(A, dtype=real_dtype)
+ k = np.arange(A.shape[0], dtype=int)
+ T[k, k] = d
+ k2 = np.arange(A.shape[0]-1, dtype=int)
+ T[k2+1, k2] = e
+ T[k2, k2+1] = e
+
+ # build Q
+ Q = np.eye(n, n, dtype=complex_dtype)
+ for i in range(n-1):
+ v = np.zeros(n, dtype=complex_dtype)
+ v[:i] = data[:i, i+1]
+ v[i] = 1.0
+ H = np.eye(n, n, dtype=complex_dtype) \
+ - tau[i] * np.outer(v, np.conj(v))
+ Q = np.dot(H, Q)
+
+ # Make matrix fully Hermitian
+ i_lower = np.tril_indices(n, -1)
+ A[i_lower] = np.conj(A.T[i_lower])
+
+ QHAQ = np.dot(np.conj(Q.T), np.dot(A, Q))
+
+        # disable rtol here since some values in QHAQ and T are very close
+ # to 0.
+ assert_allclose(
+ QHAQ, T, atol=10*np.finfo(real_dtype).eps, rtol=1.0
+ )
+
+
+def test_gglse():
+ # Example data taken from NAG manual
+ for ind, dtype in enumerate(DTYPES):
+ # DTYPES = gglse
+ func, func_lwork = get_lapack_funcs(('gglse', 'gglse_lwork'),
+ dtype=dtype)
+ lwork = _compute_lwork(func_lwork, m=6, n=4, p=2)
+ # For gglse
+ if ind < 2:
+ a = np.array([[-0.57, -1.28, -0.39, 0.25],
+ [-1.93, 1.08, -0.31, -2.14],
+ [2.30, 0.24, 0.40, -0.35],
+ [-1.93, 0.64, -0.66, 0.08],
+ [0.15, 0.30, 0.15, -2.13],
+ [-0.02, 1.03, -1.43, 0.50]], dtype=dtype)
+ c = np.array([-1.50, -2.14, 1.23, -0.54, -1.68, 0.82], dtype=dtype)
+ d = np.array([0., 0.], dtype=dtype)
+ # For gglse
+ else:
+ a = np.array([[0.96-0.81j, -0.03+0.96j, -0.91+2.06j, -0.05+0.41j],
+ [-0.98+1.98j, -1.20+0.19j, -0.66+0.42j, -0.81+0.56j],
+ [0.62-0.46j, 1.01+0.02j, 0.63-0.17j, -1.11+0.60j],
+ [0.37+0.38j, 0.19-0.54j, -0.98-0.36j, 0.22-0.20j],
+ [0.83+0.51j, 0.20+0.01j, -0.17-0.46j, 1.47+1.59j],
+ [1.08-0.28j, 0.20-0.12j, -0.07+1.23j, 0.26+0.26j]])
+ c = np.array([[-2.54+0.09j],
+ [1.65-2.26j],
+ [-2.11-3.96j],
+ [1.82+3.30j],
+ [-6.41+3.77j],
+ [2.07+0.66j]])
+ d = np.zeros(2, dtype=dtype)
+
+ b = np.array([[1., 0., -1., 0.], [0., 1., 0., -1.]], dtype=dtype)
+
+ _, _, _, result, _ = func(a, b, c, d, lwork=lwork)
+ if ind < 2:
+ expected = np.array([0.48904455,
+ 0.99754786,
+ 0.48904455,
+ 0.99754786])
+ else:
+ expected = np.array([1.08742917-1.96205783j,
+ -0.74093902+3.72973919j,
+ 1.08742917-1.96205759j,
+ -0.74093896+3.72973895j])
+ assert_array_almost_equal(result, expected, decimal=4)
+
+
+def test_sycon_hecon():
+    seed(1234)
+    for ind, dtype in enumerate(DTYPES+COMPLEX_DTYPES):
+        # DTYPES + COMPLEX DTYPES = sycon + hecon
+        n = 10
+        if ind < 4:
+            func_lwork = get_lapack_funcs('sytrf_lwork', dtype=dtype)
+            funcon, functrf = get_lapack_funcs(('sycon', 'sytrf'), dtype=dtype)
+            A = (rand(n, n)).astype(dtype)
+        else:
+            func_lwork = get_lapack_funcs('hetrf_lwork', dtype=dtype)
+            funcon, functrf = get_lapack_funcs(('hecon', 'hetrf'), dtype=dtype)
+            A = (rand(n, n) + rand(n, n)*1j).astype(dtype)
+
+        # Make A Hermitian and diagonally dominant so it is well conditioned
+        # for the 1-norm condition number estimate.
+        A = (A + A.conj().T)/2 + 2*np.eye(n, dtype=dtype)
+        anorm = norm(A, 1)
+        lwork = _compute_lwork(func_lwork, n)
+        ldu, ipiv, _ = functrf(A, lwork=lwork, lower=1)
+        rcond, _ = funcon(a=ldu, ipiv=ipiv, anorm=anorm, lower=1)
+        # The estimate should be within a modest factor of the true
+        # 1-norm condition number.
+        assert_(abs(1/rcond - np.linalg.cond(A, p=1))*rcond < 1)
+
+
+def test_sygst():
+    seed(1234)
+    for ind, dtype in enumerate(REAL_DTYPES):
+        # REAL DTYPES = sygst
+        n = 10
+
+ potrf, sygst, syevd, sygvd = get_lapack_funcs(('potrf', 'sygst',
+ 'syevd', 'sygvd'),
+ dtype=dtype)
+
+ A = rand(n, n).astype(dtype)
+ A = (A + A.T)/2
+ # B must be positive definite
+ B = rand(n, n).astype(dtype)
+ B = (B + B.T)/2 + 2 * np.eye(n, dtype=dtype)
+
+ # Perform eig (sygvd)
+ eig_gvd, _, info = sygvd(A, B)
+ assert_(info == 0)
+
+ # Convert to std problem potrf
+ b, info = potrf(B)
+ assert_(info == 0)
+ a, info = sygst(A, b)
+ assert_(info == 0)
+
+ eig, _, info = syevd(a)
+ assert_(info == 0)
+ assert_allclose(eig, eig_gvd, rtol=1.2e-4)
+
+
+def test_pstrf():
+    seed(1234)
+    for ind, dtype in enumerate(DTYPES):
+ # DTYPES = pstrf
+ n = 10
+ r = 2
+ pstrf = get_lapack_funcs('pstrf', dtype=dtype)
+
+ # Create positive semidefinite A
+ if ind > 1:
+ A = rand(n, n-r).astype(dtype) + 1j * rand(n, n-r).astype(dtype)
+ A = A @ A.conj().T
+ else:
+ A = rand(n, n-r).astype(dtype)
+ A = A @ A.T
+
+ c, piv, r_c, info = pstrf(A)
+ U = triu(c)
+ U[r_c - n:, r_c - n:] = 0.
+
+ assert_equal(info, 1)
+ # python-dbg 3.5.2 runs cause trouble with the following assertion.
+ # assert_equal(r_c, n - r)
+ single_atol = 1000 * np.finfo(np.float32).eps
+ double_atol = 1000 * np.finfo(np.float64).eps
+ atol = single_atol if ind in [0, 2] else double_atol
+ assert_allclose(A[piv-1][:, piv-1], U.conj().T @ U, rtol=0., atol=atol)
+
+ c, piv, r_c, info = pstrf(A, lower=1)
+ L = tril(c)
+ L[r_c - n:, r_c - n:] = 0.
+
+ assert_equal(info, 1)
+ # assert_equal(r_c, n - r)
+ single_atol = 1000 * np.finfo(np.float32).eps
+ double_atol = 1000 * np.finfo(np.float64).eps
+ atol = single_atol if ind in [0, 2] else double_atol
+ assert_allclose(A[piv-1][:, piv-1], L @ L.conj().T, rtol=0., atol=atol)
+
+
+def test_pstf2():
+ seed(1234)
+ for ind, dtype in enumerate(DTYPES):
+ # DTYPES = pstf2
+ n = 10
+ r = 2
+ pstf2 = get_lapack_funcs('pstf2', dtype=dtype)
+
+ # Create positive semidefinite A
+ if ind > 1:
+ A = rand(n, n-r).astype(dtype) + 1j * rand(n, n-r).astype(dtype)
+ A = A @ A.conj().T
+ else:
+ A = rand(n, n-r).astype(dtype)
+ A = A @ A.T
+
+ c, piv, r_c, info = pstf2(A)
+ U = triu(c)
+ U[r_c - n:, r_c - n:] = 0.
+
+ assert_equal(info, 1)
+ # python-dbg 3.5.2 runs cause trouble with the commented assertions.
+ # assert_equal(r_c, n - r)
+ single_atol = 1000 * np.finfo(np.float32).eps
+ double_atol = 1000 * np.finfo(np.float64).eps
+ atol = single_atol if ind in [0, 2] else double_atol
+ assert_allclose(A[piv-1][:, piv-1], U.conj().T @ U, rtol=0., atol=atol)
+
+ c, piv, r_c, info = pstf2(A, lower=1)
+ L = tril(c)
+ L[r_c - n:, r_c - n:] = 0.
+
+ assert_equal(info, 1)
+ # assert_equal(r_c, n - r)
+ single_atol = 1000 * np.finfo(np.float32).eps
+ double_atol = 1000 * np.finfo(np.float64).eps
+ atol = single_atol if ind in [0, 2] else double_atol
+ assert_allclose(A[piv-1][:, piv-1], L @ L.conj().T, rtol=0., atol=atol)
+
+
+def test_geequ():
+ desired_real = np.array([[0.6250, 1.0000, 0.0393, -0.4269],
+ [1.0000, -0.5619, -1.0000, -1.0000],
+ [0.5874, -1.0000, -0.0596, -0.5341],
+ [-1.0000, -0.5946, -0.0294, 0.9957]])
+
+ desired_cplx = np.array([[-0.2816+0.5359*1j,
+ 0.0812+0.9188*1j,
+ -0.7439-0.2561*1j],
+ [-0.3562-0.2954*1j,
+ 0.9566-0.0434*1j,
+ -0.0174+0.1555*1j],
+ [0.8607+0.1393*1j,
+ -0.2759+0.7241*1j,
+ -0.1642-0.1365*1j]])
+
+ for ind, dtype in enumerate(DTYPES):
+ if ind < 2:
+ # Use examples from the NAG documentation
+ A = np.array([[1.80e+10, 2.88e+10, 2.05e+00, -8.90e+09],
+ [5.25e+00, -2.95e+00, -9.50e-09, -3.80e+00],
+ [1.58e+00, -2.69e+00, -2.90e-10, -1.04e+00],
+ [-1.11e+00, -6.60e-01, -5.90e-11, 8.00e-01]])
+ A = A.astype(dtype)
+ else:
+ A = np.array([[-1.34e+00, 0.28e+10, -6.39e+00],
+ [-1.70e+00, 3.31e+10, -0.15e+00],
+ [2.41e-10, -0.56e+00, -0.83e-10]], dtype=dtype)
+ A += np.array([[2.55e+00, 3.17e+10, -2.20e+00],
+ [-1.41e+00, -0.15e+10, 1.34e+00],
+ [0.39e-10, 1.47e+00, -0.69e-10]])*1j
+
+ A = A.astype(dtype)
+
+ geequ = get_lapack_funcs('geequ', dtype=dtype)
+ r, c, rowcnd, colcnd, amax, info = geequ(A)
+
+ if ind < 2:
+ assert_allclose(desired_real.astype(dtype), r[:, None]*A*c,
+ rtol=0, atol=1e-4)
+ else:
+ assert_allclose(desired_cplx.astype(dtype), r[:, None]*A*c,
+ rtol=0, atol=1e-4)
+
+
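+# Illustrative sketch (not part of the upstream tests; _demo_geequ_scaling is
+# an added helper): ?geequ returns row and column scalings r, c such that
+# diag(r) @ A @ diag(c) -- written above as r[:, None]*A*c -- has entries of
+# comparable magnitude (no larger than ~1 in absolute value), improving the
+# conditioning of badly scaled systems before factorization.
+def _demo_geequ_scaling(dtype=np.float64):
+    A = np.array([[1.8e+10, 2.9e+10], [5.2e+00, -3.0e+00]], dtype=dtype)
+    geequ = get_lapack_funcs('geequ', dtype=dtype)
+    r, c, rowcnd, colcnd, amax, info = geequ(A)
+    scaled = r[:, None] * A * c
+    return info == 0 and np.abs(scaled).max() <= 1.0 + 1e-12
+
+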
+def test_syequb():
+ desired_log2s = np.array([0, 0, 0, 0, 0, 0, -1, -1, -2, -3])
+
+ for ind, dtype in enumerate(DTYPES):
+ A = np.eye(10, dtype=dtype)
+ alpha = dtype(1. if ind < 2 else 1.j)
+ d = np.array([alpha * 2.**x for x in range(-5, 5)], dtype=dtype)
+ A += np.rot90(np.diag(d))
+
+ syequb = get_lapack_funcs('syequb', dtype=dtype)
+ s, scond, amax, info = syequb(A)
+
+ assert_equal(np.log2(s).astype(int), desired_log2s)
+
+
+@pytest.mark.skipif(True,
+ reason="Failing on some OpenBLAS version, see gh-12276")
+def test_heequb():
+ # zheequb has a bug for versions =< LAPACK 3.9.0
+ # See Reference-LAPACK gh-61 and gh-408
+ # Hence the zheequb test is customized accordingly to avoid
+ # work scaling.
+ A = np.diag([2]*5 + [1002]*5) + np.diag(np.ones(9), k=1)*1j
+ s, scond, amax, info = lapack.zheequb(A)
+ assert_equal(info, 0)
+ assert_allclose(np.log2(s), [0., -1.]*2 + [0.] + [-4]*5)
+
+ A = np.diag(2**np.abs(np.arange(-5, 6)) + 0j)
+ A[5, 5] = 1024
+ A[5, 0] = 16j
+ s, scond, amax, info = lapack.cheequb(A.astype(np.complex64), lower=1)
+ assert_equal(info, 0)
+ assert_allclose(np.log2(s), [-2, -1, -1, 0, 0, -5, 0, -1, -1, -2, -2])
+
+
+def test_getc2_gesc2():
+ np.random.seed(42)
+ n = 10
+ desired_real = np.random.rand(n)
+ desired_cplx = np.random.rand(n) + np.random.rand(n)*1j
+
+ for ind, dtype in enumerate(DTYPES):
+ if ind < 2:
+ A = np.random.rand(n, n)
+ A = A.astype(dtype)
+ b = A @ desired_real
+ b = b.astype(dtype)
+ else:
+ A = np.random.rand(n, n) + np.random.rand(n, n)*1j
+ A = A.astype(dtype)
+ b = A @ desired_cplx
+ b = b.astype(dtype)
+
+ getc2 = get_lapack_funcs('getc2', dtype=dtype)
+ gesc2 = get_lapack_funcs('gesc2', dtype=dtype)
+ lu, ipiv, jpiv, info = getc2(A, overwrite_a=0)
+ x, scale = gesc2(lu, b, ipiv, jpiv, overwrite_rhs=0)
+
+ if ind < 2:
+ assert_array_almost_equal(desired_real.astype(dtype),
+ x/scale, decimal=4)
+ else:
+ assert_array_almost_equal(desired_cplx.astype(dtype),
+ x/scale, decimal=4)
+
+
+@pytest.mark.parametrize('size', [(6, 5), (5, 5)])
+@pytest.mark.parametrize('dtype', REAL_DTYPES)
+@pytest.mark.parametrize('joba', range(6)) # 'C', 'E', 'F', 'G', 'A', 'R'
+@pytest.mark.parametrize('jobu', range(4)) # 'U', 'F', 'W', 'N'
+@pytest.mark.parametrize('jobv', range(4)) # 'V', 'J', 'W', 'N'
+@pytest.mark.parametrize('jobr', [0, 1])
+@pytest.mark.parametrize('jobp', [0, 1])
+def test_gejsv_general(size, dtype, joba, jobu, jobv, jobr, jobp, jobt=0):
+ """Test the lapack routine ?gejsv.
+
+ This function tests that a singular value decomposition can be performed
+ on the random M-by-N matrix A. The test performs the SVD using ?gejsv
+ then performs the following checks:
+
+    * ?gejsv exits successfully (info == 0)
+ * The returned singular values are correct
+ * `A` can be reconstructed from `u`, `SIGMA`, `v`
+ * Ensure that u.T @ u is the identity matrix
+ * Ensure that v.T @ v is the identity matrix
+ * The reported matrix rank
+ * The reported number of singular values
+ * If denormalized floats are required
+
+ Notes
+ -----
+    joba specifies several choices affecting the calculation's accuracy.
+    Although all arguments are tested, the tests only check that the correct
+    solution is returned - NOT that the prescribed actions are performed
+    internally.
+
+    jobt is, as of v3.9.0, still experimental, so it is excluded here to cut
+    down the number of test cases; the keyword itself is tested separately.
+ """
+ seed(42)
+
+ # Define some constants for later use:
+ m, n = size
+ atol = 100 * np.finfo(dtype).eps
+ A = generate_random_dtype_array(size, dtype)
+ gejsv = get_lapack_funcs('gejsv', dtype=dtype)
+
+ # Set up checks for invalid job? combinations
+ # if an invalid combination occurs we set the appropriate
+ # exit status.
+ lsvec = jobu < 2 # Calculate left singular vectors
+ rsvec = jobv < 2 # Calculate right singular vectors
+ l2tran = (jobt == 1) and (m == n)
+ is_complex = np.iscomplexobj(A)
+
+ invalid_real_jobv = (jobv == 1) and (not lsvec) and (not is_complex)
+ invalid_cplx_jobu = (jobu == 2) and not (rsvec and l2tran) and is_complex
+ invalid_cplx_jobv = (jobv == 2) and not (lsvec and l2tran) and is_complex
+
+ # Set the exit status to the expected value.
+ # Here we only check for invalid combinations, not individual
+ # parameters.
+ if invalid_cplx_jobu:
+ exit_status = -2
+ elif invalid_real_jobv or invalid_cplx_jobv:
+ exit_status = -3
+ else:
+ exit_status = 0
+
+ if (jobu > 1) and (jobv == 1):
+ assert_raises(Exception, gejsv, A, joba, jobu, jobv, jobr, jobt, jobp)
+ else:
+ sva, u, v, work, iwork, info = gejsv(A,
+ joba=joba,
+ jobu=jobu,
+ jobv=jobv,
+ jobr=jobr,
+ jobt=jobt,
+ jobp=jobp)
+
+ # Check that ?gejsv exited successfully/as expected
+ assert_equal(info, exit_status)
+
+ # If exit_status is non-zero the combination of jobs is invalid.
+ # We test this above but no calculations are performed.
+ if not exit_status:
+
+ # Check the returned singular values
+ sigma = (work[0] / work[1]) * sva[:n]
+ assert_allclose(sigma, svd(A, compute_uv=False), atol=atol)
+
+ if jobu == 1:
+ # If JOBU = 'F', then u contains the M-by-M matrix of
+ # the left singular vectors, including an ONB of the orthogonal
+ # complement of the Range(A)
+ # However, to recalculate A we are concerned about the
+ # first n singular values and so can ignore the latter.
+ # TODO: Add a test for ONB?
+ u = u[:, :n]
+
+ if lsvec and rsvec:
+ assert_allclose(u @ np.diag(sigma) @ v.conj().T, A, atol=atol)
+ if lsvec:
+ assert_allclose(u.conj().T @ u, np.identity(n), atol=atol)
+ if rsvec:
+ assert_allclose(v.conj().T @ v, np.identity(n), atol=atol)
+
+ assert_equal(iwork[0], np.linalg.matrix_rank(A))
+ assert_equal(iwork[1], np.count_nonzero(sigma))
+ # iwork[2] is non-zero if requested accuracy is not warranted for
+ # the data. This should never occur for these tests.
+ assert_equal(iwork[2], 0)
+
+
+@pytest.mark.parametrize('dtype', REAL_DTYPES)
+def test_gejsv_edge_arguments(dtype):
+ """Test edge arguments return expected status"""
+ gejsv = get_lapack_funcs('gejsv', dtype=dtype)
+
+ # scalar A
+ sva, u, v, work, iwork, info = gejsv(1.)
+ assert_equal(info, 0)
+ assert_equal(u.shape, (1, 1))
+ assert_equal(v.shape, (1, 1))
+ assert_equal(sva, np.array([1.], dtype=dtype))
+
+ # 1d A
+ A = np.ones((1,), dtype=dtype)
+ sva, u, v, work, iwork, info = gejsv(A)
+ assert_equal(info, 0)
+ assert_equal(u.shape, (1, 1))
+ assert_equal(v.shape, (1, 1))
+ assert_equal(sva, np.array([1.], dtype=dtype))
+
+ # 2d empty A
+ A = np.ones((1, 0), dtype=dtype)
+ sva, u, v, work, iwork, info = gejsv(A)
+ assert_equal(info, 0)
+ assert_equal(u.shape, (1, 0))
+ assert_equal(v.shape, (1, 0))
+ assert_equal(sva, np.array([], dtype=dtype))
+
+ # make sure "overwrite_a" is respected - user reported in gh-13191
+ A = np.sin(np.arange(100).reshape(10, 10)).astype(dtype)
+ A = np.asfortranarray(A + A.T) # make it symmetric and column major
+ Ac = A.copy('A')
+ _ = gejsv(A)
+ assert_allclose(A, Ac)
+
+
+@pytest.mark.parametrize(('kwargs'),
+ ({'joba': 9},
+ {'jobu': 9},
+ {'jobv': 9},
+ {'jobr': 9},
+ {'jobt': 9},
+ {'jobp': 9})
+ )
+def test_gejsv_invalid_job_arguments(kwargs):
+ """Test invalid job arguments raise an Exception"""
+ A = np.ones((2, 2), dtype=float)
+ gejsv = get_lapack_funcs('gejsv', dtype=float)
+ assert_raises(Exception, gejsv, A, **kwargs)
+
+
+@pytest.mark.parametrize("A,sva_expect,u_expect,v_expect",
+ [(np.array([[2.27, -1.54, 1.15, -1.94],
+ [0.28, -1.67, 0.94, -0.78],
+ [-0.48, -3.09, 0.99, -0.21],
+ [1.07, 1.22, 0.79, 0.63],
+ [-2.35, 2.93, -1.45, 2.30],
+ [0.62, -7.39, 1.03, -2.57]]),
+ np.array([9.9966, 3.6831, 1.3569, 0.5000]),
+ np.array([[0.2774, -0.6003, -0.1277, 0.1323],
+ [0.2020, -0.0301, 0.2805, 0.7034],
+ [0.2918, 0.3348, 0.6453, 0.1906],
+ [-0.0938, -0.3699, 0.6781, -0.5399],
+ [-0.4213, 0.5266, 0.0413, -0.0575],
+ [0.7816, 0.3353, -0.1645, -0.3957]]),
+ np.array([[0.1921, -0.8030, 0.0041, -0.5642],
+ [-0.8794, -0.3926, -0.0752, 0.2587],
+ [0.2140, -0.2980, 0.7827, 0.5027],
+ [-0.3795, 0.3351, 0.6178, -0.6017]]))])
+def test_gejsv_NAG(A, sva_expect, u_expect, v_expect):
+ """
+ This test implements the example found in the NAG manual, f08khf.
+ An example was not found for the complex case.
+ """
+ # NAG manual provides accuracy up to 4 decimals
+ atol = 1e-4
+ gejsv = get_lapack_funcs('gejsv', dtype=A.dtype)
+
+ sva, u, v, work, iwork, info = gejsv(A)
+
+ assert_allclose(sva_expect, sva, atol=atol)
+ assert_allclose(u_expect, u, atol=atol)
+ assert_allclose(v_expect, v, atol=atol)
+
+
+@pytest.mark.parametrize("dtype", DTYPES)
+def test_gttrf_gttrs(dtype):
+    # For each dtype, the test uses ?gttrf and ?gttrs to solve a random
+    # tridiagonal system. It checks that the outputs of ?gttrf define an LU
+    # factorization, that the input parameters are left unmodified, that the
+    # transpose options work correctly, that incompatible matrix shapes raise
+    # an error, and that singular matrices return nonzero info.
+
+ seed(42)
+ n = 10
+ atol = 100 * np.finfo(dtype).eps
+
+ # create the matrix in accordance with the data type
+ du = generate_random_dtype_array((n-1,), dtype=dtype)
+ d = generate_random_dtype_array((n,), dtype=dtype)
+ dl = generate_random_dtype_array((n-1,), dtype=dtype)
+
+ diag_cpy = [dl.copy(), d.copy(), du.copy()]
+
+ A = np.diag(d) + np.diag(dl, -1) + np.diag(du, 1)
+ x = np.random.rand(n)
+ b = A @ x
+
+ gttrf, gttrs = get_lapack_funcs(('gttrf', 'gttrs'), dtype=dtype)
+
+ _dl, _d, _du, du2, ipiv, info = gttrf(dl, d, du)
+ # test to assure that the inputs of ?gttrf are unmodified
+ assert_array_equal(dl, diag_cpy[0])
+ assert_array_equal(d, diag_cpy[1])
+ assert_array_equal(du, diag_cpy[2])
+
+ # generate L and U factors from ?gttrf return values
+ # L/U are lower/upper triangular by construction (initially and at end)
+ U = np.diag(_d, 0) + np.diag(_du, 1) + np.diag(du2, 2)
+ L = np.eye(n, dtype=dtype)
+
+ for i, m in enumerate(_dl):
+ # L is given in a factored form.
+ # See
+ # www.hpcavf.uclan.ac.uk/softwaredoc/sgi_scsl_html/sgi_html/ch03.html
+ piv = ipiv[i] - 1
+ # right multiply by permutation matrix
+ L[:, [i, piv]] = L[:, [piv, i]]
+ # right multiply by Li, rank-one modification of identity
+ L[:, i] += L[:, i+1]*m
+
+ # one last permutation
+ i, piv = -1, ipiv[-1] - 1
+ # right multiply by final permutation matrix
+ L[:, [i, piv]] = L[:, [piv, i]]
+
+ # check that the outputs of ?gttrf define an LU decomposition of A
+ assert_allclose(A, L @ U, atol=atol)
+
+ b_cpy = b.copy()
+ x_gttrs, info = gttrs(_dl, _d, _du, du2, ipiv, b)
+ # test that the inputs of ?gttrs are unmodified
+ assert_array_equal(b, b_cpy)
+ # test that the result of ?gttrs matches the expected input
+ assert_allclose(x, x_gttrs, atol=atol)
+
+ # test that ?gttrf and ?gttrs work with transposal options
+ if dtype in REAL_DTYPES:
+ trans = "T"
+ b_trans = A.T @ x
+ else:
+ trans = "C"
+ b_trans = A.conj().T @ x
+
+ x_gttrs, info = gttrs(_dl, _d, _du, du2, ipiv, b_trans, trans=trans)
+ assert_allclose(x, x_gttrs, atol=atol)
+
+ # test that ValueError is raised with incompatible matrix shapes
+ with assert_raises(ValueError):
+ gttrf(dl[:-1], d, du)
+ with assert_raises(ValueError):
+ gttrf(dl, d[:-1], du)
+ with assert_raises(ValueError):
+ gttrf(dl, d, du[:-1])
+
+ # test that matrix of size n=2 raises exception
+ with assert_raises(Exception):
+ gttrf(dl[0], d[:1], du[0])
+
+ # test that singular (row of all zeroes) matrix fails via info
+ du[0] = 0
+ d[0] = 0
+ __dl, __d, __du, _du2, _ipiv, _info = gttrf(dl, d, du)
+    np.testing.assert_(__d[info - 1] == 0,
+                       "?gttrf: _d[info-1] is {}, not the illegal value 0."
+                       .format(__d[info - 1]))
+
+
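+# Illustrative sketch (not part of the upstream tests; _demo_gttrf_gttrs_solve
+# is an added helper): the usual end-to-end use of the pair is ?gttrf to factor
+# a tridiagonal matrix given as its three diagonals, then ?gttrs to solve with
+# the factors -- the same flow the test above checks against an explicitly
+# assembled A.
+def _demo_gttrf_gttrs_solve(dtype=np.float64):
+    dl = np.array([1.0, 2.0, 3.0], dtype=dtype)            # sub-diagonal
+    d = np.array([10.0, 10.0, 10.0, 10.0], dtype=dtype)    # main diagonal
+    du = np.array([4.0, 5.0, 6.0], dtype=dtype)            # super-diagonal
+    b = np.ones(4, dtype=dtype)
+    gttrf, gttrs = get_lapack_funcs(('gttrf', 'gttrs'), dtype=dtype)
+    _dl, _d, _du, du2, ipiv, info = gttrf(dl, d, du)
+    x, info = gttrs(_dl, _d, _du, du2, ipiv, b)
+    A = np.diag(d) + np.diag(dl, -1) + np.diag(du, 1)
+    return np.allclose(A @ x, b)
+
+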
+@pytest.mark.parametrize("du, d, dl, du_exp, d_exp, du2_exp, ipiv_exp, b, x",
+ [(np.array([2.1, -1.0, 1.9, 8.0]),
+ np.array([3.0, 2.3, -5.0, -.9, 7.1]),
+ np.array([3.4, 3.6, 7.0, -6.0]),
+ np.array([2.3, -5, -.9, 7.1]),
+ np.array([3.4, 3.6, 7, -6, -1.015373]),
+ np.array([-1, 1.9, 8]),
+ np.array([2, 3, 4, 5, 5]),
+ np.array([[2.7, 6.6],
+ [-0.5, 10.8],
+ [2.6, -3.2],
+ [0.6, -11.2],
+ [2.7, 19.1]
+ ]),
+ np.array([[-4, 5],
+ [7, -4],
+ [3, -3],
+ [-4, -2],
+ [-3, 1]])),
+ (
+ np.array([2 - 1j, 2 + 1j, -1 + 1j, 1 - 1j]),
+ np.array([-1.3 + 1.3j, -1.3 + 1.3j,
+ -1.3 + 3.3j, - .3 + 4.3j,
+ -3.3 + 1.3j]),
+ np.array([1 - 2j, 1 + 1j, 2 - 3j, 1 + 1j]),
+ # du exp
+ np.array([-1.3 + 1.3j, -1.3 + 3.3j,
+ -0.3 + 4.3j, -3.3 + 1.3j]),
+ np.array([1 - 2j, 1 + 1j, 2 - 3j, 1 + 1j,
+ -1.3399 + 0.2875j]),
+ np.array([2 + 1j, -1 + 1j, 1 - 1j]),
+ np.array([2, 3, 4, 5, 5]),
+ np.array([[2.4 - 5j, 2.7 + 6.9j],
+ [3.4 + 18.2j, - 6.9 - 5.3j],
+ [-14.7 + 9.7j, - 6 - .6j],
+ [31.9 - 7.7j, -3.9 + 9.3j],
+ [-1 + 1.6j, -3 + 12.2j]]),
+ np.array([[1 + 1j, 2 - 1j],
+ [3 - 1j, 1 + 2j],
+ [4 + 5j, -1 + 1j],
+ [-1 - 2j, 2 + 1j],
+ [1 - 1j, 2 - 2j]])
+ )])
+def test_gttrf_gttrs_NAG_f07cdf_f07cef_f07crf_f07csf(du, d, dl, du_exp, d_exp,
+ du2_exp, ipiv_exp, b, x):
+ # test to assure that wrapper is consistent with NAG Library Manual Mark 26
+ # example problems: f07cdf and f07cef (real)
+ # examples: f07crf and f07csf (complex)
+ # (Links may expire, so search for "NAG Library Manual Mark 26" online)
+
+ gttrf, gttrs = get_lapack_funcs(('gttrf', "gttrs"), (du[0], du[0]))
+
+ _dl, _d, _du, du2, ipiv, info = gttrf(dl, d, du)
+ assert_allclose(du2, du2_exp)
+ assert_allclose(_du, du_exp)
+ assert_allclose(_d, d_exp, atol=1e-4) # NAG examples provide 4 decimals.
+ assert_allclose(ipiv, ipiv_exp)
+
+ x_gttrs, info = gttrs(_dl, _d, _du, du2, ipiv, b)
+
+ assert_allclose(x_gttrs, x)
+
+
+@pytest.mark.parametrize('dtype', DTYPES)
+@pytest.mark.parametrize('shape', [(3, 7), (7, 3), (2**18, 2**18)])
+def test_geqrfp_lwork(dtype, shape):
+ geqrfp_lwork = get_lapack_funcs(('geqrfp_lwork'), dtype=dtype)
+ m, n = shape
+ lwork, info = geqrfp_lwork(m=m, n=n)
+ assert_equal(info, 0)
+
+
+@pytest.mark.parametrize("ddtype,dtype",
+ zip(REAL_DTYPES + REAL_DTYPES, DTYPES))
+def test_pttrf_pttrs(ddtype, dtype):
+ seed(42)
+ # set test tolerance appropriate for dtype
+ atol = 100*np.finfo(dtype).eps
+ # n is the length of the diagonal of A
+ n = 10
+ # create diagonals according to size and dtype
+
+ # diagonal d should always be real.
+ # add 4 to d so it will be dominant for all dtypes
+ d = generate_random_dtype_array((n,), ddtype) + 4
+ # diagonal e may be real or complex.
+ e = generate_random_dtype_array((n-1,), dtype)
+
+ # assemble diagonals together into matrix
+ A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1)
+ # store a copy of diagonals to later verify
+ diag_cpy = [d.copy(), e.copy()]
+
+ pttrf = get_lapack_funcs('pttrf', dtype=dtype)
+
+ _d, _e, info = pttrf(d, e)
+ # test to assure that the inputs of ?pttrf are unmodified
+ assert_array_equal(d, diag_cpy[0])
+ assert_array_equal(e, diag_cpy[1])
+ assert_equal(info, 0, err_msg=f"pttrf: info = {info}, should be 0")
+
+ # test that the factors from pttrf can be recombined to make A
+ L = np.diag(_e, -1) + np.diag(np.ones(n))
+ D = np.diag(_d)
+
+ assert_allclose(A, L@D@L.conjugate().T, atol=atol)
+
+ # generate random solution x
+ x = generate_random_dtype_array((n,), dtype)
+ # determine accompanying b to get soln x
+ b = A@x
+
+ # determine _x from pttrs
+ pttrs = get_lapack_funcs('pttrs', dtype=dtype)
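+ # the default (upper) convention of ?pttrs expects the superdiagonal of
+ # the U**H @ D @ U factorization, i.e. the conjugate of the _e returned
+ # by ?pttrf (a no-op for real dtypes)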
+ _x, info = pttrs(_d, _e.conj(), b)
+ assert_equal(info, 0, err_msg=f"pttrs: info = {info}, should be 0")
+
+ # test that _x from pttrs matches the expected x
+ assert_allclose(x, _x, atol=atol)
+
+
+@pytest.mark.parametrize("ddtype,dtype",
+ zip(REAL_DTYPES + REAL_DTYPES, DTYPES))
+def test_pttrf_pttrs_errors_incompatible_shape(ddtype, dtype):
+ n = 10
+ pttrf = get_lapack_funcs('pttrf', dtype=dtype)
+ d = generate_random_dtype_array((n,), ddtype) + 2
+ e = generate_random_dtype_array((n-1,), dtype)
+ # test that ValueError is raised with incompatible matrix shapes
+ assert_raises(ValueError, pttrf, d[:-1], e)
+ assert_raises(ValueError, pttrf, d, e[:-1])
+
+
+@pytest.mark.parametrize("ddtype,dtype",
+ zip(REAL_DTYPES + REAL_DTYPES, DTYPES))
+def test_pttrf_pttrs_errors_singular_nonSPD(ddtype, dtype):
+ n = 10
+ pttrf = get_lapack_funcs('pttrf', dtype=dtype)
+ d = generate_random_dtype_array((n,), ddtype) + 2
+ e = generate_random_dtype_array((n-1,), dtype)
+ # test that singular (row of all zeroes) matrix fails via info
+ d[0] = 0
+ e[0] = 0
+ _d, _e, info = pttrf(d, e)
+ assert_equal(_d[info - 1], 0,
+ f"?pttrf: _d[info-1] is {_d[info - 1]}, not the illegal value :0.")
+
+ # test with non-spd matrix
+ d = generate_random_dtype_array((n,), ddtype)
+ _d, _e, info = pttrf(d, e)
+ assert_(info != 0, "?pttrf should fail with non-spd matrix, but didn't")
+
+
+@pytest.mark.parametrize(("d, e, d_expect, e_expect, b, x_expect"), [
+ (np.array([4, 10, 29, 25, 5]),
+ np.array([-2, -6, 15, 8]),
+ np.array([4, 9, 25, 16, 1]),
+ np.array([-.5, -.6667, .6, .5]),
+ np.array([[6, 10], [9, 4], [2, 9], [14, 65],
+ [7, 23]]),
+ np.array([[2.5, 2], [2, -1], [1, -3], [-1, 6],
+ [3, -5]])
+ ), (
+ np.array([16, 41, 46, 21]),
+ np.array([16 + 16j, 18 - 9j, 1 - 4j]),
+ np.array([16, 9, 1, 4]),
+ np.array([1+1j, 2-1j, 1-4j]),
+ np.array([[64+16j, -16-32j], [93+62j, 61-66j],
+ [78-80j, 71-74j], [14-27j, 35+15j]]),
+ np.array([[2+1j, -3-2j], [1+1j, 1+1j], [1-2j, 1-2j],
+ [1-1j, 2+1j]])
+ )])
+def test_pttrf_pttrs_NAG(d, e, d_expect, e_expect, b, x_expect):
+ # test to ensure that the wrapper is consistent with NAG Manual Mark 26
+ # example problems: f07jdf and f07jef (real),
+ # f07jrf and f07jsf (complex)
+ # NAG examples provide 4 decimals.
+ # (Links expire, so please search for "NAG Library Manual Mark 26" online)
+
+ atol = 1e-4
+ pttrf = get_lapack_funcs('pttrf', dtype=e[0])
+ _d, _e, info = pttrf(d, e)
+ assert_allclose(_d, d_expect, atol=atol)
+ assert_allclose(_e, e_expect, atol=atol)
+
+ pttrs = get_lapack_funcs('pttrs', dtype=e[0])
+ _x, info = pttrs(_d, _e.conj(), b)
+ assert_allclose(_x, x_expect, atol=atol)
+
+ # also test option `lower`
+ if e.dtype in COMPLEX_DTYPES:
+ _x, info = pttrs(_d, _e, b, lower=1)
+ assert_allclose(_x, x_expect, atol=atol)
+
+
+def pteqr_get_d_e_A_z(dtype, realtype, n, compute_z):
+ # used by ?pteqr tests to build parameters
+ # returns tuple of (d, e, A, z)
+ if compute_z == 1:
+ # build Hermitian A as A = Q * tri * Q**H by creating Q and tri
+ A_eig = generate_random_dtype_array((n, n), dtype)
+ A_eig = A_eig + np.diag(np.zeros(n) + 4*n)
+ A_eig = (A_eig + A_eig.conj().T) / 2
+ # obtain right eigenvectors (orthogonal)
+ vr = eigh(A_eig)[1]
+ # create tridiagonal matrix
+ d = generate_random_dtype_array((n,), realtype) + 4
+ e = generate_random_dtype_array((n-1,), realtype)
+ tri = np.diag(d) + np.diag(e, 1) + np.diag(e, -1)
+ # Build A from these factors, as ?sytrd would: A = Q * tri * Q**H
+ A = vr @ tri @ vr.conj().T
+ # vr is orthogonal
+ z = vr
+
+ else:
+ # d and e are always real per lapack docs.
+ d = generate_random_dtype_array((n,), realtype)
+ e = generate_random_dtype_array((n-1,), realtype)
+
+ # make SPD
+ d = d + 4
+ A = np.diag(d) + np.diag(e, 1) + np.diag(e, -1)
+ z = np.diag(d) + np.diag(e, -1) + np.diag(e, 1)
+ return (d, e, A, z)
+
+
+@pytest.mark.parametrize("dtype,realtype",
+ zip(DTYPES, REAL_DTYPES + REAL_DTYPES))
+@pytest.mark.parametrize("compute_z", range(3))
+def test_pteqr(dtype, realtype, compute_z):
+ '''
+ Tests the ?pteqr lapack routine for all dtypes and compute_z parameters.
+ It generates random SPD matrix diagonals d and e, and then confirms the
+ eigenvalues against scipy.linalg.eigh. When compute_z is nonzero it also
+ tests that z recombines to A.
+ '''
+ seed(42)
+ atol = 1000*np.finfo(dtype).eps
+ pteqr = get_lapack_funcs(('pteqr'), dtype=dtype)
+
+ n = 10
+
+ d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z)
+
+ d_pteqr, e_pteqr, z_pteqr, info = pteqr(d=d, e=e, z=z, compute_z=compute_z)
+ assert_equal(info, 0, f"info = {info}, should be 0.")
+
+ # compare the routine's eigenvalues with scipy.linalg.eig's.
+ assert_allclose(np.sort(eigh(A)[0]), np.sort(d_pteqr), atol=atol)
+
+ if compute_z:
+ # verify z_pteqr as orthogonal
+ assert_allclose(z_pteqr @ np.conj(z_pteqr).T, np.identity(n),
+ atol=atol)
+ # verify that z_pteqr recombines to A
+ assert_allclose(z_pteqr @ np.diag(d_pteqr) @ np.conj(z_pteqr).T,
+ A, atol=atol)
+
+
+@pytest.mark.parametrize("dtype,realtype",
+ zip(DTYPES, REAL_DTYPES + REAL_DTYPES))
+@pytest.mark.parametrize("compute_z", range(3))
+def test_pteqr_error_non_spd(dtype, realtype, compute_z):
+ seed(42)
+ pteqr = get_lapack_funcs(('pteqr'), dtype=dtype)
+
+ n = 10
+ d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z)
+
+ # test with non-spd matrix
+ d_pteqr, e_pteqr, z_pteqr, info = pteqr(d - 4, e, z=z, compute_z=compute_z)
+ assert info > 0
+
+
+@pytest.mark.parametrize("dtype,realtype",
+ zip(DTYPES, REAL_DTYPES + REAL_DTYPES))
+@pytest.mark.parametrize("compute_z", range(3))
+def test_pteqr_raise_error_wrong_shape(dtype, realtype, compute_z):
+ seed(42)
+ pteqr = get_lapack_funcs(('pteqr'), dtype=dtype)
+ n = 10
+ d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z)
+ # test with incorrect/incompatible array sizes
+ assert_raises(ValueError, pteqr, d[:-1], e, z=z, compute_z=compute_z)
+ assert_raises(ValueError, pteqr, d, e[:-1], z=z, compute_z=compute_z)
+ if compute_z:
+ assert_raises(ValueError, pteqr, d, e, z=z[:-1], compute_z=compute_z)
+
+
+@pytest.mark.parametrize("dtype,realtype",
+ zip(DTYPES, REAL_DTYPES + REAL_DTYPES))
+@pytest.mark.parametrize("compute_z", range(3))
+def test_pteqr_error_singular(dtype, realtype, compute_z):
+ seed(42)
+ pteqr = get_lapack_funcs(('pteqr'), dtype=dtype)
+ n = 10
+ d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z)
+ # test with singular matrix
+ d[0] = 0
+ e[0] = 0
+ d_pteqr, e_pteqr, z_pteqr, info = pteqr(d, e, z=z, compute_z=compute_z)
+ assert info > 0
+
+
+@pytest.mark.parametrize("compute_z,d,e,d_expect,z_expect",
+ [(2, # "I"
+ np.array([4.16, 5.25, 1.09, .62]),
+ np.array([3.17, -.97, .55]),
+ np.array([8.0023, 1.9926, 1.0014, 0.1237]),
+ np.array([[0.6326, 0.6245, -0.4191, 0.1847],
+ [0.7668, -0.4270, 0.4176, -0.2352],
+ [-0.1082, 0.6071, 0.4594, -0.6393],
+ [-0.0081, 0.2432, 0.6625, 0.7084]])),
+ ])
+def test_pteqr_NAG_f08jgf(compute_z, d, e, d_expect, z_expect):
+ '''
+ Implements real (f08jgf) example from NAG Manual Mark 26.
+ Tests for correct outputs.
+ '''
+ # the NAG manual has 4 decimals accuracy
+ atol = 1e-4
+ pteqr = get_lapack_funcs(('pteqr'), dtype=d.dtype)
+
+ z = np.diag(d) + np.diag(e, 1) + np.diag(e, -1)
+ _d, _e, _z, info = pteqr(d=d, e=e, z=z, compute_z=compute_z)
+ assert_allclose(_d, d_expect, atol=atol)
+ assert_allclose(np.abs(_z), np.abs(z_expect), atol=atol)
+
+
+@pytest.mark.parametrize('dtype', DTYPES)
+@pytest.mark.parametrize('matrix_size', [(3, 4), (7, 6), (6, 6)])
+def test_geqrfp(dtype, matrix_size):
+ # Tests for all dtypes, and for tall, wide, and square matrices.
+ # Using the routine with a random matrix A, Q and R are obtained and then
+ # tested such that R is upper triangular and non-negative on the diagonal,
+ # and Q is an orthogonal matrix. Verifies that A = Q@R. It also
+ # tests against a matrix for which linalg.qr returns
+ # negative diagonals, and for error messaging.
+
+ # set test tolerance appropriate for dtype
+ np.random.seed(42)
+ rtol = 250*np.finfo(dtype).eps
+ atol = 100*np.finfo(dtype).eps
+ # get appropriate ?geqrfp for dtype
+ geqrfp = get_lapack_funcs(('geqrfp'), dtype=dtype)
+ gqr = get_lapack_funcs(("orgqr"), dtype=dtype)
+
+ m, n = matrix_size
+
+ # create random matrix of dimensions m x n
+ A = generate_random_dtype_array((m, n), dtype=dtype)
+ # create qr matrix using geqrfp
+ qr_A, tau, info = geqrfp(A)
+
+ # obtain r from the upper triangular area
+ r = np.triu(qr_A)
+
+ # obtain q from the orgqr lapack routine
+ # based on linalg.qr's extraction strategy of q with orgqr
+
+ if m > n:
+ # this adds an extra column to the end of qr_A
+ # let qqr be an empty m x m matrix
+ qqr = np.zeros((m, m), dtype=dtype)
+ # set first n columns of qqr to qr_A
+ qqr[:, :n] = qr_A
+ # determine q from this qqr
+ # note that m is a sufficient lwork value per the LAPACK documentation
+ q = gqr(qqr, tau=tau, lwork=m)[0]
+ else:
+ q = gqr(qr_A[:, :m], tau=tau, lwork=m)[0]
+
+ # test that q and r still make A
+ assert_allclose(q@r, A, rtol=rtol)
+ # ensure that q is orthogonal (that q @ transposed q is the identity)
+ assert_allclose(np.eye(q.shape[0]), q@(q.conj().T), rtol=rtol,
+ atol=atol)
+ # ensure r is upper tri by comparing original r to r as upper triangular
+ assert_allclose(r, np.triu(r), rtol=rtol)
+ # make sure diagonals of r are positive for this random solution
+ assert_(np.all(np.diag(r) > np.zeros(len(np.diag(r)))))
+ # ensure that info is zero for this success
+ assert_(info == 0)
+
+ # test that this routine gives r diagonals that are positive for a
+ # matrix for which scipy.linalg.qr returns negatives in the diagonal
+ A_negative = generate_random_dtype_array((n, m), dtype=dtype) * -1
+ q_qr_neg, r_qr_neg = qr(A_negative)
+ r_geqrfp_neg, tau_neg, info_neg = geqrfp(A_negative)
+ # assert that some of the diagonal entries from linalg.qr are negative
+ # and that all of the diagonal entries from geqrfp are positive
+ assert_(np.any(np.diag(r_qr_neg) < 0) and
+ np.all(np.diag(r_geqrfp_neg) > 0))
+
+
+def test_geqrfp_errors_with_empty_array():
+ # check that empty array raises good error message
+ A_empty = np.array([])
+ geqrfp = get_lapack_funcs('geqrfp', dtype=A_empty.dtype)
+ assert_raises(Exception, geqrfp, A_empty)
+
+
+@pytest.mark.parametrize("driver", ['ev', 'evd', 'evr', 'evx'])
+@pytest.mark.parametrize("pfx", ['sy', 'he'])
+def test_standard_eigh_lworks(pfx, driver):
+ n = 1200 # Some sufficiently big arbitrary number
+ dtype = REAL_DTYPES if pfx == 'sy' else COMPLEX_DTYPES
+ sc_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[0])
+ dz_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[1])
+ try:
+ _compute_lwork(sc_dlw, n, lower=1)
+ _compute_lwork(dz_dlw, n, lower=1)
+ except Exception as e:
+ pytest.fail(f"{pfx+driver}_lwork raised unexpected exception: {e}")
+
+
+@pytest.mark.parametrize("driver", ['gv', 'gvx'])
+@pytest.mark.parametrize("pfx", ['sy', 'he'])
+def test_generalized_eigh_lworks(pfx, driver):
+ n = 1200 # Some sufficiently big arbitrary number
+ dtype = REAL_DTYPES if pfx == 'sy' else COMPLEX_DTYPES
+ sc_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[0])
+ dz_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[1])
+ # Shouldn't raise any exceptions
+ try:
+ _compute_lwork(sc_dlw, n, uplo="L")
+ _compute_lwork(dz_dlw, n, uplo="L")
+ except Exception as e:
+ pytest.fail(f"{pfx+driver}_lwork raised unexpected exception: {e}")
+
+
+@pytest.mark.parametrize("dtype_", DTYPES)
+@pytest.mark.parametrize("m", [1, 10, 100, 1000])
+def test_orcsd_uncsd_lwork(dtype_, m):
+ seed(1234)
+ p = randint(0, m)
+ q = m - p
+ pfx = 'or' if dtype_ in REAL_DTYPES else 'un'
+ dlw = pfx + 'csd_lwork'
+ lw = get_lapack_funcs(dlw, dtype=dtype_)
+ lwval = _compute_lwork(lw, m, p, q)
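+ # ?uncsd_lwork returns a (lwork, lrwork) pair while ?orcsd_lwork returns
+ # a single value, so wrap the scalar in a tuple before the positivity check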
+ lwval = lwval if pfx == 'un' else (lwval,)
+ assert all([x > 0 for x in lwval])
+
+
+@pytest.mark.parametrize("dtype_", DTYPES)
+def test_orcsd_uncsd(dtype_):
+ m, p, q = 250, 80, 170
+
+ pfx = 'or' if dtype_ in REAL_DTYPES else 'un'
+ X = ortho_group.rvs(m) if pfx == 'or' else unitary_group.rvs(m)
+
+ drv, dlw = get_lapack_funcs((pfx + 'csd', pfx + 'csd_lwork'), dtype=dtype_)
+ lwval = _compute_lwork(dlw, m, p, q)
+ lwvals = {'lwork': lwval} if pfx == 'or' else dict(zip(['lwork',
+ 'lrwork'], lwval))
+
+ cs11, cs12, cs21, cs22, theta, u1, u2, v1t, v2t, info =\
+ drv(X[:p, :q], X[:p, q:], X[p:, :q], X[p:, q:], **lwvals)
+
+ assert info == 0
+
+ U = block_diag(u1, u2)
+ VH = block_diag(v1t, v2t)
+ r = min(min(p, q), min(m-p, m-q))
+ n11 = min(p, q) - r
+ n12 = min(p, m-q) - r
+ n21 = min(m-p, q) - r
+ n22 = min(m-p, m-q) - r
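+ # assemble the middle factor S of the CS decomposition X = U @ S @ VH:
+ # identity blocks of sizes n11 and n22, +/-1 entries for the n12 and n21
+ # blocks, and cosine/sine blocks for the r principal angles in theta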
+
+ S = np.zeros((m, m), dtype=dtype_)
+ one = dtype_(1.)
+ for i in range(n11):
+ S[i, i] = one
+ for i in range(n22):
+ S[p+i, q+i] = one
+ for i in range(n12):
+ S[i+n11+r, i+n11+r+n21+n22+r] = -one
+ for i in range(n21):
+ S[p+n22+r+i, n11+r+i] = one
+
+ for i in range(r):
+ S[i+n11, i+n11] = np.cos(theta[i])
+ S[p+n22+i, i+r+n21+n22] = np.cos(theta[i])
+
+ S[i+n11, i+n11+n21+n22+r] = -np.sin(theta[i])
+ S[p+n22+i, i+n11] = np.sin(theta[i])
+
+ Xc = U @ S @ VH
+ assert_allclose(X, Xc, rtol=0., atol=1e4*np.finfo(dtype_).eps)
+
+
+@pytest.mark.parametrize("dtype", DTYPES)
+@pytest.mark.parametrize("trans_bool", [False, True])
+@pytest.mark.parametrize("fact", ["F", "N"])
+def test_gtsvx(dtype, trans_bool, fact):
+ """
+ These tests use ?gtsvx to solve a random Ax=b system for each dtype.
+ They check that the inputs are unmodified, that the solution matches the
+ expected x, that the transpose options work, and that the outputs have
+ the correct types and shapes. The tests are parametrized over DTYPES,
+ the transpose option, and the 'fact' value with its related inputs.
+ """
+ seed(42)
+ # set test tolerance appropriate for dtype
+ atol = 100 * np.finfo(dtype).eps
+ # obtain routine
+ gtsvx, gttrf = get_lapack_funcs(('gtsvx', 'gttrf'), dtype=dtype)
+ # Generate random tridiagonal matrix A
+ n = 10
+ dl = generate_random_dtype_array((n-1,), dtype=dtype)
+ d = generate_random_dtype_array((n,), dtype=dtype)
+ du = generate_random_dtype_array((n-1,), dtype=dtype)
+ A = np.diag(dl, -1) + np.diag(d) + np.diag(du, 1)
+ # generate random solution x
+ x = generate_random_dtype_array((n, 2), dtype=dtype)
+ # create b from x for equation Ax=b
+ trans = ("T" if dtype in REAL_DTYPES else "C") if trans_bool else "N"
+ b = (A.conj().T if trans_bool else A) @ x
+
+ # store a copy of the inputs to check they haven't been modified later
+ inputs_cpy = [dl.copy(), d.copy(), du.copy(), b.copy()]
+
+ # set these to None if fact == 'N', or to the output of gttrf if fact == 'F'
+ dlf_, df_, duf_, du2f_, ipiv_, info_ = \
+ gttrf(dl, d, du) if fact == 'F' else [None]*6
+
+ gtsvx_out = gtsvx(dl, d, du, b, fact=fact, trans=trans, dlf=dlf_, df=df_,
+ duf=duf_, du2=du2f_, ipiv=ipiv_)
+ dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
+ assert_(info == 0, f"?gtsvx info = {info}, should be zero")
+
+ # assure that inputs are unmodified
+ assert_array_equal(dl, inputs_cpy[0])
+ assert_array_equal(d, inputs_cpy[1])
+ assert_array_equal(du, inputs_cpy[2])
+ assert_array_equal(b, inputs_cpy[3])
+
+ # test that x_soln matches the expected x
+ assert_allclose(x, x_soln, atol=atol)
+
+ # assert that the outputs are of correct type or shape
+ # rcond should be a scalar
+ assert_(hasattr(rcond, "__len__") is not True,
+ f"rcond should be scalar but is {rcond}")
+ # ferr should have one entry per column of x
+ assert_(ferr.shape[0] == b.shape[1], "ferr.shape[0] is {} but should be {}"
+ .format(ferr.shape[0], b.shape[1]))
+ # berr should have one entry per column of x
+ assert_(berr.shape[0] == b.shape[1], "berr.shape[0] is {} but should be {}"
+ .format(berr.shape[0], b.shape[1]))
+
+
+@pytest.mark.parametrize("dtype", DTYPES)
+@pytest.mark.parametrize("trans_bool", [0, 1])
+@pytest.mark.parametrize("fact", ["F", "N"])
+def test_gtsvx_error_singular(dtype, trans_bool, fact):
+ seed(42)
+ # obtain routine
+ gtsvx, gttrf = get_lapack_funcs(('gtsvx', 'gttrf'), dtype=dtype)
+ # Generate random tridiagonal matrix A
+ n = 10
+ dl = generate_random_dtype_array((n-1,), dtype=dtype)
+ d = generate_random_dtype_array((n,), dtype=dtype)
+ du = generate_random_dtype_array((n-1,), dtype=dtype)
+ A = np.diag(dl, -1) + np.diag(d) + np.diag(du, 1)
+ # generate random solution x
+ x = generate_random_dtype_array((n, 2), dtype=dtype)
+ # create b from x for equation Ax=b
+ trans = "T" if dtype in REAL_DTYPES else "C"
+ b = (A.conj().T if trans_bool else A) @ x
+
+ # set these to None if fact == 'N', or to the output of gttrf if fact == 'F'
+ dlf_, df_, duf_, du2f_, ipiv_, info_ = \
+ gttrf(dl, d, du) if fact == 'F' else [None]*6
+
+ gtsvx_out = gtsvx(dl, d, du, b, fact=fact, trans=trans, dlf=dlf_, df=df_,
+ duf=duf_, du2=du2f_, ipiv=ipiv_)
+ dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
+ # test with singular matrix
+ # no need to test inputs with fact "F" since ?gttrf already does.
+ if fact == "N":
+ # Construct a singular example manually
+ d[-1] = 0
+ dl[-1] = 0
+ # solve using routine
+ gtsvx_out = gtsvx(dl, d, du, b)
+ dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
+ # test for the singular matrix.
+ assert info > 0, "info should be > 0 for singular matrix"
+
+ elif fact == 'F':
+ # assuming that a singular factorization is input
+ df_[-1] = 0
+ duf_[-1] = 0
+ du2f_[-1] = 0
+
+ gtsvx_out = gtsvx(dl, d, du, b, fact=fact, dlf=dlf_, df=df_, duf=duf_,
+ du2=du2f_, ipiv=ipiv_)
+ dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
+ # info should not be zero and should provide index of illegal value
+ assert info > 0, "info should be > 0 for singular matrix"
+
+
+@pytest.mark.parametrize("dtype", DTYPES*2)
+@pytest.mark.parametrize("trans_bool", [False, True])
+@pytest.mark.parametrize("fact", ["F", "N"])
+def test_gtsvx_error_incompatible_size(dtype, trans_bool, fact):
+ seed(42)
+ # obtain routine
+ gtsvx, gttrf = get_lapack_funcs(('gtsvx', 'gttrf'), dtype=dtype)
+ # Generate random tridiagonal matrix A
+ n = 10
+ dl = generate_random_dtype_array((n-1,), dtype=dtype)
+ d = generate_random_dtype_array((n,), dtype=dtype)
+ du = generate_random_dtype_array((n-1,), dtype=dtype)
+ A = np.diag(dl, -1) + np.diag(d) + np.diag(du, 1)
+ # generate random solution x
+ x = generate_random_dtype_array((n, 2), dtype=dtype)
+ # create b from x for equation Ax=b
+ trans = "T" if dtype in REAL_DTYPES else "C"
+ b = (A.conj().T if trans_bool else A) @ x
+
+ # set these to None if fact == 'N', or to the output of gttrf if fact == 'F'
+ dlf_, df_, duf_, du2f_, ipiv_, info_ = \
+ gttrf(dl, d, du) if fact == 'F' else [None]*6
+
+ if fact == "N":
+ assert_raises(ValueError, gtsvx, dl[:-1], d, du, b,
+ fact=fact, trans=trans, dlf=dlf_, df=df_,
+ duf=duf_, du2=du2f_, ipiv=ipiv_)
+ assert_raises(ValueError, gtsvx, dl, d[:-1], du, b,
+ fact=fact, trans=trans, dlf=dlf_, df=df_,
+ duf=duf_, du2=du2f_, ipiv=ipiv_)
+ assert_raises(ValueError, gtsvx, dl, d, du[:-1], b,
+ fact=fact, trans=trans, dlf=dlf_, df=df_,
+ duf=duf_, du2=du2f_, ipiv=ipiv_)
+ assert_raises(Exception, gtsvx, dl, d, du, b[:-1],
+ fact=fact, trans=trans, dlf=dlf_, df=df_,
+ duf=duf_, du2=du2f_, ipiv=ipiv_)
+ else:
+ assert_raises(ValueError, gtsvx, dl, d, du, b,
+ fact=fact, trans=trans, dlf=dlf_[:-1], df=df_,
+ duf=duf_, du2=du2f_, ipiv=ipiv_)
+ assert_raises(ValueError, gtsvx, dl, d, du, b,
+ fact=fact, trans=trans, dlf=dlf_, df=df_[:-1],
+ duf=duf_, du2=du2f_, ipiv=ipiv_)
+ assert_raises(ValueError, gtsvx, dl, d, du, b,
+ fact=fact, trans=trans, dlf=dlf_, df=df_,
+ duf=duf_[:-1], du2=du2f_, ipiv=ipiv_)
+ assert_raises(ValueError, gtsvx, dl, d, du, b,
+ fact=fact, trans=trans, dlf=dlf_, df=df_,
+ duf=duf_, du2=du2f_[:-1], ipiv=ipiv_)
+
+
+@pytest.mark.parametrize("du,d,dl,b,x",
+ [(np.array([2.1, -1.0, 1.9, 8.0]),
+ np.array([3.0, 2.3, -5.0, -0.9, 7.1]),
+ np.array([3.4, 3.6, 7.0, -6.0]),
+ np.array([[2.7, 6.6], [-.5, 10.8], [2.6, -3.2],
+ [.6, -11.2], [2.7, 19.1]]),
+ np.array([[-4, 5], [7, -4], [3, -3], [-4, -2],
+ [-3, 1]])),
+ (np.array([2 - 1j, 2 + 1j, -1 + 1j, 1 - 1j]),
+ np.array([-1.3 + 1.3j, -1.3 + 1.3j, -1.3 + 3.3j,
+ -.3 + 4.3j, -3.3 + 1.3j]),
+ np.array([1 - 2j, 1 + 1j, 2 - 3j, 1 + 1j]),
+ np.array([[2.4 - 5j, 2.7 + 6.9j],
+ [3.4 + 18.2j, -6.9 - 5.3j],
+ [-14.7 + 9.7j, -6 - .6j],
+ [31.9 - 7.7j, -3.9 + 9.3j],
+ [-1 + 1.6j, -3 + 12.2j]]),
+ np.array([[1 + 1j, 2 - 1j], [3 - 1j, 1 + 2j],
+ [4 + 5j, -1 + 1j], [-1 - 2j, 2 + 1j],
+ [1 - 1j, 2 - 2j]]))])
+def test_gtsvx_NAG(du, d, dl, b, x):
+ # Test to ensure wrapper is consistent with NAG Manual Mark 26
+ # example problems: real (f07cbf) and complex (f07cpf)
+ gtsvx = get_lapack_funcs('gtsvx', dtype=d.dtype)
+
+ gtsvx_out = gtsvx(dl, d, du, b)
+ dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out
+
+ assert_array_almost_equal(x, x_soln)
+
+
+@pytest.mark.parametrize("dtype,realtype", zip(DTYPES, REAL_DTYPES
+ + REAL_DTYPES))
+@pytest.mark.parametrize("fact,df_de_lambda",
+ [("F",
+ lambda d, e:get_lapack_funcs('pttrf',
+ dtype=e.dtype)(d, e)),
+ ("N", lambda d, e: (None, None, None))])
+def test_ptsvx(dtype, realtype, fact, df_de_lambda):
+ '''
+ This tests the ?ptsvx lapack routine wrapper by solving a random system
+ Ax = b for all dtypes and input variations. It checks that the input
+ parameters are unmodified, that both fact options work, that the factors
+ recombine to A, and that the outputs have the correct types and shapes.
+ '''
+ seed(42)
+ # set test tolerance appropriate for dtype
+ atol = 100 * np.finfo(dtype).eps
+ ptsvx = get_lapack_funcs('ptsvx', dtype=dtype)
+ n = 5
+ # create diagonals according to size and dtype
+ d = generate_random_dtype_array((n,), realtype) + 4
+ e = generate_random_dtype_array((n-1,), dtype)
+ A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1)
+ x_soln = generate_random_dtype_array((n, 2), dtype=dtype)
+ b = A @ x_soln
+
+ # use lambda to determine what df, ef are
+ df, ef, info = df_de_lambda(d, e)
+
+ # create copy to later test that they are unmodified
+ diag_cpy = [d.copy(), e.copy(), b.copy()]
+
+ # solve using routine
+ df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b, fact=fact,
+ df=df, ef=ef)
+ # d, e, and b should be unmodified
+ assert_array_equal(d, diag_cpy[0])
+ assert_array_equal(e, diag_cpy[1])
+ assert_array_equal(b, diag_cpy[2])
+ assert_(info == 0, f"info should be 0 but is {info}.")
+ assert_array_almost_equal(x_soln, x)
+
+ # test that the factors from ptsvx can be recombined to make A
+ L = np.diag(ef, -1) + np.diag(np.ones(n))
+ D = np.diag(df)
+ assert_allclose(A, L@D@(np.conj(L).T), atol=atol)
+
+ # assert that the outputs are of correct type or shape
+ # rcond should be a scalar
+ assert not hasattr(rcond, "__len__"), \
+ f"rcond should be scalar but is {rcond}"
+ # ferr should have one entry per column of x
+ assert_(ferr.shape == (2,), "ferr.shape is {} but should be ({},)"
+ .format(ferr.shape, x_soln.shape[1]))
+ # berr should have one entry per column of x
+ assert_(berr.shape == (2,), "berr.shape is {} but should be ({},)"
+ .format(berr.shape, x_soln.shape[1]))
+
+
+@pytest.mark.parametrize("dtype,realtype", zip(DTYPES, REAL_DTYPES
+ + REAL_DTYPES))
+@pytest.mark.parametrize("fact,df_de_lambda",
+ [("F",
+ lambda d, e:get_lapack_funcs('pttrf',
+ dtype=e.dtype)(d, e)),
+ ("N", lambda d, e: (None, None, None))])
+def test_ptsvx_error_raise_errors(dtype, realtype, fact, df_de_lambda):
+ seed(42)
+ ptsvx = get_lapack_funcs('ptsvx', dtype=dtype)
+ n = 5
+ # create diagonals according to size and dtype
+ d = generate_random_dtype_array((n,), realtype) + 4
+ e = generate_random_dtype_array((n-1,), dtype)
+ A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1)
+ x_soln = generate_random_dtype_array((n, 2), dtype=dtype)
+ b = A @ x_soln
+
+ # use lambda to determine what df, ef are
+ df, ef, info = df_de_lambda(d, e)
+
+ # test with malformatted array sizes
+ assert_raises(ValueError, ptsvx, d[:-1], e, b, fact=fact, df=df, ef=ef)
+ assert_raises(ValueError, ptsvx, d, e[:-1], b, fact=fact, df=df, ef=ef)
+ assert_raises(Exception, ptsvx, d, e, b[:-1], fact=fact, df=df, ef=ef)
+
+
+@pytest.mark.parametrize("dtype,realtype", zip(DTYPES, REAL_DTYPES
+ + REAL_DTYPES))
+@pytest.mark.parametrize("fact,df_de_lambda",
+ [("F",
+ lambda d, e:get_lapack_funcs('pttrf',
+ dtype=e.dtype)(d, e)),
+ ("N", lambda d, e: (None, None, None))])
+def test_ptsvx_non_SPD_singular(dtype, realtype, fact, df_de_lambda):
+ seed(42)
+ ptsvx = get_lapack_funcs('ptsvx', dtype=dtype)
+ n = 5
+ # create diagonals according to size and dtype
+ d = generate_random_dtype_array((n,), realtype) + 4
+ e = generate_random_dtype_array((n-1,), dtype)
+ A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1)
+ x_soln = generate_random_dtype_array((n, 2), dtype=dtype)
+ b = A @ x_soln
+
+ # use lambda to determine what df, ef are
+ df, ef, info = df_de_lambda(d, e)
+
+ if fact == "N":
+ d[3] = 0
+ # obtain new df, ef
+ df, ef, info = df_de_lambda(d, e)
+ # solve using routine
+ df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b)
+ # test for the singular matrix.
+ assert info > 0 and info <= n
+
+ # non SPD matrix
+ d = generate_random_dtype_array((n,), realtype)
+ df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b)
+ assert info > 0 and info <= n
+ else:
+ # assuming that someone is using a singular factorization
+ df, ef, info = df_de_lambda(d, e)
+ df[0] = 0
+ ef[0] = 0
+ df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b, fact=fact,
+ df=df, ef=ef)
+ assert info > 0
+
+
+@pytest.mark.parametrize('d,e,b,x',
+ [(np.array([4, 10, 29, 25, 5]),
+ np.array([-2, -6, 15, 8]),
+ np.array([[6, 10], [9, 4], [2, 9], [14, 65],
+ [7, 23]]),
+ np.array([[2.5, 2], [2, -1], [1, -3],
+ [-1, 6], [3, -5]])),
+ (np.array([16, 41, 46, 21]),
+ np.array([16 + 16j, 18 - 9j, 1 - 4j]),
+ np.array([[64 + 16j, -16 - 32j],
+ [93 + 62j, 61 - 66j],
+ [78 - 80j, 71 - 74j],
+ [14 - 27j, 35 + 15j]]),
+ np.array([[2 + 1j, -3 - 2j],
+ [1 + 1j, 1 + 1j],
+ [1 - 2j, 1 - 2j],
+ [1 - 1j, 2 + 1j]]))])
+def test_ptsvx_NAG(d, e, b, x):
+ # test to assure that wrapper is consistent with NAG Manual Mark 26
+ # example problems: f07jbf, f07jpf
+ # (Links expire, so please search for "NAG Library Manual Mark 26" online)
+
+ # obtain routine with correct type based on e.dtype
+ ptsvx = get_lapack_funcs('ptsvx', dtype=e.dtype)
+ # solve using routine
+ df, ef, x_ptsvx, rcond, ferr, berr, info = ptsvx(d, e, b)
+ # determine ptsvx's solution and x are the same.
+ assert_array_almost_equal(x, x_ptsvx)
+
+
+@pytest.mark.parametrize('lower', [False, True])
+@pytest.mark.parametrize('dtype', DTYPES)
+def test_pptrs_pptri_pptrf_ppsv_ppcon(dtype, lower):
+ seed(1234)
+ atol = np.finfo(dtype).eps*100
+ # Manual conversion to/from packed format is feasible here.
+ n, nrhs = 10, 4
+ a = generate_random_dtype_array([n, n], dtype=dtype)
+ b = generate_random_dtype_array([n, nrhs], dtype=dtype)
+
+ a = a.conj().T + a + np.eye(n, dtype=dtype) * dtype(5.)
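+ # a is now Hermitian and, thanks to the diagonal shift, positive definite
+ # for this seed, so the Cholesky-based packed routines below apply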
+ if lower:
+ inds = ([x for y in range(n) for x in range(y, n)],
+ [y for y in range(n) for x in range(y, n)])
+ else:
+ inds = ([x for y in range(1, n+1) for x in range(y)],
+ [y-1 for y in range(1, n+1) for x in range(y)])
+ ap = a[inds]
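+ # inds traverses the lower (or upper) triangle column by column, which is
+ # the LAPACK packed storage order expected by the ?pp* routines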
+ ppsv, pptrf, pptrs, pptri, ppcon = get_lapack_funcs(
+ ('ppsv', 'pptrf', 'pptrs', 'pptri', 'ppcon'),
+ dtype=dtype,
+ ilp64="preferred")
+
+ ul, info = pptrf(n, ap, lower=lower)
+ assert_equal(info, 0)
+ aul = cholesky(a, lower=lower)[inds]
+ assert_allclose(ul, aul, rtol=0, atol=atol)
+
+ uli, info = pptri(n, ul, lower=lower)
+ assert_equal(info, 0)
+ auli = inv(a)[inds]
+ assert_allclose(uli, auli, rtol=0, atol=atol)
+
+ x, info = pptrs(n, ul, b, lower=lower)
+ assert_equal(info, 0)
+ bx = solve(a, b)
+ assert_allclose(x, bx, rtol=0, atol=atol)
+
+ xv, info = ppsv(n, ap, b, lower=lower)
+ assert_equal(info, 0)
+ assert_allclose(xv, bx, rtol=0, atol=atol)
+
+ anorm = np.linalg.norm(a, 1)
+ rcond, info = ppcon(n, ap, anorm=anorm, lower=lower)
+ assert_equal(info, 0)
+ assert_(abs(1/rcond - np.linalg.cond(a, p=1))*rcond < 1)
+
+
+@pytest.mark.parametrize('dtype', DTYPES)
+def test_gees_trexc(dtype):
+ seed(1234)
+ atol = np.finfo(dtype).eps*100
+
+ n = 10
+ a = generate_random_dtype_array([n, n], dtype=dtype)
+
+ gees, trexc = get_lapack_funcs(('gees', 'trexc'), dtype=dtype)
+
+ result = gees(lambda x: None, a, overwrite_a=False)
+ assert_equal(result[-1], 0)
+
+ t = result[0]
+ z = result[-3]
+
+ d2 = t[6, 6]
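+ # d2 is the eigenvalue currently at (6, 6); after trexc moves row/column 7
+ # (1-based) to the front it should appear at t[0, 0]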
+
+ if dtype in COMPLEX_DTYPES:
+ assert_allclose(t, np.triu(t), rtol=0, atol=atol)
+
+ assert_allclose(z @ t @ z.conj().T, a, rtol=0, atol=atol)
+
+ result = trexc(t, z, 7, 1)
+ assert_equal(result[-1], 0)
+
+ t = result[0]
+ z = result[-2]
+
+ if dtype in COMPLEX_DTYPES:
+ assert_allclose(t, np.triu(t), rtol=0, atol=atol)
+
+ assert_allclose(z @ t @ z.conj().T, a, rtol=0, atol=atol)
+
+ assert_allclose(t[0, 0], d2, rtol=0, atol=atol)
+
+
+@pytest.mark.parametrize(
+ "t, expect, ifst, ilst",
+ [(np.array([[0.80, -0.11, 0.01, 0.03],
+ [0.00, -0.10, 0.25, 0.35],
+ [0.00, -0.65, -0.10, 0.20],
+ [0.00, 0.00, 0.00, -0.10]]),
+ np.array([[-0.1000, -0.6463, 0.0874, 0.2010],
+ [0.2514, -0.1000, 0.0927, 0.3505],
+ [0.0000, 0.0000, 0.8000, -0.0117],
+ [0.0000, 0.0000, 0.0000, -0.1000]]),
+ 2, 1),
+ (np.array([[-6.00 - 7.00j, 0.36 - 0.36j, -0.19 + 0.48j, 0.88 - 0.25j],
+ [0.00 + 0.00j, -5.00 + 2.00j, -0.03 - 0.72j, -0.23 + 0.13j],
+ [0.00 + 0.00j, 0.00 + 0.00j, 8.00 - 1.00j, 0.94 + 0.53j],
+ [0.00 + 0.00j, 0.00 + 0.00j, 0.00 + 0.00j, 3.00 - 4.00j]]),
+ np.array([[-5.0000 + 2.0000j, -0.1574 + 0.7143j,
+ 0.1781 - 0.1913j, 0.3950 + 0.3861j],
+ [0.0000 + 0.0000j, 8.0000 - 1.0000j,
+ 1.0742 + 0.1447j, 0.2515 - 0.3397j],
+ [0.0000 + 0.0000j, 0.0000 + 0.0000j,
+ 3.0000 - 4.0000j, 0.2264 + 0.8962j],
+ [0.0000 + 0.0000j, 0.0000 + 0.0000j,
+ 0.0000 + 0.0000j, -6.0000 - 7.0000j]]),
+ 1, 4)])
+def test_trexc_NAG(t, ifst, ilst, expect):
+ """
+ This test implements the example found in the NAG manual,
+ f08qfc, f08qtc, f08qgc, f08quc.
+ """
+ # NAG manual provides accuracy up to 4 decimals
+ atol = 1e-4
+ trexc = get_lapack_funcs('trexc', dtype=t.dtype)
+
+ result = trexc(t, t, ifst, ilst, wantq=0)
+ assert_equal(result[-1], 0)
+
+ t = result[0]
+ assert_allclose(expect, t, atol=atol)
+
+
+@pytest.mark.parametrize('dtype', DTYPES)
+def test_gges_tgexc(dtype):
+ if (
+ dtype == np.float32 and
+ sys.platform == 'darwin' and
+ blas_provider == 'openblas' and
+ blas_version < '0.3.21.dev'
+ ):
+ pytest.xfail("gges[float32] broken for OpenBLAS on macOS, see gh-16949")
+
+ seed(1234)
+ atol = np.finfo(dtype).eps*100
+
+ n = 10
+ a = generate_random_dtype_array([n, n], dtype=dtype)
+ b = generate_random_dtype_array([n, n], dtype=dtype)
+
+ gges, tgexc = get_lapack_funcs(('gges', 'tgexc'), dtype=dtype)
+
+ result = gges(lambda x: None, a, b, overwrite_a=False, overwrite_b=False)
+ assert_equal(result[-1], 0)
+
+ s = result[0]
+ t = result[1]
+ q = result[-4]
+ z = result[-3]
+
+ d1 = s[0, 0] / t[0, 0]
+ d2 = s[6, 6] / t[6, 6]
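+ # d1 and d2 are the generalized eigenvalues at (1-based) positions 1 and 7;
+ # tgexc below moves position 7 to the front, so d2 should end up at the
+ # leading position and d1 just behind it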
+
+ if dtype in COMPLEX_DTYPES:
+ assert_allclose(s, np.triu(s), rtol=0, atol=atol)
+ assert_allclose(t, np.triu(t), rtol=0, atol=atol)
+
+ assert_allclose(q @ s @ z.conj().T, a, rtol=0, atol=atol)
+ assert_allclose(q @ t @ z.conj().T, b, rtol=0, atol=atol)
+
+ result = tgexc(s, t, q, z, 7, 1)
+ assert_equal(result[-1], 0)
+
+ s = result[0]
+ t = result[1]
+ q = result[2]
+ z = result[3]
+
+ if dtype in COMPLEX_DTYPES:
+ assert_allclose(s, np.triu(s), rtol=0, atol=atol)
+ assert_allclose(t, np.triu(t), rtol=0, atol=atol)
+
+ assert_allclose(q @ s @ z.conj().T, a, rtol=0, atol=atol)
+ assert_allclose(q @ t @ z.conj().T, b, rtol=0, atol=atol)
+
+ assert_allclose(s[0, 0] / t[0, 0], d2, rtol=0, atol=atol)
+ assert_allclose(s[1, 1] / t[1, 1], d1, rtol=0, atol=atol)
+
+
+@pytest.mark.parametrize('dtype', DTYPES)
+def test_gees_trsen(dtype):
+ seed(1234)
+ atol = np.finfo(dtype).eps*100
+
+ n = 10
+ a = generate_random_dtype_array([n, n], dtype=dtype)
+
+ gees, trsen, trsen_lwork = get_lapack_funcs(
+ ('gees', 'trsen', 'trsen_lwork'), dtype=dtype)
+
+ result = gees(lambda x: None, a, overwrite_a=False)
+ assert_equal(result[-1], 0)
+
+ t = result[0]
+ z = result[-3]
+
+ d2 = t[6, 6]
+
+ if dtype in COMPLEX_DTYPES:
+ assert_allclose(t, np.triu(t), rtol=0, atol=atol)
+
+ assert_allclose(z @ t @ z.conj().T, a, rtol=0, atol=atol)
+
+ select = np.zeros(n)
+ select[6] = 1
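+ # only the eigenvalue at (1-based) position 7 is selected; trsen reorders
+ # the Schur form so that it moves to the leading position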
+
+ lwork = _compute_lwork(trsen_lwork, select, t)
+
+ if dtype in COMPLEX_DTYPES:
+ result = trsen(select, t, z, lwork=lwork)
+ else:
+ result = trsen(select, t, z, lwork=lwork, liwork=lwork[1])
+ assert_equal(result[-1], 0)
+
+ t = result[0]
+ z = result[1]
+
+ if dtype in COMPLEX_DTYPES:
+ assert_allclose(t, np.triu(t), rtol=0, atol=atol)
+
+ assert_allclose(z @ t @ z.conj().T, a, rtol=0, atol=atol)
+
+ assert_allclose(t[0, 0], d2, rtol=0, atol=atol)
+
+
+@pytest.mark.parametrize(
+ "t, q, expect, select, expect_s, expect_sep",
+ [(np.array([[0.7995, -0.1144, 0.0060, 0.0336],
+ [0.0000, -0.0994, 0.2478, 0.3474],
+ [0.0000, -0.6483, -0.0994, 0.2026],
+ [0.0000, 0.0000, 0.0000, -0.1007]]),
+ np.array([[0.6551, 0.1037, 0.3450, 0.6641],
+ [0.5236, -0.5807, -0.6141, -0.1068],
+ [-0.5362, -0.3073, -0.2935, 0.7293],
+ [0.0956, 0.7467, -0.6463, 0.1249]]),
+ np.array([[0.3500, 0.4500, -0.1400, -0.1700],
+ [0.0900, 0.0700, -0.5399, 0.3500],
+ [-0.4400, -0.3300, -0.0300, 0.1700],
+ [0.2500, -0.3200, -0.1300, 0.1100]]),
+ np.array([1, 0, 0, 1]),
+ 1.75e+00, 3.22e+00),
+ (np.array([[-6.0004 - 6.9999j, 0.3637 - 0.3656j,
+ -0.1880 + 0.4787j, 0.8785 - 0.2539j],
+ [0.0000 + 0.0000j, -5.0000 + 2.0060j,
+ -0.0307 - 0.7217j, -0.2290 + 0.1313j],
+ [0.0000 + 0.0000j, 0.0000 + 0.0000j,
+ 7.9982 - 0.9964j, 0.9357 + 0.5359j],
+ [0.0000 + 0.0000j, 0.0000 + 0.0000j,
+ 0.0000 + 0.0000j, 3.0023 - 3.9998j]]),
+ np.array([[-0.8347 - 0.1364j, -0.0628 + 0.3806j,
+ 0.2765 - 0.0846j, 0.0633 - 0.2199j],
+ [0.0664 - 0.2968j, 0.2365 + 0.5240j,
+ -0.5877 - 0.4208j, 0.0835 + 0.2183j],
+ [-0.0362 - 0.3215j, 0.3143 - 0.5473j,
+ 0.0576 - 0.5736j, 0.0057 - 0.4058j],
+ [0.0086 + 0.2958j, -0.3416 - 0.0757j,
+ -0.1900 - 0.1600j, 0.8327 - 0.1868j]]),
+ np.array([[-3.9702 - 5.0406j, -4.1108 + 3.7002j,
+ -0.3403 + 1.0098j, 1.2899 - 0.8590j],
+ [0.3397 - 1.5006j, 1.5201 - 0.4301j,
+ 1.8797 - 5.3804j, 3.3606 + 0.6498j],
+ [3.3101 - 3.8506j, 2.4996 + 3.4504j,
+ 0.8802 - 1.0802j, 0.6401 - 1.4800j],
+ [-1.0999 + 0.8199j, 1.8103 - 1.5905j,
+ 3.2502 + 1.3297j, 1.5701 - 3.4397j]]),
+ np.array([1, 0, 0, 1]),
+ 1.02e+00, 1.82e-01)])
+def test_trsen_NAG(t, q, select, expect, expect_s, expect_sep):
+ """
+ This test implements the example found in the NAG manual,
+ f08qgc, f08quc.
+ """
+ # NAG manual provides accuracy up to 4 and 2 decimals
+ atol = 1e-4
+ atol2 = 1e-2
+ trsen, trsen_lwork = get_lapack_funcs(
+ ('trsen', 'trsen_lwork'), dtype=t.dtype)
+
+ lwork = _compute_lwork(trsen_lwork, select, t)
+
+ if t.dtype in COMPLEX_DTYPES:
+ result = trsen(select, t, q, lwork=lwork)
+ else:
+ result = trsen(select, t, q, lwork=lwork, liwork=lwork[1])
+ assert_equal(result[-1], 0)
+
+ t = result[0]
+ q = result[1]
+ if t.dtype in COMPLEX_DTYPES:
+ s = result[4]
+ sep = result[5]
+ else:
+ s = result[5]
+ sep = result[6]
+
+ assert_allclose(expect, q @ t @ q.conj().T, atol=atol)
+ assert_allclose(expect_s, 1 / s, atol=atol2)
+ assert_allclose(expect_sep, 1 / sep, atol=atol2)
+
+
+@pytest.mark.parametrize('dtype', DTYPES)
+def test_gges_tgsen(dtype):
+ if (
+ dtype == np.float32 and
+ sys.platform == 'darwin' and
+ blas_provider == 'openblas' and
+ blas_version < '0.3.21.dev'
+ ):
+ pytest.xfail("gges[float32] broken for OpenBLAS on macOS, see gh-16949")
+
+ seed(1234)
+ atol = np.finfo(dtype).eps*100
+
+ n = 10
+ a = generate_random_dtype_array([n, n], dtype=dtype)
+ b = generate_random_dtype_array([n, n], dtype=dtype)
+
+ gges, tgsen, tgsen_lwork = get_lapack_funcs(
+ ('gges', 'tgsen', 'tgsen_lwork'), dtype=dtype)
+
+ result = gges(lambda x: None, a, b, overwrite_a=False, overwrite_b=False)
+ assert_equal(result[-1], 0)
+
+ s = result[0]
+ t = result[1]
+ q = result[-4]
+ z = result[-3]
+
+ d1 = s[0, 0] / t[0, 0]
+ d2 = s[6, 6] / t[6, 6]
+
+ if dtype in COMPLEX_DTYPES:
+ assert_allclose(s, np.triu(s), rtol=0, atol=atol)
+ assert_allclose(t, np.triu(t), rtol=0, atol=atol)
+
+ assert_allclose(q @ s @ z.conj().T, a, rtol=0, atol=atol)
+ assert_allclose(q @ t @ z.conj().T, b, rtol=0, atol=atol)
+
+ select = np.zeros(n)
+ select[6] = 1
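+ # only the eigenvalue at (1-based) position 7 is selected; tgsen reorders
+ # the generalized Schur form so that it moves to the leading position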
+
+ lwork = _compute_lwork(tgsen_lwork, select, s, t)
+
+ # off-by-one error in LAPACK, see gh-13397
+ lwork = (lwork[0]+1, lwork[1])
+
+ result = tgsen(select, s, t, q, z, lwork=lwork)
+ assert_equal(result[-1], 0)
+
+ s = result[0]
+ t = result[1]
+ q = result[-7]
+ z = result[-6]
+
+ if dtype in COMPLEX_DTYPES:
+ assert_allclose(s, np.triu(s), rtol=0, atol=atol)
+ assert_allclose(t, np.triu(t), rtol=0, atol=atol)
+
+ assert_allclose(q @ s @ z.conj().T, a, rtol=0, atol=atol)
+ assert_allclose(q @ t @ z.conj().T, b, rtol=0, atol=atol)
+
+ assert_allclose(s[0, 0] / t[0, 0], d2, rtol=0, atol=atol)
+ assert_allclose(s[1, 1] / t[1, 1], d1, rtol=0, atol=atol)
+
+
+@pytest.mark.parametrize(
+ "a, b, c, d, e, f, rans, lans",
+ [(np.array([[4.0, 1.0, 1.0, 2.0],
+ [0.0, 3.0, 4.0, 1.0],
+ [0.0, 1.0, 3.0, 1.0],
+ [0.0, 0.0, 0.0, 6.0]]),
+ np.array([[1.0, 1.0, 1.0, 1.0],
+ [0.0, 3.0, 4.0, 1.0],
+ [0.0, 1.0, 3.0, 1.0],
+ [0.0, 0.0, 0.0, 4.0]]),
+ np.array([[-4.0, 7.0, 1.0, 12.0],
+ [-9.0, 2.0, -2.0, -2.0],
+ [-4.0, 2.0, -2.0, 8.0],
+ [-7.0, 7.0, -6.0, 19.0]]),
+ np.array([[2.0, 1.0, 1.0, 3.0],
+ [0.0, 1.0, 2.0, 1.0],
+ [0.0, 0.0, 1.0, 1.0],
+ [0.0, 0.0, 0.0, 2.0]]),
+ np.array([[1.0, 1.0, 1.0, 2.0],
+ [0.0, 1.0, 4.0, 1.0],
+ [0.0, 0.0, 1.0, 1.0],
+ [0.0, 0.0, 0.0, 1.0]]),
+ np.array([[-7.0, 5.0, 0.0, 7.0],
+ [-5.0, 1.0, -8.0, 0.0],
+ [-1.0, 2.0, -3.0, 5.0],
+ [-3.0, 2.0, 0.0, 5.0]]),
+ np.array([[1.0, 1.0, 1.0, 1.0],
+ [-1.0, 2.0, -1.0, -1.0],
+ [-1.0, 1.0, 3.0, 1.0],
+ [-1.0, 1.0, -1.0, 4.0]]),
+ np.array([[4.0, -1.0, 1.0, -1.0],
+ [1.0, 3.0, -1.0, 1.0],
+ [-1.0, 1.0, 2.0, -1.0],
+ [1.0, -1.0, 1.0, 1.0]]))])
+@pytest.mark.parametrize('dtype', REAL_DTYPES)
+def test_tgsyl_NAG(a, b, c, d, e, f, rans, lans, dtype):
+ atol = 1e-4
+
+ tgsyl = get_lapack_funcs(('tgsyl'), dtype=dtype)
+ rout, lout, scale, dif, info = tgsyl(a, b, c, d, e, f)
+
+ assert_equal(info, 0)
+ assert_allclose(scale, 1.0, rtol=0, atol=np.finfo(dtype).eps*100,
+ err_msg="SCALE must be 1.0")
+ assert_allclose(dif, 0.0, rtol=0, atol=np.finfo(dtype).eps*100,
+ err_msg="DIF must be nearly 0")
+ assert_allclose(rout, rans, atol=atol,
+ err_msg="Solution for R is incorrect")
+ assert_allclose(lout, lans, atol=atol,
+ err_msg="Solution for L is incorrect")
+
+
+@pytest.mark.parametrize('dtype', REAL_DTYPES)
+@pytest.mark.parametrize('trans', ('N', 'T'))
+@pytest.mark.parametrize('ijob', [0, 1, 2, 3, 4])
+def test_tgsyl(dtype, trans, ijob):
+
+ atol = 1e-3 if dtype == np.float32 else 1e-10
+ rng = np.random.default_rng(1685779866898198)
+ m, n = 10, 15
+
+ a, d, *_ = qz(rng.uniform(-10, 10, [m, m]).astype(dtype),
+ rng.uniform(-10, 10, [m, m]).astype(dtype),
+ output='real')
+
+ b, e, *_ = qz(rng.uniform(-10, 10, [n, n]).astype(dtype),
+ rng.uniform(-10, 10, [n, n]).astype(dtype),
+ output='real')
+
+ c = rng.uniform(-2, 2, [m, n]).astype(dtype)
+ f = rng.uniform(-2, 2, [m, n]).astype(dtype)
+
+ tgsyl = get_lapack_funcs(('tgsyl'), dtype=dtype)
+ rout, lout, scale, dif, info = tgsyl(a, b, c, d, e, f,
+ trans=trans, ijob=ijob)
+
+ assert info == 0, "INFO is non-zero"
+ assert scale >= 0.0, "SCALE must be non-negative"
+ if ijob == 0:
+ assert_allclose(dif, 0.0, rtol=0, atol=np.finfo(dtype).eps*100,
+ err_msg="DIF must be 0 for ijob =0")
+ else:
+ assert dif >= 0.0, "DIF must be non-negative"
+
+ # Only DIF is calculated for ijob = 3/4
+ if ijob <= 2:
+ if trans == 'N':
+ lhs1 = a @ rout - lout @ b
+ rhs1 = scale*c
+ lhs2 = d @ rout - lout @ e
+ rhs2 = scale*f
+ elif trans == 'T':
+ lhs1 = np.transpose(a) @ rout + np.transpose(d) @ lout
+ rhs1 = scale*c
+ lhs2 = rout @ np.transpose(b) + lout @ np.transpose(e)
+ rhs2 = -1.0*scale*f
+
+ assert_allclose(lhs1, rhs1, atol=atol, rtol=0.,
+ err_msg='lhs1 and rhs1 do not match')
+ assert_allclose(lhs2, rhs2, atol=atol, rtol=0.,
+ err_msg='lhs2 and rhs2 do not match')
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_matfuncs.py b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_matfuncs.py
new file mode 100644
index 0000000000000000000000000000000000000000..b50122f81fa8be59aaa3bf3dca223fe7247c3301
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_matfuncs.py
@@ -0,0 +1,1013 @@
+#
+# Created by: Pearu Peterson, March 2002
+#
+""" Test functions for linalg.matfuncs module
+
+"""
+import random
+import functools
+
+import numpy as np
+from numpy import array, identity, dot, sqrt
+from numpy.testing import (assert_array_almost_equal, assert_allclose, assert_,
+ assert_array_less, assert_array_equal, assert_warns)
+import pytest
+
+import scipy.linalg
+from scipy.linalg import (funm, signm, logm, sqrtm, fractional_matrix_power,
+ expm, expm_frechet, expm_cond, norm, khatri_rao)
+from scipy.linalg import _matfuncs_inv_ssq
+from scipy.linalg._matfuncs import pick_pade_structure
+import scipy.linalg._expm_frechet
+
+from scipy.optimize import minimize
+
+
+def _get_al_mohy_higham_2012_experiment_1():
+ """
+ Return the test matrix from Experiment (1) of [1]_.
+
+ References
+ ----------
+ .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012)
+ "Improved Inverse Scaling and Squaring Algorithms
+ for the Matrix Logarithm."
+ SIAM Journal on Scientific Computing, 34 (4). C152-C169.
+ ISSN 1095-7197
+
+ """
+ A = np.array([
+ [3.2346e-1, 3e4, 3e4, 3e4],
+ [0, 3.0089e-1, 3e4, 3e4],
+ [0, 0, 3.2210e-1, 3e4],
+ [0, 0, 0, 3.0744e-1]], dtype=float)
+ return A
+
+
+class TestSignM:
+
+ def test_nils(self):
+ a = array([[29.2, -24.2, 69.5, 49.8, 7.],
+ [-9.2, 5.2, -18., -16.8, -2.],
+ [-10., 6., -20., -18., -2.],
+ [-9.6, 9.6, -25.5, -15.4, -2.],
+ [9.8, -4.8, 18., 18.2, 2.]])
+ cr = array([[11.94933333,-2.24533333,15.31733333,21.65333333,-2.24533333],
+ [-3.84266667,0.49866667,-4.59066667,-7.18666667,0.49866667],
+ [-4.08,0.56,-4.92,-7.6,0.56],
+ [-4.03466667,1.04266667,-5.59866667,-7.02666667,1.04266667],
+ [4.15733333,-0.50133333,4.90933333,7.81333333,-0.50133333]])
+ r = signm(a)
+ assert_array_almost_equal(r,cr)
+
+ def test_defective1(self):
+ a = array([[0.0,1,0,0],[1,0,1,0],[0,0,0,1],[0,0,1,0]])
+ signm(a, disp=False)
+ #XXX: what would be the correct result?
+
+ def test_defective2(self):
+ a = array((
+ [29.2,-24.2,69.5,49.8,7.0],
+ [-9.2,5.2,-18.0,-16.8,-2.0],
+ [-10.0,6.0,-20.0,-18.0,-2.0],
+ [-9.6,9.6,-25.5,-15.4,-2.0],
+ [9.8,-4.8,18.0,18.2,2.0]))
+ signm(a, disp=False)
+ #XXX: what would be the correct result?
+
+ def test_defective3(self):
+ a = array([[-2., 25., 0., 0., 0., 0., 0.],
+ [0., -3., 10., 3., 3., 3., 0.],
+ [0., 0., 2., 15., 3., 3., 0.],
+ [0., 0., 0., 0., 15., 3., 0.],
+ [0., 0., 0., 0., 3., 10., 0.],
+ [0., 0., 0., 0., 0., -2., 25.],
+ [0., 0., 0., 0., 0., 0., -3.]])
+ signm(a, disp=False)
+ #XXX: what would be the correct result?
+
+
+class TestLogM:
+
+ def test_nils(self):
+ a = array([[-2., 25., 0., 0., 0., 0., 0.],
+ [0., -3., 10., 3., 3., 3., 0.],
+ [0., 0., 2., 15., 3., 3., 0.],
+ [0., 0., 0., 0., 15., 3., 0.],
+ [0., 0., 0., 0., 3., 10., 0.],
+ [0., 0., 0., 0., 0., -2., 25.],
+ [0., 0., 0., 0., 0., 0., -3.]])
+ m = (identity(7)*3.1+0j)-a
+ logm(m, disp=False)
+ #XXX: what would be the correct result?
+
+ def test_al_mohy_higham_2012_experiment_1_logm(self):
+ # The logm completes the round trip successfully.
+ # Note that the expm leg of the round trip is badly conditioned.
+ A = _get_al_mohy_higham_2012_experiment_1()
+ A_logm, info = logm(A, disp=False)
+ A_round_trip = expm(A_logm)
+ assert_allclose(A_round_trip, A, rtol=5e-5, atol=1e-14)
+
+ def test_al_mohy_higham_2012_experiment_1_funm_log(self):
+ # The raw funm with np.log does not complete the round trip.
+ # Note that the expm leg of the round trip is badly conditioned.
+ A = _get_al_mohy_higham_2012_experiment_1()
+ A_funm_log, info = funm(A, np.log, disp=False)
+ A_round_trip = expm(A_funm_log)
+ assert_(not np.allclose(A_round_trip, A, rtol=1e-5, atol=1e-14))
+
+ def test_round_trip_random_float(self):
+ np.random.seed(1234)
+ for n in range(1, 6):
+ M_unscaled = np.random.randn(n, n)
+ for scale in np.logspace(-4, 4, 9):
+ M = M_unscaled * scale
+
+ # Eigenvalues are related to the branch cut.
+ W = np.linalg.eigvals(M)
+ err_msg = f'M:{M} eivals:{W}'
+
+ # Check sqrtm round trip because it is used within logm.
+ M_sqrtm, info = sqrtm(M, disp=False)
+ M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm)
+ assert_allclose(M_sqrtm_round_trip, M)
+
+ # Check logm round trip.
+ M_logm, info = logm(M, disp=False)
+ M_logm_round_trip = expm(M_logm)
+ assert_allclose(M_logm_round_trip, M, err_msg=err_msg)
+
+ def test_round_trip_random_complex(self):
+ np.random.seed(1234)
+ for n in range(1, 6):
+ M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n)
+ for scale in np.logspace(-4, 4, 9):
+ M = M_unscaled * scale
+ M_logm, info = logm(M, disp=False)
+ M_round_trip = expm(M_logm)
+ assert_allclose(M_round_trip, M)
+
+ def test_logm_type_preservation_and_conversion(self):
+ # The logm matrix function should preserve the type of a matrix
+ # whose eigenvalues are positive with zero imaginary part.
+ # Test this preservation for variously structured matrices.
+ complex_dtype_chars = ('F', 'D', 'G')
+ for matrix_as_list in (
+ [[1, 0], [0, 1]],
+ [[1, 0], [1, 1]],
+ [[2, 1], [1, 1]],
+ [[2, 3], [1, 2]]):
+
+ # check that the spectrum has the expected properties
+ W = scipy.linalg.eigvals(matrix_as_list)
+ assert_(not any(w.imag or w.real < 0 for w in W))
+
+ # check float type preservation
+ A = np.array(matrix_as_list, dtype=float)
+ A_logm, info = logm(A, disp=False)
+ assert_(A_logm.dtype.char not in complex_dtype_chars)
+
+ # check complex type preservation
+ A = np.array(matrix_as_list, dtype=complex)
+ A_logm, info = logm(A, disp=False)
+ assert_(A_logm.dtype.char in complex_dtype_chars)
+
+ # check float->complex type conversion for the matrix negation
+ A = -np.array(matrix_as_list, dtype=float)
+ A_logm, info = logm(A, disp=False)
+ assert_(A_logm.dtype.char in complex_dtype_chars)
+
+ def test_complex_spectrum_real_logm(self):
+ # This matrix has complex eigenvalues and real logm.
+ # Its output dtype depends on its input dtype.
+ M = [[1, 1, 2], [2, 1, 1], [1, 2, 1]]
+ for dt in float, complex:
+ X = np.array(M, dtype=dt)
+ w = scipy.linalg.eigvals(X)
+ assert_(1e-2 < np.absolute(w.imag).sum())
+ Y, info = logm(X, disp=False)
+ assert_(np.issubdtype(Y.dtype, np.inexact))
+ assert_allclose(expm(Y), X)
+
+ def test_real_mixed_sign_spectrum(self):
+ # These matrices have real eigenvalues with mixed signs.
+ # The output logm dtype is complex, regardless of input dtype.
+ for M in (
+ [[1, 0], [0, -1]],
+ [[0, 1], [1, 0]]):
+ for dt in float, complex:
+ A = np.array(M, dtype=dt)
+ A_logm, info = logm(A, disp=False)
+ assert_(np.issubdtype(A_logm.dtype, np.complexfloating))
+
+ def test_exactly_singular(self):
+ A = np.array([[0, 0], [1j, 1j]])
+ B = np.asarray([[1, 1], [0, 0]])
+ for M in A, A.T, B, B.T:
+ expected_warning = _matfuncs_inv_ssq.LogmExactlySingularWarning
+ L, info = assert_warns(expected_warning, logm, M, disp=False)
+ E = expm(L)
+ assert_allclose(E, M, atol=1e-14)
+
+ def test_nearly_singular(self):
+ M = np.array([[1e-100]])
+ expected_warning = _matfuncs_inv_ssq.LogmNearlySingularWarning
+ L, info = assert_warns(expected_warning, logm, M, disp=False)
+ E = expm(L)
+ assert_allclose(E, M, atol=1e-14)
+
+ def test_opposite_sign_complex_eigenvalues(self):
+ # See gh-6113
+ E = [[0, 1], [-1, 0]]
+ L = [[0, np.pi*0.5], [-np.pi*0.5, 0]]
+ assert_allclose(expm(L), E, atol=1e-14)
+ assert_allclose(logm(E), L, atol=1e-14)
+ E = [[1j, 4], [0, -1j]]
+ L = [[1j*np.pi*0.5, 2*np.pi], [0, -1j*np.pi*0.5]]
+ assert_allclose(expm(L), E, atol=1e-14)
+ assert_allclose(logm(E), L, atol=1e-14)
+ E = [[1j, 0], [0, -1j]]
+ L = [[1j*np.pi*0.5, 0], [0, -1j*np.pi*0.5]]
+ assert_allclose(expm(L), E, atol=1e-14)
+ assert_allclose(logm(E), L, atol=1e-14)
+
+ def test_readonly(self):
+ n = 5
+ a = np.ones((n, n)) + np.identity(n)
+ a.flags.writeable = False
+ logm(a)
+
+
+class TestSqrtM:
+ def test_round_trip_random_float(self):
+ np.random.seed(1234)
+ for n in range(1, 6):
+ M_unscaled = np.random.randn(n, n)
+ for scale in np.logspace(-4, 4, 9):
+ M = M_unscaled * scale
+ M_sqrtm, info = sqrtm(M, disp=False)
+ M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm)
+ assert_allclose(M_sqrtm_round_trip, M)
+
+ def test_round_trip_random_complex(self):
+ np.random.seed(1234)
+ for n in range(1, 6):
+ M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n)
+ for scale in np.logspace(-4, 4, 9):
+ M = M_unscaled * scale
+ M_sqrtm, info = sqrtm(M, disp=False)
+ M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm)
+ assert_allclose(M_sqrtm_round_trip, M)
+
+ def test_bad(self):
+ # See https://web.archive.org/web/20051220232650/http://www.maths.man.ac.uk/~nareports/narep336.ps.gz
+ e = 2**-5
+ se = sqrt(e)
+ a = array([[1.0,0,0,1],
+ [0,e,0,0],
+ [0,0,e,0],
+ [0,0,0,1]])
+ sa = array([[1,0,0,0.5],
+ [0,se,0,0],
+ [0,0,se,0],
+ [0,0,0,1]])
+ n = a.shape[0]
+ assert_array_almost_equal(dot(sa,sa),a)
+ # Check default sqrtm.
+ esa = sqrtm(a, disp=False, blocksize=n)[0]
+ assert_array_almost_equal(dot(esa,esa),a)
+ # Check sqrtm with 2x2 blocks.
+ esa = sqrtm(a, disp=False, blocksize=2)[0]
+ assert_array_almost_equal(dot(esa,esa),a)
+
+ def test_sqrtm_type_preservation_and_conversion(self):
+ # The sqrtm matrix function should preserve the type of a matrix
+ # whose eigenvalues are nonnegative with zero imaginary part.
+ # Test this preservation for variously structured matrices.
+ complex_dtype_chars = ('F', 'D', 'G')
+ for matrix_as_list in (
+ [[1, 0], [0, 1]],
+ [[1, 0], [1, 1]],
+ [[2, 1], [1, 1]],
+ [[2, 3], [1, 2]],
+ [[1, 1], [1, 1]]):
+
+ # check that the spectrum has the expected properties
+ W = scipy.linalg.eigvals(matrix_as_list)
+ assert_(not any(w.imag or w.real < 0 for w in W))
+
+ # check float type preservation
+ A = np.array(matrix_as_list, dtype=float)
+ A_sqrtm, info = sqrtm(A, disp=False)
+ assert_(A_sqrtm.dtype.char not in complex_dtype_chars)
+
+ # check complex type preservation
+ A = np.array(matrix_as_list, dtype=complex)
+ A_sqrtm, info = sqrtm(A, disp=False)
+ assert_(A_sqrtm.dtype.char in complex_dtype_chars)
+
+ # check float->complex type conversion for the matrix negation
+ A = -np.array(matrix_as_list, dtype=float)
+ A_sqrtm, info = sqrtm(A, disp=False)
+ assert_(A_sqrtm.dtype.char in complex_dtype_chars)
+
+ def test_sqrtm_type_conversion_mixed_sign_or_complex_spectrum(self):
+ complex_dtype_chars = ('F', 'D', 'G')
+ for matrix_as_list in (
+ [[1, 0], [0, -1]],
+ [[0, 1], [1, 0]],
+ [[0, 1, 0], [0, 0, 1], [1, 0, 0]]):
+
+ # check that the spectrum has the expected properties
+ W = scipy.linalg.eigvals(matrix_as_list)
+ assert_(any(w.imag or w.real < 0 for w in W))
+
+ # check complex->complex
+ A = np.array(matrix_as_list, dtype=complex)
+ A_sqrtm, info = sqrtm(A, disp=False)
+ assert_(A_sqrtm.dtype.char in complex_dtype_chars)
+
+ # check float->complex
+ A = np.array(matrix_as_list, dtype=float)
+ A_sqrtm, info = sqrtm(A, disp=False)
+ assert_(A_sqrtm.dtype.char in complex_dtype_chars)
+
+ def test_blocksizes(self):
+ # Make sure I do not goof up the blocksizes when they do not divide n.
+ np.random.seed(1234)
+ for n in range(1, 8):
+ A = np.random.rand(n, n) + 1j*np.random.randn(n, n)
+ A_sqrtm_default, info = sqrtm(A, disp=False, blocksize=n)
+ assert_allclose(A, np.linalg.matrix_power(A_sqrtm_default, 2))
+ for blocksize in range(1, 10):
+ A_sqrtm_new, info = sqrtm(A, disp=False, blocksize=blocksize)
+ assert_allclose(A_sqrtm_default, A_sqrtm_new)
+
+ def test_al_mohy_higham_2012_experiment_1(self):
+ # Matrix square root of a tricky upper triangular matrix.
+ A = _get_al_mohy_higham_2012_experiment_1()
+ A_sqrtm, info = sqrtm(A, disp=False)
+ A_round_trip = A_sqrtm.dot(A_sqrtm)
+ assert_allclose(A_round_trip, A, rtol=1e-5)
+ assert_allclose(np.tril(A_round_trip), np.tril(A))
+
+ def test_strict_upper_triangular(self):
+ # This matrix has no square root.
+ for dt in int, float:
+ A = np.array([
+ [0, 3, 0, 0],
+ [0, 0, 3, 0],
+ [0, 0, 0, 3],
+ [0, 0, 0, 0]], dtype=dt)
+ A_sqrtm, info = sqrtm(A, disp=False)
+ assert_(np.isnan(A_sqrtm).all())
+
+ def test_weird_matrix(self):
+ # The square root of matrix B exists.
+ for dt in int, float:
+ A = np.array([
+ [0, 0, 1],
+ [0, 0, 0],
+ [0, 1, 0]], dtype=dt)
+ B = np.array([
+ [0, 1, 0],
+ [0, 0, 0],
+ [0, 0, 0]], dtype=dt)
+ assert_array_equal(B, A.dot(A))
+
+ # But scipy sqrtm is not clever enough to find it.
+ B_sqrtm, info = sqrtm(B, disp=False)
+ assert_(np.isnan(B_sqrtm).all())
+
+ def test_disp(self):
+ np.random.seed(1234)
+
+ A = np.random.rand(3, 3)
+ B = sqrtm(A, disp=True)
+ assert_allclose(B.dot(B), A)
+
+ def test_opposite_sign_complex_eigenvalues(self):
+ M = [[2j, 4], [0, -2j]]
+ R = [[1+1j, 2], [0, 1-1j]]
+ assert_allclose(np.dot(R, R), M, atol=1e-14)
+ assert_allclose(sqrtm(M), R, atol=1e-14)
+
+ def test_gh4866(self):
+ M = np.array([[1, 0, 0, 1],
+ [0, 0, 0, 0],
+ [0, 0, 0, 0],
+ [1, 0, 0, 1]])
+ R = np.array([[sqrt(0.5), 0, 0, sqrt(0.5)],
+ [0, 0, 0, 0],
+ [0, 0, 0, 0],
+ [sqrt(0.5), 0, 0, sqrt(0.5)]])
+ assert_allclose(np.dot(R, R), M, atol=1e-14)
+ assert_allclose(sqrtm(M), R, atol=1e-14)
+
+ def test_gh5336(self):
+ M = np.diag([2, 1, 0])
+ R = np.diag([sqrt(2), 1, 0])
+ assert_allclose(np.dot(R, R), M, atol=1e-14)
+ assert_allclose(sqrtm(M), R, atol=1e-14)
+
+ def test_gh7839(self):
+ M = np.zeros((2, 2))
+ R = np.zeros((2, 2))
+ assert_allclose(np.dot(R, R), M, atol=1e-14)
+ assert_allclose(sqrtm(M), R, atol=1e-14)
+
+ @pytest.mark.xfail(reason="failing on macOS after gh-20212")
+ def test_gh17918(self):
+ M = np.empty((19, 19))
+ M.fill(0.94)
+ np.fill_diagonal(M, 1)
+ assert np.isrealobj(sqrtm(M))
+
+ def test_data_size_preservation_uint_in_float_out(self):
+ M = np.zeros((10, 10), dtype=np.uint8)
+ # input bit size is 8, but minimum float bit size is 16
+ assert sqrtm(M).dtype == np.float16
+ M = np.zeros((10, 10), dtype=np.uint16)
+ assert sqrtm(M).dtype == np.float16
+ M = np.zeros((10, 10), dtype=np.uint32)
+ assert sqrtm(M).dtype == np.float32
+ M = np.zeros((10, 10), dtype=np.uint64)
+ assert sqrtm(M).dtype == np.float64
+
+ def test_data_size_preservation_int_in_float_out(self):
+ M = np.zeros((10, 10), dtype=np.int8)
+ # input bit size is 8, but minimum float bit size is 16
+ assert sqrtm(M).dtype == np.float16
+ M = np.zeros((10, 10), dtype=np.int16)
+ assert sqrtm(M).dtype == np.float16
+ M = np.zeros((10, 10), dtype=np.int32)
+ assert sqrtm(M).dtype == np.float32
+ M = np.zeros((10, 10), dtype=np.int64)
+ assert sqrtm(M).dtype == np.float64
+
+ def test_data_size_preservation_int_in_comp_out(self):
+ M = np.array([[2, 4], [0, -2]], dtype=np.int8)
+ # input bit size is 8, but minimum complex bit size is 64
+ assert sqrtm(M).dtype == np.complex64
+ M = np.array([[2, 4], [0, -2]], dtype=np.int16)
+ # input bit size is 16, but minimum complex bit size is 64
+ assert sqrtm(M).dtype == np.complex64
+ M = np.array([[2, 4], [0, -2]], dtype=np.int32)
+ assert sqrtm(M).dtype == np.complex64
+ M = np.array([[2, 4], [0, -2]], dtype=np.int64)
+ assert sqrtm(M).dtype == np.complex128
+
+ def test_data_size_preservation_float_in_float_out(self):
+ M = np.zeros((10, 10), dtype=np.float16)
+ assert sqrtm(M).dtype == np.float16
+ M = np.zeros((10, 10), dtype=np.float32)
+ assert sqrtm(M).dtype == np.float32
+ M = np.zeros((10, 10), dtype=np.float64)
+ assert sqrtm(M).dtype == np.float64
+ if hasattr(np, 'float128'):
+ M = np.zeros((10, 10), dtype=np.float128)
+ assert sqrtm(M).dtype == np.float128
+
+ def test_data_size_preservation_float_in_comp_out(self):
+ M = np.array([[2, 4], [0, -2]], dtype=np.float16)
+ # input bit size is 16, but minimum complex bit size is 64
+ assert sqrtm(M).dtype == np.complex64
+ M = np.array([[2, 4], [0, -2]], dtype=np.float32)
+ assert sqrtm(M).dtype == np.complex64
+ M = np.array([[2, 4], [0, -2]], dtype=np.float64)
+ assert sqrtm(M).dtype == np.complex128
+ if hasattr(np, 'float128') and hasattr(np, 'complex256'):
+ M = np.array([[2, 4], [0, -2]], dtype=np.float128)
+ assert sqrtm(M).dtype == np.complex256
+
+ def test_data_size_preservation_comp_in_comp_out(self):
+ M = np.array([[2j, 4], [0, -2j]], dtype=np.complex64)
+ assert sqrtm(M).dtype == np.complex128
+ if hasattr(np, 'complex256'):
+ M = np.array([[2j, 4], [0, -2j]], dtype=np.complex128)
+ assert sqrtm(M).dtype == np.complex256
+ M = np.array([[2j, 4], [0, -2j]], dtype=np.complex256)
+ assert sqrtm(M).dtype == np.complex256
+
+
+class TestFractionalMatrixPower:
+ def test_round_trip_random_complex(self):
+ np.random.seed(1234)
+ for p in range(1, 5):
+ for n in range(1, 5):
+ M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n)
+ for scale in np.logspace(-4, 4, 9):
+ M = M_unscaled * scale
+ M_root = fractional_matrix_power(M, 1/p)
+ M_round_trip = np.linalg.matrix_power(M_root, p)
+ assert_allclose(M_round_trip, M)
+
+ def test_round_trip_random_float(self):
+ # This test is trickier because it can hit the branch cut;
+ # that happens when the matrix has an eigenvalue that is real and
+ # negative, in which case the principal branch does not exist.
+ np.random.seed(1234)
+ for p in range(1, 5):
+ for n in range(1, 5):
+ M_unscaled = np.random.randn(n, n)
+ for scale in np.logspace(-4, 4, 9):
+ M = M_unscaled * scale
+ M_root = fractional_matrix_power(M, 1/p)
+ M_round_trip = np.linalg.matrix_power(M_root, p)
+ assert_allclose(M_round_trip, M)
+
+ def test_larger_abs_fractional_matrix_powers(self):
+ np.random.seed(1234)
+ for n in (2, 3, 5):
+ for i in range(10):
+ M = np.random.randn(n, n) + 1j * np.random.randn(n, n)
+ M_one_fifth = fractional_matrix_power(M, 0.2)
+ # Test the round trip.
+ M_round_trip = np.linalg.matrix_power(M_one_fifth, 5)
+ assert_allclose(M, M_round_trip)
+ # Test a large abs fractional power.
+ X = fractional_matrix_power(M, -5.4)
+ Y = np.linalg.matrix_power(M_one_fifth, -27)
+ assert_allclose(X, Y)
+ # Test another large abs fractional power.
+ X = fractional_matrix_power(M, 3.8)
+ Y = np.linalg.matrix_power(M_one_fifth, 19)
+ assert_allclose(X, Y)
+
+ def test_random_matrices_and_powers(self):
+ # Each independent iteration of this fuzz test picks random parameters.
+ # It tries to hit some edge cases.
+ np.random.seed(1234)
+ nsamples = 20
+ for i in range(nsamples):
+ # Sample a matrix size and a random real power.
+ n = random.randrange(1, 5)
+ p = np.random.randn()
+
+ # Sample a random real or complex matrix.
+ matrix_scale = np.exp(random.randrange(-4, 5))
+ A = np.random.randn(n, n)
+ if random.choice((True, False)):
+ A = A + 1j * np.random.randn(n, n)
+ A = A * matrix_scale
+
+ # Check a couple of analytically equivalent ways
+ # to compute the fractional matrix power.
+ # These can be compared because they both use the principal branch.
+ A_power = fractional_matrix_power(A, p)
+ A_logm, info = logm(A, disp=False)
+ A_power_expm_logm = expm(A_logm * p)
+ assert_allclose(A_power, A_power_expm_logm)
+
+ def test_al_mohy_higham_2012_experiment_1(self):
+ # Fractional powers of a tricky upper triangular matrix.
+ A = _get_al_mohy_higham_2012_experiment_1()
+
+ # Test remainder matrix power.
+ A_funm_sqrt, info = funm(A, np.sqrt, disp=False)
+ A_sqrtm, info = sqrtm(A, disp=False)
+ A_rem_power = _matfuncs_inv_ssq._remainder_matrix_power(A, 0.5)
+ A_power = fractional_matrix_power(A, 0.5)
+ assert_allclose(A_rem_power, A_power, rtol=1e-11)
+ assert_allclose(A_sqrtm, A_power)
+ assert_allclose(A_sqrtm, A_funm_sqrt)
+
+ # Test more fractional powers.
+ for p in (1/2, 5/3):
+ A_power = fractional_matrix_power(A, p)
+ A_round_trip = fractional_matrix_power(A_power, 1/p)
+ assert_allclose(A_round_trip, A, rtol=1e-2)
+ assert_allclose(np.tril(A_round_trip, 1), np.tril(A, 1))
+
+ def test_briggs_helper_function(self):
+ np.random.seed(1234)
+ for a in np.random.randn(10) + 1j * np.random.randn(10):
+ for k in range(5):
+ x_observed = _matfuncs_inv_ssq._briggs_helper_function(a, k)
+ x_expected = a ** np.exp2(-k) - 1
+ assert_allclose(x_observed, x_expected)
+
+ def test_type_preservation_and_conversion(self):
+ # The fractional_matrix_power matrix function should preserve
+ # the type of a matrix whose eigenvalues
+ # are positive with zero imaginary part.
+ # Test this preservation for variously structured matrices.
+ complex_dtype_chars = ('F', 'D', 'G')
+ for matrix_as_list in (
+ [[1, 0], [0, 1]],
+ [[1, 0], [1, 1]],
+ [[2, 1], [1, 1]],
+ [[2, 3], [1, 2]]):
+
+ # check that the spectrum has the expected properties
+ W = scipy.linalg.eigvals(matrix_as_list)
+ assert_(not any(w.imag or w.real < 0 for w in W))
+
+ # Check various positive and negative powers
+ # with absolute values bigger and smaller than 1.
+ for p in (-2.4, -0.9, 0.2, 3.3):
+
+ # check float type preservation
+ A = np.array(matrix_as_list, dtype=float)
+ A_power = fractional_matrix_power(A, p)
+ assert_(A_power.dtype.char not in complex_dtype_chars)
+
+ # check complex type preservation
+ A = np.array(matrix_as_list, dtype=complex)
+ A_power = fractional_matrix_power(A, p)
+ assert_(A_power.dtype.char in complex_dtype_chars)
+
+ # check float->complex for the matrix negation
+ A = -np.array(matrix_as_list, dtype=float)
+ A_power = fractional_matrix_power(A, p)
+ assert_(A_power.dtype.char in complex_dtype_chars)
+
+ def test_type_conversion_mixed_sign_or_complex_spectrum(self):
+ complex_dtype_chars = ('F', 'D', 'G')
+ for matrix_as_list in (
+ [[1, 0], [0, -1]],
+ [[0, 1], [1, 0]],
+ [[0, 1, 0], [0, 0, 1], [1, 0, 0]]):
+
+ # check that the spectrum has the expected properties
+ W = scipy.linalg.eigvals(matrix_as_list)
+ assert_(any(w.imag or w.real < 0 for w in W))
+
+ # Check various positive and negative powers
+ # with absolute values bigger and smaller than 1.
+ for p in (-2.4, -0.9, 0.2, 3.3):
+
+ # check complex->complex
+ A = np.array(matrix_as_list, dtype=complex)
+ A_power = fractional_matrix_power(A, p)
+ assert_(A_power.dtype.char in complex_dtype_chars)
+
+ # check float->complex
+ A = np.array(matrix_as_list, dtype=float)
+ A_power = fractional_matrix_power(A, p)
+ assert_(A_power.dtype.char in complex_dtype_chars)
+
+ @pytest.mark.xfail(reason='Too unstable across LAPACKs.')
+ def test_singular(self):
+ # Negative fractional powers do not work with singular matrices.
+ for matrix_as_list in (
+ [[0, 0], [0, 0]],
+ [[1, 1], [1, 1]],
+ [[1, 2], [3, 6]],
+ [[0, 0, 0], [0, 1, 1], [0, -1, 1]]):
+
+ # Check fractional powers both for float and for complex types.
+ for newtype in (float, complex):
+ A = np.array(matrix_as_list, dtype=newtype)
+ for p in (-0.7, -0.9, -2.4, -1.3):
+ A_power = fractional_matrix_power(A, p)
+ assert_(np.isnan(A_power).all())
+ for p in (0.2, 1.43):
+ A_power = fractional_matrix_power(A, p)
+ A_round_trip = fractional_matrix_power(A_power, 1/p)
+ assert_allclose(A_round_trip, A)
+
+ def test_opposite_sign_complex_eigenvalues(self):
+ M = [[2j, 4], [0, -2j]]
+ R = [[1+1j, 2], [0, 1-1j]]
+ assert_allclose(np.dot(R, R), M, atol=1e-14)
+ assert_allclose(fractional_matrix_power(M, 0.5), R, atol=1e-14)
+
+
+class TestExpM:
+ def test_zero(self):
+ a = array([[0.,0],[0,0]])
+ assert_array_almost_equal(expm(a),[[1,0],[0,1]])
+
+ def test_single_elt(self):
+ elt = expm(1)
+ assert_allclose(elt, np.array([[np.e]]))
+
+ def test_empty_matrix_input(self):
+ # handle gh-11082
+ A = np.zeros((0, 0))
+ result = expm(A)
+ assert result.size == 0
+
+ def test_2x2_input(self):
+ E = np.e
+ a = array([[1, 4], [1, 1]])
+ aa = (E**4 + 1)/(2*E)
+ bb = (E**4 - 1)/E
+ assert_allclose(expm(a), array([[aa, bb], [bb/4, aa]]))
+ assert expm(a.astype(np.complex64)).dtype.char == 'F'
+ assert expm(a.astype(np.float32)).dtype.char == 'f'
+
+ def test_nx2x2_input(self):
+ E = np.e
+ # These are integer matrices with integer eigenvalues
+ a = np.array([[[1, 4], [1, 1]],
+ [[1, 3], [1, -1]],
+ [[1, 3], [4, 5]],
+ [[1, 3], [5, 3]],
+ [[4, 5], [-3, -4]]], order='F')
+ # Exact results are computed symbolically
+ a_res = np.array([
+ [[(E**4+1)/(2*E), (E**4-1)/E],
+ [(E**4-1)/4/E, (E**4+1)/(2*E)]],
+ [[1/(4*E**2)+(3*E**2)/4, (3*E**2)/4-3/(4*E**2)],
+ [E**2/4-1/(4*E**2), 3/(4*E**2)+E**2/4]],
+ [[3/(4*E)+E**7/4, -3/(8*E)+(3*E**7)/8],
+ [-1/(2*E)+E**7/2, 1/(4*E)+(3*E**7)/4]],
+ [[5/(8*E**2)+(3*E**6)/8, -3/(8*E**2)+(3*E**6)/8],
+ [-5/(8*E**2)+(5*E**6)/8, 3/(8*E**2)+(5*E**6)/8]],
+ [[-3/(2*E)+(5*E)/2, -5/(2*E)+(5*E)/2],
+ [3/(2*E)-(3*E)/2, 5/(2*E)-(3*E)/2]]
+ ])
+ assert_allclose(expm(a), a_res)
+
+ def test_readonly(self):
+ n = 7
+ a = np.ones((n, n))
+ a.flags.writeable = False
+ expm(a)
+
+ def test_gh18086(self):
+ A = np.zeros((400, 400), dtype=float)
+ rng = np.random.default_rng(100)
+ i = rng.integers(0, 399, 500)
+ j = rng.integers(0, 399, 500)
+ A[i, j] = rng.random(500)
+ # Problem appears when m = 9
+ Am = np.empty((5, 400, 400), dtype=float)
+ Am[0] = A.copy()
+ m, s = pick_pade_structure(Am)
+ assert m == 9
+ # Check that result is accurate
+ first_res = expm(A)
+ np.testing.assert_array_almost_equal(logm(first_res), A)
+ # Check that result is consistent
+ for i in range(5):
+ next_res = expm(A)
+ np.testing.assert_array_almost_equal(first_res, next_res)
+
+
+class TestExpmFrechet:
+
+ def test_expm_frechet(self):
+ # a test of the basic functionality
+ M = np.array([
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [0, 0, 1, 2],
+ [0, 0, 5, 6],
+ ], dtype=float)
+ A = np.array([
+ [1, 2],
+ [5, 6],
+ ], dtype=float)
+ E = np.array([
+ [3, 4],
+ [7, 8],
+ ], dtype=float)
+ expected_expm = scipy.linalg.expm(A)
+ expected_frechet = scipy.linalg.expm(M)[:2, 2:]
+ for kwargs in ({}, {'method':'SPS'}, {'method':'blockEnlarge'}):
+ observed_expm, observed_frechet = expm_frechet(A, E, **kwargs)
+ assert_allclose(expected_expm, observed_expm)
+ assert_allclose(expected_frechet, observed_frechet)
+
+ def test_small_norm_expm_frechet(self):
+ # methodically test matrices with a range of norms, for better coverage
+ M_original = np.array([
+ [1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [0, 0, 1, 2],
+ [0, 0, 5, 6],
+ ], dtype=float)
+ A_original = np.array([
+ [1, 2],
+ [5, 6],
+ ], dtype=float)
+ E_original = np.array([
+ [3, 4],
+ [7, 8],
+ ], dtype=float)
+ A_original_norm_1 = scipy.linalg.norm(A_original, 1)
+ selected_m_list = [1, 3, 5, 7, 9, 11, 13, 15]
+ m_neighbor_pairs = zip(selected_m_list[:-1], selected_m_list[1:])
+ for ma, mb in m_neighbor_pairs:
+ ell_a = scipy.linalg._expm_frechet.ell_table_61[ma]
+ ell_b = scipy.linalg._expm_frechet.ell_table_61[mb]
+ target_norm_1 = 0.5 * (ell_a + ell_b)
+ scale = target_norm_1 / A_original_norm_1
+ M = scale * M_original
+ A = scale * A_original
+ E = scale * E_original
+ expected_expm = scipy.linalg.expm(A)
+ expected_frechet = scipy.linalg.expm(M)[:2, 2:]
+ observed_expm, observed_frechet = expm_frechet(A, E)
+ assert_allclose(expected_expm, observed_expm)
+ assert_allclose(expected_frechet, observed_frechet)
+
+ def test_fuzz(self):
+ # try a wide range of randomly generated, randomly scaled inputs
+ rfuncs = (
+ np.random.uniform,
+ np.random.normal,
+ np.random.standard_cauchy,
+ np.random.exponential)
+ ntests = 100
+ for i in range(ntests):
+ rfunc = random.choice(rfuncs)
+ target_norm_1 = random.expovariate(1.0)
+ n = random.randrange(2, 16)
+ A_original = rfunc(size=(n,n))
+ E_original = rfunc(size=(n,n))
+ A_original_norm_1 = scipy.linalg.norm(A_original, 1)
+ scale = target_norm_1 / A_original_norm_1
+ A = scale * A_original
+ E = scale * E_original
+ M = np.vstack([
+ np.hstack([A, E]),
+ np.hstack([np.zeros_like(A), A])])
+ expected_expm = scipy.linalg.expm(A)
+ expected_frechet = scipy.linalg.expm(M)[:n, n:]
+ observed_expm, observed_frechet = expm_frechet(A, E)
+ assert_allclose(expected_expm, observed_expm, atol=5e-8)
+ assert_allclose(expected_frechet, observed_frechet, atol=1e-7)
+
+ def test_problematic_matrix(self):
+ # this test case uncovered a bug which has since been fixed
+ A = np.array([
+ [1.50591997, 1.93537998],
+ [0.41203263, 0.23443516],
+ ], dtype=float)
+ E = np.array([
+ [1.87864034, 2.07055038],
+ [1.34102727, 0.67341123],
+ ], dtype=float)
+ scipy.linalg.norm(A, 1)
+ sps_expm, sps_frechet = expm_frechet(
+ A, E, method='SPS')
+ blockEnlarge_expm, blockEnlarge_frechet = expm_frechet(
+ A, E, method='blockEnlarge')
+ assert_allclose(sps_expm, blockEnlarge_expm)
+ assert_allclose(sps_frechet, blockEnlarge_frechet)
+
+ @pytest.mark.slow
+ @pytest.mark.skip(reason='this test is deliberately slow')
+ def test_medium_matrix(self):
+ # profile this to see the speed difference
+ n = 1000
+ A = np.random.exponential(size=(n, n))
+ E = np.random.exponential(size=(n, n))
+ sps_expm, sps_frechet = expm_frechet(
+ A, E, method='SPS')
+ blockEnlarge_expm, blockEnlarge_frechet = expm_frechet(
+ A, E, method='blockEnlarge')
+ assert_allclose(sps_expm, blockEnlarge_expm)
+ assert_allclose(sps_frechet, blockEnlarge_frechet)
+
+
+def _help_expm_cond_search(A, A_norm, X, X_norm, eps, p):
+ p = np.reshape(p, A.shape)
+ p_norm = norm(p)
+ perturbation = eps * p * (A_norm / p_norm)
+ X_prime = expm(A + perturbation)
+ scaled_relative_error = norm(X_prime - X) / (X_norm * eps)
+ return -scaled_relative_error
+
+
+def _normalized_like(A, B):
+ return A * (scipy.linalg.norm(B) / scipy.linalg.norm(A))
+
+
+def _relative_error(f, A, perturbation):
+ X = f(A)
+ X_prime = f(A + perturbation)
+ return norm(X_prime - X) / norm(X)
+
+
+class TestExpmConditionNumber:
+ def test_expm_cond_smoke(self):
+ np.random.seed(1234)
+ for n in range(1, 4):
+ A = np.random.randn(n, n)
+ kappa = expm_cond(A)
+ assert_array_less(0, kappa)
+
+ def test_expm_bad_condition_number(self):
+ A = np.array([
+ [-1.128679820, 9.614183771e4, -4.524855739e9, 2.924969411e14],
+ [0, -1.201010529, 9.634696872e4, -4.681048289e9],
+ [0, 0, -1.132893222, 9.532491830e4],
+ [0, 0, 0, -1.179475332],
+ ])
+ kappa = expm_cond(A)
+ assert_array_less(1e36, kappa)
+
+ def test_univariate(self):
+ np.random.seed(12345)
+ for x in np.linspace(-5, 5, num=11):
+ A = np.array([[x]])
+ assert_allclose(expm_cond(A), abs(x))
+ for x in np.logspace(-2, 2, num=11):
+ A = np.array([[x]])
+ assert_allclose(expm_cond(A), abs(x))
+ for i in range(10):
+ A = np.random.randn(1, 1)
+ assert_allclose(expm_cond(A), np.absolute(A)[0, 0])
+
+ @pytest.mark.slow
+ def test_expm_cond_fuzz(self):
+ np.random.seed(12345)
+ eps = 1e-5
+ nsamples = 10
+ for i in range(nsamples):
+ n = np.random.randint(2, 5)
+ A = np.random.randn(n, n)
+ A_norm = scipy.linalg.norm(A)
+ X = expm(A)
+ X_norm = scipy.linalg.norm(X)
+ kappa = expm_cond(A)
+
+ # Look for the small perturbation that gives the greatest
+ # relative error.
+ f = functools.partial(_help_expm_cond_search,
+ A, A_norm, X, X_norm, eps)
+ guess = np.ones(n*n)
+ out = minimize(f, guess, method='L-BFGS-B')
+ xopt = out.x
+ yopt = f(xopt)
+ p_best = eps * _normalized_like(np.reshape(xopt, A.shape), A)
+ p_best_relerr = _relative_error(expm, A, p_best)
+ assert_allclose(p_best_relerr, -yopt * eps)
+
+ # Check that the identified perturbation indeed gives greater
+ # relative error than random perturbations with similar norms.
+ for j in range(5):
+ p_rand = eps * _normalized_like(np.random.randn(*A.shape), A)
+ assert_allclose(norm(p_best), norm(p_rand))
+ p_rand_relerr = _relative_error(expm, A, p_rand)
+ assert_array_less(p_rand_relerr, p_best_relerr)
+
+ # The greatest relative error should not be much greater than
+ # eps times the condition number kappa.
+ # In the limit as eps approaches zero it should never be greater.
+ assert_array_less(p_best_relerr, (1 + 2*eps) * eps * kappa)
+
+
+class TestKhatriRao:
+
+ def test_basic(self):
+ a = khatri_rao(array([[1, 2], [3, 4]]),
+ array([[5, 6], [7, 8]]))
+
+ assert_array_equal(a, array([[5, 12],
+ [7, 16],
+ [15, 24],
+ [21, 32]]))
+
+ b = khatri_rao(np.empty([2, 2]), np.empty([2, 2]))
+ assert_array_equal(b.shape, (4, 2))
+
+ def test_number_of_columns_equality(self):
+ with pytest.raises(ValueError):
+ a = array([[1, 2, 3],
+ [4, 5, 6]])
+ b = array([[1, 2],
+ [3, 4]])
+ khatri_rao(a, b)
+
+ def test_to_assure_2d_array(self):
+ with pytest.raises(ValueError):
+ # both arrays are 1-D
+ a = array([1, 2, 3])
+ b = array([4, 5, 6])
+ khatri_rao(a, b)
+
+ with pytest.raises(ValueError):
+ # first array is 1-D
+ a = array([1, 2, 3])
+ b = array([
+ [1, 2, 3],
+ [4, 5, 6]
+ ])
+ khatri_rao(a, b)
+
+ with pytest.raises(ValueError):
+ # second array is 1-D
+ a = array([
+ [1, 2, 3],
+ [7, 8, 9]
+ ])
+ b = array([4, 5, 6])
+ khatri_rao(a, b)
+
+ def test_equality_of_two_equations(self):
+ a = array([[1, 2], [3, 4]])
+ b = array([[5, 6], [7, 8]])
+
+ res1 = khatri_rao(a, b)
+ res2 = np.vstack([np.kron(a[:, k], b[:, k])
+ for k in range(b.shape[1])]).T
+
+ assert_array_equal(res1, res2)
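+
+
+# ---------------------------------------------------------------------------
+# Illustrative sketch (not part of the original SciPy test suite): the
+# TestExpmFrechet cases above compare expm_frechet(A, E) against the block
+# identity
+#     expm([[A, E], [0, A]]) = [[expm(A), L(A, E)], [0, expm(A)]],
+# where L(A, E) is the Frechet derivative of expm at A in the direction E.
+# The helper below re-checks that identity directly on a small random
+# example; the function name, size and seed are arbitrary choices made for
+# this sketch.
+def _sketch_expm_frechet_block_identity(n=3, seed=0):
+    rng = np.random.default_rng(seed)
+    A = rng.standard_normal((n, n))
+    E = rng.standard_normal((n, n))
+    # Enlarged block matrix [[A, E], [0, A]].
+    M = np.block([[A, E], [np.zeros((n, n)), A]])
+    expm_A, frechet_AE = expm_frechet(A, E)
+    # Top-left block reproduces expm(A); top-right block is the Frechet
+    # derivative of expm at A applied to E.
+    assert_allclose(expm(M)[:n, :n], expm_A, atol=1e-10)
+    assert_allclose(expm(M)[:n, n:], frechet_AE, atol=1e-10)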
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_matmul_toeplitz.py b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_matmul_toeplitz.py
new file mode 100644
index 0000000000000000000000000000000000000000..b480e9d398ba29f95739eb8d497967aca6ec6cb3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_matmul_toeplitz.py
@@ -0,0 +1,125 @@
+"""Test functions for linalg.matmul_toeplitz function
+"""
+
+import numpy as np
+from scipy.linalg import toeplitz, matmul_toeplitz
+
+from pytest import raises as assert_raises
+from numpy.testing import assert_allclose
+
+
+class TestMatmulToeplitz:
+
+ def setup_method(self):
+ self.rng = np.random.RandomState(42)
+ self.tolerance = 1.5e-13
+
+ def test_real(self):
+ cases = []
+
+ n = 1
+ c = self.rng.normal(size=n)
+ r = self.rng.normal(size=n)
+ x = self.rng.normal(size=(n, 1))
+ cases.append((x, c, r, False))
+
+ n = 2
+ c = self.rng.normal(size=n)
+ r = self.rng.normal(size=n)
+ x = self.rng.normal(size=(n, 1))
+ cases.append((x, c, r, False))
+
+ n = 101
+ c = self.rng.normal(size=n)
+ r = self.rng.normal(size=n)
+ x = self.rng.normal(size=(n, 1))
+ cases.append((x, c, r, True))
+
+ n = 1000
+ c = self.rng.normal(size=n)
+ r = self.rng.normal(size=n)
+ x = self.rng.normal(size=(n, 1))
+ cases.append((x, c, r, False))
+
+ n = 100
+ c = self.rng.normal(size=n)
+ r = self.rng.normal(size=n)
+ x = self.rng.normal(size=(n, self.rng.randint(1, 10)))
+ cases.append((x, c, r, False))
+
+ n = 100
+ c = self.rng.normal(size=(n, 1))
+ r = self.rng.normal(size=(n, 1))
+ x = self.rng.normal(size=(n, self.rng.randint(1, 10)))
+ cases.append((x, c, r, True))
+
+ n = 100
+ c = self.rng.normal(size=(n, 1))
+ r = None
+ x = self.rng.normal(size=(n, self.rng.randint(1, 10)))
+ cases.append((x, c, r, True, -1))
+
+ n = 100
+ c = self.rng.normal(size=(n, 1))
+ r = None
+ x = self.rng.normal(size=n)
+ cases.append((x, c, r, False))
+
+ n = 101
+ c = self.rng.normal(size=n)
+ r = self.rng.normal(size=n-27)
+ x = self.rng.normal(size=(n-27, 1))
+ cases.append((x, c, r, True))
+
+ n = 100
+ c = self.rng.normal(size=n)
+ r = self.rng.normal(size=n//4)
+ x = self.rng.normal(size=(n//4, self.rng.randint(1, 10)))
+ cases.append((x, c, r, True))
+
+ [self.do(*i) for i in cases]
+
+ def test_complex(self):
+ n = 127
+ c = self.rng.normal(size=(n, 1)) + self.rng.normal(size=(n, 1))*1j
+ r = self.rng.normal(size=(n, 1)) + self.rng.normal(size=(n, 1))*1j
+ x = self.rng.normal(size=(n, 3)) + self.rng.normal(size=(n, 3))*1j
+ self.do(x, c, r, False)
+
+ n = 100
+ c = self.rng.normal(size=(n, 1)) + self.rng.normal(size=(n, 1))*1j
+ r = self.rng.normal(size=(n//2, 1)) +\
+ self.rng.normal(size=(n//2, 1))*1j
+ x = self.rng.normal(size=(n//2, 3)) +\
+ self.rng.normal(size=(n//2, 3))*1j
+ self.do(x, c, r, False)
+
+ def test_exceptions(self):
+
+ n = 100
+ c = self.rng.normal(size=n)
+ r = self.rng.normal(size=2*n)
+ x = self.rng.normal(size=n)
+ assert_raises(ValueError, matmul_toeplitz, (c, r), x, True)
+
+ n = 100
+ c = self.rng.normal(size=n)
+ r = self.rng.normal(size=n)
+ x = self.rng.normal(size=n-1)
+ assert_raises(ValueError, matmul_toeplitz, (c, r), x, True)
+
+ n = 100
+ c = self.rng.normal(size=n)
+ r = self.rng.normal(size=n//2)
+ x = self.rng.normal(size=n//2-1)
+ assert_raises(ValueError, matmul_toeplitz, (c, r), x, True)
+
+ # For toeplitz matrices, matmul_toeplitz() should be equivalent to @.
+ def do(self, x, c, r=None, check_finite=False, workers=None):
+ if r is None:
+ actual = matmul_toeplitz(c, x, check_finite, workers)
+ else:
+ actual = matmul_toeplitz((c, r), x, check_finite)
+ desired = toeplitz(c, r) @ x
+ assert_allclose(actual, desired,
+ rtol=self.tolerance, atol=self.tolerance)
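+
+
+# ---------------------------------------------------------------------------
+# Illustrative sketch (not part of the original SciPy test file): the tests
+# above treat matmul_toeplitz as a black box and only compare it against
+# toeplitz(c, r) @ x.  A Toeplitz product can be computed in O(n log n) by
+# embedding the matrix in a circulant matrix and multiplying via the FFT;
+# the helpers below show that idea for a single 1-D vector x and are offered
+# as a reference sketch, not as a description of SciPy's internal code.
+def _sketch_toeplitz_matvec_fft(c, r, x):
+    c, r, x = map(np.asarray, (c, r, x))
+    n, m = len(c), len(r)
+    # First column of the (n + m - 1)-circulant that embeds toeplitz(c, r).
+    col = np.concatenate([c, r[:0:-1]])
+    fft_len = n + m - 1
+    y = np.fft.ifft(np.fft.fft(col, fft_len) * np.fft.fft(x, fft_len))[:n]
+    if np.iscomplexobj(col) or np.iscomplexobj(x):
+        return y
+    return y.real
+
+
+def _sketch_check_toeplitz_matvec_fft():
+    rng = np.random.RandomState(0)
+    c, r, x = rng.normal(size=7), rng.normal(size=5), rng.normal(size=5)
+    r[0] = c[0]  # toeplitz() takes the diagonal entry from c
+    assert_allclose(_sketch_toeplitz_matvec_fft(c, r, x), toeplitz(c, r) @ x)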
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_misc.py b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_misc.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c10923e088b796764e3cc01bbda7da8351a85bf
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_misc.py
@@ -0,0 +1,5 @@
+from scipy.linalg import norm
+
+
+def test_norm():
+ assert norm([]) == 0.0
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_procrustes.py b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_procrustes.py
new file mode 100644
index 0000000000000000000000000000000000000000..f41fd0e254c7d301d8a2612a6c9d67a26e2c302c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_procrustes.py
@@ -0,0 +1,191 @@
+from itertools import product, permutations
+
+import numpy as np
+from numpy.testing import assert_array_less, assert_allclose
+from pytest import raises as assert_raises
+
+from scipy.linalg import inv, eigh, norm
+from scipy.linalg import orthogonal_procrustes
+from scipy.sparse._sputils import matrix
+
+
+def test_orthogonal_procrustes_ndim_too_large():
+ np.random.seed(1234)
+ A = np.random.randn(3, 4, 5)
+ B = np.random.randn(3, 4, 5)
+ assert_raises(ValueError, orthogonal_procrustes, A, B)
+
+
+def test_orthogonal_procrustes_ndim_too_small():
+ np.random.seed(1234)
+ A = np.random.randn(3)
+ B = np.random.randn(3)
+ assert_raises(ValueError, orthogonal_procrustes, A, B)
+
+
+def test_orthogonal_procrustes_shape_mismatch():
+ np.random.seed(1234)
+ shapes = ((3, 3), (3, 4), (4, 3), (4, 4))
+ for a, b in permutations(shapes, 2):
+ A = np.random.randn(*a)
+ B = np.random.randn(*b)
+ assert_raises(ValueError, orthogonal_procrustes, A, B)
+
+
+def test_orthogonal_procrustes_checkfinite_exception():
+ np.random.seed(1234)
+ m, n = 2, 3
+ A_good = np.random.randn(m, n)
+ B_good = np.random.randn(m, n)
+ for bad_value in np.inf, -np.inf, np.nan:
+ A_bad = A_good.copy()
+ A_bad[1, 2] = bad_value
+ B_bad = B_good.copy()
+ B_bad[1, 2] = bad_value
+ for A, B in ((A_good, B_bad), (A_bad, B_good), (A_bad, B_bad)):
+ assert_raises(ValueError, orthogonal_procrustes, A, B)
+
+
+def test_orthogonal_procrustes_scale_invariance():
+ np.random.seed(1234)
+ m, n = 4, 3
+ for i in range(3):
+ A_orig = np.random.randn(m, n)
+ B_orig = np.random.randn(m, n)
+ R_orig, s = orthogonal_procrustes(A_orig, B_orig)
+ for A_scale in np.square(np.random.randn(3)):
+ for B_scale in np.square(np.random.randn(3)):
+ R, s = orthogonal_procrustes(A_orig * A_scale, B_orig * B_scale)
+ assert_allclose(R, R_orig)
+
+
+def test_orthogonal_procrustes_array_conversion():
+ np.random.seed(1234)
+ for m, n in ((6, 4), (4, 4), (4, 6)):
+ A_arr = np.random.randn(m, n)
+ B_arr = np.random.randn(m, n)
+ As = (A_arr, A_arr.tolist(), matrix(A_arr))
+ Bs = (B_arr, B_arr.tolist(), matrix(B_arr))
+ R_arr, s = orthogonal_procrustes(A_arr, B_arr)
+ AR_arr = A_arr.dot(R_arr)
+ for A, B in product(As, Bs):
+ R, s = orthogonal_procrustes(A, B)
+ AR = A_arr.dot(R)
+ assert_allclose(AR, AR_arr)
+
+
+def test_orthogonal_procrustes():
+ np.random.seed(1234)
+ for m, n in ((6, 4), (4, 4), (4, 6)):
+ # Sample a random target matrix.
+ B = np.random.randn(m, n)
+ # Sample a random orthogonal matrix
+ # by computing eigh of a sampled symmetric matrix.
+ X = np.random.randn(n, n)
+ w, V = eigh(X.T + X)
+ assert_allclose(inv(V), V.T)
+ # Compute a matrix with a known orthogonal transformation that gives B.
+ A = np.dot(B, V.T)
+ # Check that an orthogonal transformation from A to B can be recovered.
+ R, s = orthogonal_procrustes(A, B)
+ assert_allclose(inv(R), R.T)
+ assert_allclose(A.dot(R), B)
+ # Create a perturbed input matrix.
+ A_perturbed = A + 1e-2 * np.random.randn(m, n)
+ # Check that the orthogonal procrustes function can find an orthogonal
+ # transformation that is better than the orthogonal transformation
+ # computed from the original input matrix.
+ R_prime, s = orthogonal_procrustes(A_perturbed, B)
+ assert_allclose(inv(R_prime), R_prime.T)
+ # Compute the naive and optimal transformations of the perturbed input.
+ naive_approx = A_perturbed.dot(R)
+ optim_approx = A_perturbed.dot(R_prime)
+ # Compute the Frobenius norm errors of the matrix approximations.
+ naive_approx_error = norm(naive_approx - B, ord='fro')
+ optim_approx_error = norm(optim_approx - B, ord='fro')
+ # Check that the orthogonal Procrustes approximation is better.
+ assert_array_less(optim_approx_error, naive_approx_error)
+
+
+def _centered(A):
+ mu = A.mean(axis=0)
+ return A - mu, mu
+
+
+def test_orthogonal_procrustes_exact_example():
+ # Check a small application.
+ # It uses translation, scaling, reflection, and rotation.
+ #
+ # |
+ # a b |
+ # |
+ # d c | w
+ # |
+ # --------+--- x ----- z ---
+ # |
+ # | y
+ # |
+ #
+ A_orig = np.array([[-3, 3], [-2, 3], [-2, 2], [-3, 2]], dtype=float)
+ B_orig = np.array([[3, 2], [1, 0], [3, -2], [5, 0]], dtype=float)
+ A, A_mu = _centered(A_orig)
+ B, B_mu = _centered(B_orig)
+ R, s = orthogonal_procrustes(A, B)
+ scale = s / np.square(norm(A))
+ B_approx = scale * np.dot(A, R) + B_mu
+ assert_allclose(B_approx, B_orig, atol=1e-8)
+
+
+def test_orthogonal_procrustes_stretched_example():
+ # Try again with a target with a stretched y axis.
+ A_orig = np.array([[-3, 3], [-2, 3], [-2, 2], [-3, 2]], dtype=float)
+ B_orig = np.array([[3, 40], [1, 0], [3, -40], [5, 0]], dtype=float)
+ A, A_mu = _centered(A_orig)
+ B, B_mu = _centered(B_orig)
+ R, s = orthogonal_procrustes(A, B)
+ scale = s / np.square(norm(A))
+ B_approx = scale * np.dot(A, R) + B_mu
+ expected = np.array([[3, 21], [-18, 0], [3, -21], [24, 0]], dtype=float)
+ assert_allclose(B_approx, expected, atol=1e-8)
+ # Check disparity symmetry.
+ expected_disparity = 0.4501246882793018
+ AB_disparity = np.square(norm(B_approx - B_orig) / norm(B))
+ assert_allclose(AB_disparity, expected_disparity)
+ R, s = orthogonal_procrustes(B, A)
+ scale = s / np.square(norm(B))
+ A_approx = scale * np.dot(B, R) + A_mu
+ BA_disparity = np.square(norm(A_approx - A_orig) / norm(A))
+ assert_allclose(BA_disparity, expected_disparity)
+
+
+def test_orthogonal_procrustes_skbio_example():
+ # This transformation is also exact.
+ # It uses translation, scaling, and reflection.
+ #
+ # |
+ # | a
+ # | b
+ # | c d
+ # --+---------
+ # |
+ # | w
+ # |
+ # | x
+ # |
+ # | z y
+ # |
+ #
+ A_orig = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], dtype=float)
+ B_orig = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], dtype=float)
+ B_standardized = np.array([
+ [-0.13363062, 0.6681531],
+ [-0.13363062, 0.13363062],
+ [-0.13363062, -0.40089186],
+ [0.40089186, -0.40089186]])
+ A, A_mu = _centered(A_orig)
+ B, B_mu = _centered(B_orig)
+ R, s = orthogonal_procrustes(A, B)
+ scale = s / np.square(norm(A))
+ B_approx = scale * np.dot(A, R) + B_mu
+ assert_allclose(B_approx, B_orig)
+ assert_allclose(B / norm(B), B_standardized)
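+
+
+# ---------------------------------------------------------------------------
+# Illustrative sketch (not part of the original SciPy test file): the tests
+# above use orthogonal_procrustes(A, B) as a black box.  The classical
+# closed-form solution of  min_R ||A @ R - B||_F  over orthogonal R is
+# obtained from the SVD of A.T @ B: with A.T @ B = U @ diag(w) @ Vt the
+# minimizer is R = U @ Vt and the reported scale is w.sum().  The helper
+# below cross-checks SciPy's result against that construction on one random
+# example; the seed and sizes are arbitrary choices made for this sketch.
+def _sketch_procrustes_via_svd(seed=1234):
+    rng = np.random.RandomState(seed)
+    A = rng.randn(6, 4)
+    B = rng.randn(6, 4)
+    U, w, Vt = np.linalg.svd(A.T @ B)
+    R, scale = orthogonal_procrustes(A, B)
+    assert_allclose(R, U @ Vt, atol=1e-12)
+    assert_allclose(scale, w.sum())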
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_sketches.py b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_sketches.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4515e2dbfd1c7955487489a2324cd17b31d6726
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_sketches.py
@@ -0,0 +1,118 @@
+"""Tests for _sketches.py."""
+
+import numpy as np
+from numpy.testing import assert_, assert_equal
+from scipy.linalg import clarkson_woodruff_transform
+from scipy.linalg._sketches import cwt_matrix
+from scipy.sparse import issparse, rand
+from scipy.sparse.linalg import norm
+
+
+class TestClarksonWoodruffTransform:
+ """
+ Testing the Clarkson Woodruff Transform
+ """
+ # set seed for generating test matrices
+ rng = np.random.RandomState(seed=1179103485)
+
+ # Test matrix parameters
+ n_rows = 2000
+ n_cols = 100
+ density = 0.1
+
+ # Sketch matrix dimensions
+ n_sketch_rows = 200
+
+ # Seeds to test with
+ seeds = [1755490010, 934377150, 1391612830, 1752708722, 2008891431,
+ 1302443994, 1521083269, 1501189312, 1126232505, 1533465685]
+
+ A_dense = rng.randn(n_rows, n_cols)
+ A_csc = rand(
+ n_rows, n_cols, density=density, format='csc', random_state=rng,
+ )
+ A_csr = rand(
+ n_rows, n_cols, density=density, format='csr', random_state=rng,
+ )
+ A_coo = rand(
+ n_rows, n_cols, density=density, format='coo', random_state=rng,
+ )
+
+ # Collect the test matrices
+ test_matrices = [
+ A_dense, A_csc, A_csr, A_coo,
+ ]
+
+ # Test vector with norm ~1
+ x = rng.randn(n_rows, 1) / np.sqrt(n_rows)
+
+ def test_sketch_dimensions(self):
+ for A in self.test_matrices:
+ for seed in self.seeds:
+ sketch = clarkson_woodruff_transform(
+ A, self.n_sketch_rows, seed=seed
+ )
+ assert_(sketch.shape == (self.n_sketch_rows, self.n_cols))
+
+ def test_seed_returns_identical_transform_matrix(self):
+ for A in self.test_matrices:
+ for seed in self.seeds:
+ S1 = cwt_matrix(
+ self.n_sketch_rows, self.n_rows, seed=seed
+ ).toarray()
+ S2 = cwt_matrix(
+ self.n_sketch_rows, self.n_rows, seed=seed
+ ).toarray()
+ assert_equal(S1, S2)
+
+ def test_seed_returns_identically(self):
+ for A in self.test_matrices:
+ for seed in self.seeds:
+ sketch1 = clarkson_woodruff_transform(
+ A, self.n_sketch_rows, seed=seed
+ )
+ sketch2 = clarkson_woodruff_transform(
+ A, self.n_sketch_rows, seed=seed
+ )
+ if issparse(sketch1):
+ sketch1 = sketch1.toarray()
+ if issparse(sketch2):
+ sketch2 = sketch2.toarray()
+ assert_equal(sketch1, sketch2)
+
+ def test_sketch_preserves_frobenius_norm(self):
+ # Given the probabilistic nature of the sketches, run the check for
+ # every test matrix and seed and require each sketch to preserve the
+ # Frobenius norm to within 10% of the true norm.
+ n_errors = 0
+ for A in self.test_matrices:
+ if issparse(A):
+ true_norm = norm(A)
+ else:
+ true_norm = np.linalg.norm(A)
+ for seed in self.seeds:
+ sketch = clarkson_woodruff_transform(
+ A, self.n_sketch_rows, seed=seed,
+ )
+ if issparse(sketch):
+ sketch_norm = norm(sketch)
+ else:
+ sketch_norm = np.linalg.norm(sketch)
+
+ if np.abs(true_norm - sketch_norm) > 0.1 * true_norm:
+ n_errors += 1
+ assert_(n_errors == 0)
+
+ def test_sketch_preserves_vector_norm(self):
+ n_errors = 0
+ n_sketch_rows = int(np.ceil(2. / (0.01 * 0.5**2)))
+ true_norm = np.linalg.norm(self.x)
+ for seed in self.seeds:
+ sketch = clarkson_woodruff_transform(
+ self.x, n_sketch_rows, seed=seed,
+ )
+ sketch_norm = np.linalg.norm(sketch)
+
+ if np.abs(true_norm - sketch_norm) > 0.5 * true_norm:
+ n_errors += 1
+ assert_(n_errors == 0)
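+
+
+# ---------------------------------------------------------------------------
+# Illustrative sketch (not part of the original SciPy test file): the
+# Clarkson-Woodruff transform is a CountSketch.  In that construction each
+# column of the sketching matrix holds a single +1 or -1 in a randomly
+# chosen row, so S @ A just adds and subtracts rows of A into buckets, which
+# is why norms are approximately preserved.  The helper below checks that
+# structural property of cwt_matrix for one seed; the sizes and seed are
+# arbitrary choices made for this sketch.
+def _sketch_cwt_matrix_structure(n_sketch_rows=20, n_rows=500, seed=1234):
+    S = cwt_matrix(n_sketch_rows, n_rows, seed=seed).toarray()
+    # Exactly one nonzero entry per column, and that entry is +1 or -1.
+    assert_equal(np.count_nonzero(S, axis=0), np.ones(n_rows, dtype=int))
+    assert_(set(np.unique(S)) <= {-1.0, 0.0, 1.0})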
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_solve_toeplitz.py b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_solve_toeplitz.py
new file mode 100644
index 0000000000000000000000000000000000000000..ecced19e2d397d5ed0754d9e4f4edf1125156922
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_solve_toeplitz.py
@@ -0,0 +1,121 @@
+"""Test functions for linalg._solve_toeplitz module
+"""
+import numpy as np
+from scipy.linalg._solve_toeplitz import levinson
+from scipy.linalg import solve, toeplitz, solve_toeplitz
+from numpy.testing import assert_equal, assert_allclose
+
+import pytest
+from pytest import raises as assert_raises
+
+
+def test_solve_equivalence():
+ # For toeplitz matrices, solve_toeplitz() should be equivalent to solve().
+ random = np.random.RandomState(1234)
+ for n in (1, 2, 3, 10):
+ c = random.randn(n)
+ if random.rand() < 0.5:
+ c = c + 1j * random.randn(n)
+ r = random.randn(n)
+ if random.rand() < 0.5:
+ r = r + 1j * random.randn(n)
+ y = random.randn(n)
+ if random.rand() < 0.5:
+ y = y + 1j * random.randn(n)
+
+ # Check equivalence when both the column and row are provided.
+ actual = solve_toeplitz((c,r), y)
+ desired = solve(toeplitz(c, r=r), y)
+ assert_allclose(actual, desired)
+
+ # Check equivalence when the column is provided but not the row.
+ actual = solve_toeplitz(c, b=y)
+ desired = solve(toeplitz(c), y)
+ assert_allclose(actual, desired)
+
+
+def test_multiple_rhs():
+ random = np.random.RandomState(1234)
+ c = random.randn(4)
+ r = random.randn(4)
+ for offset in [0, 1j]:
+ for yshape in ((4,), (4, 3), (4, 3, 2)):
+ y = random.randn(*yshape) + offset
+ actual = solve_toeplitz((c,r), b=y)
+ desired = solve(toeplitz(c, r=r), y)
+ assert_equal(actual.shape, yshape)
+ assert_equal(desired.shape, yshape)
+ assert_allclose(actual, desired)
+
+
+def test_native_list_arguments():
+ c = [1,2,4,7]
+ r = [1,3,9,12]
+ y = [5,1,4,2]
+ actual = solve_toeplitz((c,r), y)
+ desired = solve(toeplitz(c, r=r), y)
+ assert_allclose(actual, desired)
+
+
+def test_zero_diag_error():
+ # The Levinson-Durbin implementation fails when the (constant) main
+ # diagonal element c[0] is zero.
+ random = np.random.RandomState(1234)
+ n = 4
+ c = random.randn(n)
+ r = random.randn(n)
+ y = random.randn(n)
+ c[0] = 0
+ assert_raises(np.linalg.LinAlgError,
+ solve_toeplitz, (c, r), b=y)
+
+
+def test_wikipedia_counterexample():
+ # The Levinson-Durbin implementation also fails in other cases, e.g. when
+ # a leading principal submatrix is singular even though the full matrix
+ # is not.  This example is from the talk page of the Wikipedia article
+ # (see the illustrative sketch at the end of this module).
+ random = np.random.RandomState(1234)
+ c = [2, 2, 1]
+ y = random.randn(3)
+ assert_raises(np.linalg.LinAlgError, solve_toeplitz, c, b=y)
+
+
+def test_reflection_coeffs():
+ # check that the partial solutions are given by the reflection
+ # coefficients
+
+ random = np.random.RandomState(1234)
+ y_d = random.randn(10)
+ y_z = random.randn(10) + 1j
+ reflection_coeffs_d = [1]
+ reflection_coeffs_z = [1]
+ for i in range(2, 10):
+ reflection_coeffs_d.append(solve_toeplitz(y_d[:(i-1)], b=y_d[1:i])[-1])
+ reflection_coeffs_z.append(solve_toeplitz(y_z[:(i-1)], b=y_z[1:i])[-1])
+
+ y_d_concat = np.concatenate((y_d[-2:0:-1], y_d[:-1]))
+ y_z_concat = np.concatenate((y_z[-2:0:-1].conj(), y_z[:-1]))
+ _, ref_d = levinson(y_d_concat, b=y_d[1:])
+ _, ref_z = levinson(y_z_concat, b=y_z[1:])
+
+ assert_allclose(reflection_coeffs_d, ref_d[:-1])
+ assert_allclose(reflection_coeffs_z, ref_z[:-1])
+
+
+@pytest.mark.xfail(reason='Instability of Levinson iteration')
+def test_unstable():
+ # this is a "Gaussian Toeplitz matrix", as mentioned in Example 2 of
+ # I. Gohberg, T. Kailath and V. Olshevsky "Fast Gaussian Elimination with
+ # Partial Pivoting for Matrices with Displacement Structure"
+ # Mathematics of Computation, 64, 212 (1995), pp 1557-1576
+ # which can be unstable for the Levinson recursion.
+
+ # other fast toeplitz solvers such as GKO or Burg should be better.
+ random = np.random.RandomState(1234)
+ n = 100
+ c = 0.9 ** (np.arange(n)**2)
+ y = random.randn(n)
+
+ solution1 = solve_toeplitz(c, b=y)
+ solution2 = solve(toeplitz(c), y)
+
+ assert_allclose(solution1, solution2)
+
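+
+# ---------------------------------------------------------------------------
+# Illustrative sketch (not part of the original SciPy test file), referenced
+# from test_wikipedia_counterexample above: the Levinson-Durbin recursion
+# needs every leading principal submatrix of the Toeplitz matrix to be
+# nonsingular.  For c = [2, 2, 1] the full 3x3 matrix is invertible, but its
+# order-2 leading principal submatrix [[2, 2], [2, 2]] is singular, which is
+# why solve_toeplitz raises LinAlgError even though a dense solve succeeds.
+def _sketch_wikipedia_counterexample_minors():
+    T = toeplitz([2, 2, 1])
+    # The full matrix is invertible ...
+    assert abs(np.linalg.det(T)) > 1e-12
+    # ... but the order-2 leading principal minor vanishes.
+    assert abs(np.linalg.det(T[:2, :2])) < 1e-12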
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_solvers.py b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_solvers.py
new file mode 100644
index 0000000000000000000000000000000000000000..1be2f0d35bd13c2c760c9011b6c482b61a92ccbc
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_solvers.py
@@ -0,0 +1,777 @@
+import os
+import numpy as np
+
+from numpy.testing import assert_array_almost_equal, assert_allclose
+import pytest
+from pytest import raises as assert_raises
+
+from scipy.linalg import solve_sylvester
+from scipy.linalg import solve_continuous_lyapunov, solve_discrete_lyapunov
+from scipy.linalg import solve_continuous_are, solve_discrete_are
+from scipy.linalg import block_diag, solve, LinAlgError
+from scipy.sparse._sputils import matrix
+
+
+def _load_data(name):
+ """
+ Load npz data file under data/
+ Returns a copy of the data, rather than keeping the npz file open.
+ """
+ filename = os.path.join(os.path.abspath(os.path.dirname(__file__)),
+ 'data', name)
+ with np.load(filename) as f:
+ return dict(f.items())
+
+
+class TestSolveLyapunov:
+
+ cases = [
+ (np.array([[1, 2], [3, 4]]),
+ np.array([[9, 10], [11, 12]])),
+ # a, q all complex.
+ (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
+ np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
+ # a real; q complex.
+ (np.array([[1.0, 2.0], [3.0, 5.0]]),
+ np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
+ # a complex; q real.
+ (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
+ np.array([[2.0, 2.0], [-1.0, 2.0]])),
+ # An example from Kitagawa, 1977
+ (np.array([[3, 9, 5, 1, 4], [1, 2, 3, 8, 4], [4, 6, 6, 6, 3],
+ [1, 5, 2, 0, 7], [5, 3, 3, 1, 5]]),
+ np.array([[2, 4, 1, 0, 1], [4, 1, 0, 2, 0], [1, 0, 3, 0, 3],
+ [0, 2, 0, 1, 0], [1, 0, 3, 0, 4]])),
+ # Companion matrix example. a complex; q real; a.shape[0] = 11
+ (np.array([[0.100+0.j, 0.091+0.j, 0.082+0.j, 0.073+0.j, 0.064+0.j,
+ 0.055+0.j, 0.046+0.j, 0.037+0.j, 0.028+0.j, 0.019+0.j,
+ 0.010+0.j],
+ [1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+ 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+ 0.000+0.j],
+ [0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+ 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+ 0.000+0.j],
+ [0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j,
+ 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+ 0.000+0.j],
+ [0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j,
+ 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+ 0.000+0.j],
+ [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j,
+ 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+ 0.000+0.j],
+ [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+ 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+ 0.000+0.j],
+ [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+ 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+ 0.000+0.j],
+ [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+ 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j,
+ 0.000+0.j],
+ [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+ 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j,
+ 0.000+0.j],
+ [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j,
+ 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j,
+ 0.000+0.j]]),
+ np.eye(11)),
+ # https://github.com/scipy/scipy/issues/4176
+ (matrix([[0, 1], [-1/2, -1]]),
+ (matrix([0, 3]).T @ matrix([0, 3]).T.T)),
+ # https://github.com/scipy/scipy/issues/4176
+ (matrix([[0, 1], [-1/2, -1]]),
+ (np.array(matrix([0, 3]).T @ matrix([0, 3]).T.T))),
+ ]
+
+ def test_continuous_squareness_and_shape(self):
+ nsq = np.ones((3, 2))
+ sq = np.eye(3)
+ assert_raises(ValueError, solve_continuous_lyapunov, nsq, sq)
+ assert_raises(ValueError, solve_continuous_lyapunov, sq, nsq)
+ assert_raises(ValueError, solve_continuous_lyapunov, sq, np.eye(2))
+
+ def check_continuous_case(self, a, q):
+ x = solve_continuous_lyapunov(a, q)
+ assert_array_almost_equal(
+ np.dot(a, x) + np.dot(x, a.conj().transpose()), q)
+
+ def check_discrete_case(self, a, q, method=None):
+ x = solve_discrete_lyapunov(a, q, method=method)
+ assert_array_almost_equal(
+ np.dot(np.dot(a, x), a.conj().transpose()) - x, -1.0*q)
+
+ def test_cases(self):
+ for case in self.cases:
+ self.check_continuous_case(case[0], case[1])
+ self.check_discrete_case(case[0], case[1])
+ self.check_discrete_case(case[0], case[1], method='direct')
+ self.check_discrete_case(case[0], case[1], method='bilinear')
+
+
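+# ---------------------------------------------------------------------------
+# Illustrative sketch (not part of the original SciPy test file): the
+# check_continuous_case helper above verifies A @ X + X @ A.conj().T == Q.
+# Because that equation is linear in X, small problems can also be solved
+# directly through the Kronecker/vec identity
+#     (I kron A + conj(A) kron I) vec(X) = vec(Q)
+# with column-major vec.  The helper below cross-checks SciPy's solver
+# against that formulation; the size and seed are arbitrary choices made
+# for this sketch (uniqueness needs no two eigenvalues of A summing to
+# zero, which holds for generic random A).
+def _sketch_lyapunov_vec_identity(n=3, seed=0):
+    rng = np.random.default_rng(seed)
+    a = rng.standard_normal((n, n))
+    q = rng.standard_normal((n, n))
+    x = solve_continuous_lyapunov(a, q)
+    lhs = np.kron(np.eye(n), a) + np.kron(a.conj(), np.eye(n))
+    vec_x = np.linalg.solve(lhs, q.flatten(order='F'))
+    assert_allclose(x, vec_x.reshape((n, n), order='F'))
+
+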
+class TestSolveContinuousAre:
+ mat6 = _load_data('carex_6_data.npz')
+ mat15 = _load_data('carex_15_data.npz')
+ mat18 = _load_data('carex_18_data.npz')
+ mat19 = _load_data('carex_19_data.npz')
+ mat20 = _load_data('carex_20_data.npz')
+ cases = [
+ # Carex examples taken from (with default parameters):
+ # [1] P.BENNER, A.J. LAUB, V. MEHRMANN: 'A Collection of Benchmark
+ # Examples for the Numerical Solution of Algebraic Riccati
+ # Equations II: Continuous-Time Case', Tech. Report SPC 95_23,
+ # Fak. f. Mathematik, TU Chemnitz-Zwickau (Germany), 1995.
+ #
+ # The format of the data is (a, b, q, r, knownfailure), where
+ # knownfailure is None if the test passes or a string
+ # indicating the reason for failure.
+ #
+ # Test Case 0: carex #1
+ (np.diag([1.], 1),
+ np.array([[0], [1]]),
+ block_diag(1., 2.),
+ 1,
+ None),
+ # Test Case 1: carex #2
+ (np.array([[4, 3], [-4.5, -3.5]]),
+ np.array([[1], [-1]]),
+ np.array([[9, 6], [6, 4.]]),
+ 1,
+ None),
+ # Test Case 2: carex #3
+ (np.array([[0, 1, 0, 0],
+ [0, -1.89, 0.39, -5.53],
+ [0, -0.034, -2.98, 2.43],
+ [0.034, -0.0011, -0.99, -0.21]]),
+ np.array([[0, 0], [0.36, -1.6], [-0.95, -0.032], [0.03, 0]]),
+ np.array([[2.313, 2.727, 0.688, 0.023],
+ [2.727, 4.271, 1.148, 0.323],
+ [0.688, 1.148, 0.313, 0.102],
+ [0.023, 0.323, 0.102, 0.083]]),
+ np.eye(2),
+ None),
+ # Test Case 3: carex #4
+ (np.array([[-0.991, 0.529, 0, 0, 0, 0, 0, 0],
+ [0.522, -1.051, 0.596, 0, 0, 0, 0, 0],
+ [0, 0.522, -1.118, 0.596, 0, 0, 0, 0],
+ [0, 0, 0.522, -1.548, 0.718, 0, 0, 0],
+ [0, 0, 0, 0.922, -1.64, 0.799, 0, 0],
+ [0, 0, 0, 0, 0.922, -1.721, 0.901, 0],
+ [0, 0, 0, 0, 0, 0.922, -1.823, 1.021],
+ [0, 0, 0, 0, 0, 0, 0.922, -1.943]]),
+ np.array([[3.84, 4.00, 37.60, 3.08, 2.36, 2.88, 3.08, 3.00],
+ [-2.88, -3.04, -2.80, -2.32, -3.32, -3.82, -4.12, -3.96]]
+ ).T * 0.001,
+ np.array([[1.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.1],
+ [0.0, 1.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0],
+ [0.0, 0.0, 1.0, 0.0, 0.0, 0.5, 0.0, 0.0],
+ [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0],
+ [0.5, 0.1, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0],
+ [0.0, 0.0, 0.5, 0.0, 0.0, 0.1, 0.0, 0.0],
+ [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.0],
+ [0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1]]),
+ np.eye(2),
+ None),
+ # Test Case 4: carex #5
+ (np.array(
+ [[-4.019, 5.120, 0., 0., -2.082, 0., 0., 0., 0.870],
+ [-0.346, 0.986, 0., 0., -2.340, 0., 0., 0., 0.970],
+ [-7.909, 15.407, -4.069, 0., -6.450, 0., 0., 0., 2.680],
+ [-21.816, 35.606, -0.339, -3.870, -17.800, 0., 0., 0., 7.390],
+ [-60.196, 98.188, -7.907, 0.340, -53.008, 0., 0., 0., 20.400],
+ [0, 0, 0, 0, 94.000, -147.200, 0., 53.200, 0.],
+ [0, 0, 0, 0, 0, 94.000, -147.200, 0, 0],
+ [0, 0, 0, 0, 0, 12.800, 0.000, -31.600, 0],
+ [0, 0, 0, 0, 12.800, 0.000, 0.000, 18.800, -31.600]]),
+ np.array([[0.010, -0.011, -0.151],
+ [0.003, -0.021, 0.000],
+ [0.009, -0.059, 0.000],
+ [0.024, -0.162, 0.000],
+ [0.068, -0.445, 0.000],
+ [0.000, 0.000, 0.000],
+ [0.000, 0.000, 0.000],
+ [0.000, 0.000, 0.000],
+ [0.000, 0.000, 0.000]]),
+ np.eye(9),
+ np.eye(3),
+ None),
+ # Test Case 5: carex #6
+ (mat6['A'], mat6['B'], mat6['Q'], mat6['R'], None),
+ # Test Case 6: carex #7
+ (np.array([[1, 0], [0, -2.]]),
+ np.array([[1e-6], [0]]),
+ np.ones((2, 2)),
+ 1.,
+ 'Bad residual accuracy'),
+ # Test Case 7: carex #8
+ (block_diag(-0.1, -0.02),
+ np.array([[0.100, 0.000], [0.001, 0.010]]),
+ np.array([[100, 1000], [1000, 10000]]),
+ np.ones((2, 2)) + block_diag(1e-6, 0),
+ None),
+ # Test Case 8: carex #9
+ (np.array([[0, 1e6], [0, 0]]),
+ np.array([[0], [1.]]),
+ np.eye(2),
+ 1.,
+ None),
+ # Test Case 9: carex #10
+ (np.array([[1.0000001, 1], [1., 1.0000001]]),
+ np.eye(2),
+ np.eye(2),
+ np.eye(2),
+ None),
+ # Test Case 10: carex #11
+ (np.array([[3, 1.], [4, 2]]),
+ np.array([[1], [1]]),
+ np.array([[-11, -5], [-5, -2.]]),
+ 1.,
+ None),
+ # Test Case 11: carex #12
+ (np.array([[7000000., 2000000., -0.],
+ [2000000., 6000000., -2000000.],
+ [0., -2000000., 5000000.]]) / 3,
+ np.eye(3),
+ np.array([[1., -2., -2.], [-2., 1., -2.], [-2., -2., 1.]]).dot(
+ np.diag([1e-6, 1, 1e6])).dot(
+ np.array([[1., -2., -2.], [-2., 1., -2.], [-2., -2., 1.]])) / 9,
+ np.eye(3) * 1e6,
+ 'Bad Residual Accuracy'),
+ # Test Case 12: carex #13
+ (np.array([[0, 0.4, 0, 0],
+ [0, 0, 0.345, 0],
+ [0, -0.524e6, -0.465e6, 0.262e6],
+ [0, 0, 0, -1e6]]),
+ np.array([[0, 0, 0, 1e6]]).T,
+ np.diag([1, 0, 1, 0]),
+ 1.,
+ None),
+ # Test Case 13: carex #14
+ (np.array([[-1e-6, 1, 0, 0],
+ [-1, -1e-6, 0, 0],
+ [0, 0, 1e-6, 1],
+ [0, 0, -1, 1e-6]]),
+ np.ones((4, 1)),
+ np.ones((4, 4)),
+ 1.,
+ None),
+ # Test Case 14: carex #15
+ (mat15['A'], mat15['B'], mat15['Q'], mat15['R'], None),
+ # Test Case 15: carex #16
+ (np.eye(64, 64, k=-1) + np.eye(64, 64)*(-2.) + np.rot90(
+ block_diag(1, np.zeros((62, 62)), 1)) + np.eye(64, 64, k=1),
+ np.eye(64),
+ np.eye(64),
+ np.eye(64),
+ None),
+ # Test Case 16: carex #17
+ (np.diag(np.ones((20, )), 1),
+ np.flipud(np.eye(21, 1)),
+ np.eye(21, 1) * np.eye(21, 1).T,
+ 1,
+ 'Bad Residual Accuracy'),
+ # Test Case 17: carex #18
+ (mat18['A'], mat18['B'], mat18['Q'], mat18['R'], None),
+ # Test Case 18: carex #19
+ (mat19['A'], mat19['B'], mat19['Q'], mat19['R'],
+ 'Bad Residual Accuracy'),
+ # Test Case 19: carex #20
+ (mat20['A'], mat20['B'], mat20['Q'], mat20['R'],
+ 'Bad Residual Accuracy')
+ ]
+ # Set the minimum precision requirement per test case.
+ # Each number is the number of decimals to which the residual agrees
+ # with the zero matrix when the solution x is plugged into the equation.
+ #
+ # res = array([[8e-3,1e-16],[1e-16,1e-20]]) --> min_decimal[k] = 2
+ #
+ # If the test is failing use "None" for that entry.
+ #
+ min_decimal = (14, 12, 13, 14, 11, 6, None, 5, 7, 14, 14,
+ None, 9, 14, 13, 14, None, 12, None, None)
+
+ @pytest.mark.parametrize("j, case", enumerate(cases))
+ def test_solve_continuous_are(self, j, case):
+ """Checks if 0 = XA + A'X - XB(R)^{-1} B'X + Q is true"""
+ a, b, q, r, knownfailure = case
+ if knownfailure:
+ pytest.xfail(reason=knownfailure)
+
+ dec = self.min_decimal[j]
+ x = solve_continuous_are(a, b, q, r)
+ res = x @ a + a.conj().T @ x + q
+ out_fact = x @ b
+ res -= out_fact @ solve(np.atleast_2d(r), out_fact.conj().T)
+ assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)
+
+
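+# ---------------------------------------------------------------------------
+# Illustrative sketch (not part of the original SciPy test file): for a
+# 1x1 problem the continuous ARE residual checked above,
+#     0 = XA + A'X - XB(R)^-1B'X + Q,
+# can be solved by hand.  With A = 0, B = 1, R = 1, Q = 4 it reduces to
+# 4 - X**2 = 0, and the stabilizing root (A - B R^-1 B'X negative) is X = 2.
+# This is a hand-worked consistency check added for illustration, separate
+# from the carex benchmarks above.
+def _sketch_scalar_continuous_are():
+    a, b, q, r = (np.array([[v]]) for v in (0.0, 1.0, 4.0, 1.0))
+    assert_allclose(solve_continuous_are(a, b, q, r), [[2.0]])
+
+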
+class TestSolveDiscreteAre:
+ cases = [
+ # Darex examples taken from (with default parameters):
+ # [1] P.BENNER, A.J. LAUB, V. MEHRMANN: 'A Collection of Benchmark
+ # Examples for the Numerical Solution of Algebraic Riccati
+ # Equations II: Discrete-Time Case', Tech. Report SPC 95_23,
+ # Fak. f. Mathematik, TU Chemnitz-Zwickau (Germany), 1995.
+ # [2] T. GUDMUNDSSON, C. KENNEY, A.J. LAUB: 'Scaling of the
+ # Discrete-Time Algebraic Riccati Equation to Enhance Stability
+ # of the Schur Solution Method', IEEE Trans.Aut.Cont., vol.37(4)
+ #
+ # The format of the data is (a, b, q, r, knownfailure), where
+ # knownfailure is None if the test passes or a string
+ # indicating the reason for failure.
+ #
+ # TEST CASE 0 : Complex a; real b, q, r
+ (np.array([[2, 1-2j], [0, -3j]]),
+ np.array([[0], [1]]),
+ np.array([[1, 0], [0, 2]]),
+ np.array([[1]]),
+ None),
+ # TEST CASE 1 :Real a, q, r; complex b
+ (np.array([[2, 1], [0, -1]]),
+ np.array([[-2j], [1j]]),
+ np.array([[1, 0], [0, 2]]),
+ np.array([[1]]),
+ None),
+ # TEST CASE 2 : Real a, b; complex q, r
+ (np.array([[3, 1], [0, -1]]),
+ np.array([[1, 2], [1, 3]]),
+ np.array([[1, 1+1j], [1-1j, 2]]),
+ np.array([[2, -2j], [2j, 3]]),
+ None),
+ # TEST CASE 3 : User-reported gh-2251 (Trac #1732)
+ (np.array([[0.63399379, 0.54906824, 0.76253406],
+ [0.5404729, 0.53745766, 0.08731853],
+ [0.27524045, 0.84922129, 0.4681622]]),
+ np.array([[0.96861695], [0.05532739], [0.78934047]]),
+ np.eye(3),
+ np.eye(1),
+ None),
+ # TEST CASE 4 : darex #1
+ (np.array([[4, 3], [-4.5, -3.5]]),
+ np.array([[1], [-1]]),
+ np.array([[9, 6], [6, 4]]),
+ np.array([[1]]),
+ None),
+ # TEST CASE 5 : darex #2
+ (np.array([[0.9512, 0], [0, 0.9048]]),
+ np.array([[4.877, 4.877], [-1.1895, 3.569]]),
+ np.array([[0.005, 0], [0, 0.02]]),
+ np.array([[1/3, 0], [0, 3]]),
+ None),
+ # TEST CASE 6 : darex #3
+ (np.array([[2, -1], [1, 0]]),
+ np.array([[1], [0]]),
+ np.array([[0, 0], [0, 1]]),
+ np.array([[0]]),
+ None),
+ # TEST CASE 7 : darex #4 (skipped the gen. Ric. term S)
+ (np.array([[0, 1], [0, -1]]),
+ np.array([[1, 0], [2, 1]]),
+ np.array([[-4, -4], [-4, 7]]) * (1/11),
+ np.array([[9, 3], [3, 1]]),
+ None),
+ # TEST CASE 8 : darex #5
+ (np.array([[0, 1], [0, 0]]),
+ np.array([[0], [1]]),
+ np.array([[1, 2], [2, 4]]),
+ np.array([[1]]),
+ None),
+ # TEST CASE 9 : darex #6
+ (np.array([[0.998, 0.067, 0, 0],
+ [-.067, 0.998, 0, 0],
+ [0, 0, 0.998, 0.153],
+ [0, 0, -.153, 0.998]]),
+ np.array([[0.0033, 0.0200],
+ [0.1000, -.0007],
+ [0.0400, 0.0073],
+ [-.0028, 0.1000]]),
+ np.array([[1.87, 0, 0, -0.244],
+ [0, 0.744, 0.205, 0],
+ [0, 0.205, 0.589, 0],
+ [-0.244, 0, 0, 1.048]]),
+ np.eye(2),
+ None),
+ # TEST CASE 10 : darex #7
+ (np.array([[0.984750, -.079903, 0.0009054, -.0010765],
+ [0.041588, 0.998990, -.0358550, 0.0126840],
+ [-.546620, 0.044916, -.3299100, 0.1931800],
+ [2.662400, -.100450, -.9245500, -.2632500]]),
+ np.array([[0.0037112, 0.0007361],
+ [-.0870510, 9.3411e-6],
+ [-1.198440, -4.1378e-4],
+ [-3.192700, 9.2535e-4]]),
+ np.eye(4)*1e-2,
+ np.eye(2),
+ None),
+ # TEST CASE 11 : darex #8
+ (np.array([[-0.6000000, -2.2000000, -3.6000000, -5.4000180],
+ [1.0000000, 0.6000000, 0.8000000, 3.3999820],
+ [0.0000000, 1.0000000, 1.8000000, 3.7999820],
+ [0.0000000, 0.0000000, 0.0000000, -0.9999820]]),
+ np.array([[1.0, -1.0, -1.0, -1.0],
+ [0.0, 1.0, -1.0, -1.0],
+ [0.0, 0.0, 1.0, -1.0],
+ [0.0, 0.0, 0.0, 1.0]]),
+ np.array([[2, 1, 3, 6],
+ [1, 2, 2, 5],
+ [3, 2, 6, 11],
+ [6, 5, 11, 22]]),
+ np.eye(4),
+ None),
+ # TEST CASE 12 : darex #9
+ (np.array([[95.4070, 1.9643, 0.3597, 0.0673, 0.0190],
+ [40.8490, 41.3170, 16.0840, 4.4679, 1.1971],
+ [12.2170, 26.3260, 36.1490, 15.9300, 12.3830],
+ [4.1118, 12.8580, 27.2090, 21.4420, 40.9760],
+ [0.1305, 0.5808, 1.8750, 3.6162, 94.2800]]) * 0.01,
+ np.array([[0.0434, -0.0122],
+ [2.6606, -1.0453],
+ [3.7530, -5.5100],
+ [3.6076, -6.6000],
+ [0.4617, -0.9148]]) * 0.01,
+ np.eye(5),
+ np.eye(2),
+ None),
+ # TEST CASE 13 : darex #10
+ (np.kron(np.eye(2), np.diag([1, 1], k=1)),
+ np.kron(np.eye(2), np.array([[0], [0], [1]])),
+ np.array([[1, 1, 0, 0, 0, 0],
+ [1, 1, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 1, -1, 0],
+ [0, 0, 0, -1, 1, 0],
+ [0, 0, 0, 0, 0, 0]]),
+ np.array([[3, 0], [0, 1]]),
+ None),
+ # TEST CASE 14 : darex #11
+ (0.001 * np.array(
+ [[870.1, 135.0, 11.59, .5014, -37.22, .3484, 0, 4.242, 7.249],
+ [76.55, 897.4, 12.72, 0.5504, -40.16, .3743, 0, 4.53, 7.499],
+ [-127.2, 357.5, 817, 1.455, -102.8, .987, 0, 11.85, 18.72],
+ [-363.5, 633.9, 74.91, 796.6, -273.5, 2.653, 0, 31.72, 48.82],
+ [-960, 1645.9, -128.9, -5.597, 71.42, 7.108, 0, 84.52, 125.9],
+ [-664.4, 112.96, -88.89, -3.854, 84.47, 13.6, 0, 144.3, 101.6],
+ [-410.2, 693, -54.71, -2.371, 66.49, 12.49, .1063, 99.97, 69.67],
+ [-179.9, 301.7, -23.93, -1.035, 60.59, 22.16, 0, 213.9, 35.54],
+ [-345.1, 580.4, -45.96, -1.989, 105.6, 19.86, 0, 219.1, 215.2]]),
+ np.array([[4.7600, -0.5701, -83.6800],
+ [0.8790, -4.7730, -2.7300],
+ [1.4820, -13.1200, 8.8760],
+ [3.8920, -35.1300, 24.8000],
+ [10.3400, -92.7500, 66.8000],
+ [7.2030, -61.5900, 38.3400],
+ [4.4540, -36.8300, 20.2900],
+ [1.9710, -15.5400, 6.9370],
+ [3.7730, -30.2800, 14.6900]]) * 0.001,
+ np.diag([50, 0, 0, 0, 50, 0, 0, 0, 0]),
+ np.eye(3),
+ None),
+ # TEST CASE 15 : darex #12 - numerically least accurate example
+ (np.array([[0, 1e6], [0, 0]]),
+ np.array([[0], [1]]),
+ np.eye(2),
+ np.array([[1]]),
+ "Presumed issue with OpenBLAS, see gh-16926"),
+ # TEST CASE 16 : darex #13
+ (np.array([[16, 10, -2],
+ [10, 13, -8],
+ [-2, -8, 7]]) * (1/9),
+ np.eye(3),
+ 1e6 * np.eye(3),
+ 1e6 * np.eye(3),
+ "Issue with OpenBLAS, see gh-16926"),
+ # TEST CASE 17 : darex #14
+ (np.array([[1 - 1/1e8, 0, 0, 0],
+ [1, 0, 0, 0],
+ [0, 1, 0, 0],
+ [0, 0, 1, 0]]),
+ np.array([[1e-08], [0], [0], [0]]),
+ np.diag([0, 0, 0, 1]),
+ np.array([[0.25]]),
+ None),
+ # TEST CASE 18 : darex #15
+ (np.eye(100, k=1),
+ np.flipud(np.eye(100, 1)),
+ np.eye(100),
+ np.array([[1]]),
+ None)
+ ]
+
+ # Set the minimum precision requirement per test case.
+ # Each number is the number of decimals to which the residual agrees
+ # with the zero matrix when the solution x is plugged into the equation.
+ #
+ # res = array([[8e-3,1e-16],[1e-16,1e-20]]) --> min_decimal[k] = 2
+ #
+ # If the test is failing use "None" for that entry.
+ #
+ min_decimal = (12, 14, 13, 14, 13, 16, 18, 14, 14, 13,
+ 14, 13, 13, 14, 12, 2, 5, 6, 10)
+ max_tol = [1.5 * 10**-ind for ind in min_decimal]
+ # relaxed tolerance in gh-18012 after bump to OpenBLAS
+ max_tol[11] = 2.5e-13
+
+ @pytest.mark.parametrize("j, case", enumerate(cases))
+ def test_solve_discrete_are(self, j, case):
+ """Checks if X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q) is true"""
+ a, b, q, r, knownfailure = case
+ if knownfailure:
+ pytest.xfail(reason=knownfailure)
+
+ atol = self.max_tol[j]
+
+ x = solve_discrete_are(a, b, q, r)
+ bH = b.conj().T
+ xa, xb = x @ a, x @ b
+
+ res = a.conj().T @ xa - x + q
+ res -= a.conj().T @ xb @ (solve(r + bH @ xb, bH) @ xa)
+
+ # changed from
+ # assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)
+ # in gh-18012 as it's easier to relax a tolerance and allclose is
+ # preferred
+ assert_allclose(res, np.zeros_like(res), atol=atol)
+
+ def test_infeasible(self):
+ # An infeasible example taken from https://arxiv.org/abs/1505.04861v1
+ A = np.triu(np.ones((3, 3)))
+ A[0, 1] = -1
+ B = np.array([[1, 1, 0], [0, 0, 1]]).T
+ Q = np.full_like(A, -2) + np.diag([8, -1, -1.9])
+ R = np.diag([-10, 0.1])
+ assert_raises(LinAlgError, solve_continuous_are, A, B, Q, R)
+
+
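+# ---------------------------------------------------------------------------
+# Illustrative sketch (not part of the original SciPy test file): for a
+# 1x1 problem the discrete ARE checked above,
+#     X = A'XA - (A'XB)(R + B'XB)^-1(B'XA) + Q,
+# with A = B = Q = R = 1 reduces to X**2 - X - 1 = 0, whose stabilizing
+# root is the golden ratio (1 + sqrt(5)) / 2.  This is a hand-worked
+# consistency check added for illustration, separate from the darex
+# benchmarks above.
+def _sketch_scalar_discrete_are():
+    one = np.array([[1.0]])
+    x = solve_discrete_are(one, one, one, one)
+    assert_allclose(x, [[(1 + np.sqrt(5)) / 2]])
+
+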
+def test_solve_generalized_continuous_are():
+ cases = [
+ # Two random examples that differ only in the s term, used in the
+ # absence of any literature providing demanding examples.
+ (np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
+ [4.617139e-02, 6.948286e-01, 3.444608e-02],
+ [9.713178e-02, 3.170995e-01, 4.387444e-01]]),
+ np.array([[3.815585e-01, 1.868726e-01],
+ [7.655168e-01, 4.897644e-01],
+ [7.951999e-01, 4.455862e-01]]),
+ np.eye(3),
+ np.eye(2),
+ np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
+ [7.093648e-01, 6.797027e-01, 1.189977e-01],
+ [7.546867e-01, 6.550980e-01, 4.983641e-01]]),
+ np.zeros((3, 2)),
+ None),
+ (np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
+ [4.617139e-02, 6.948286e-01, 3.444608e-02],
+ [9.713178e-02, 3.170995e-01, 4.387444e-01]]),
+ np.array([[3.815585e-01, 1.868726e-01],
+ [7.655168e-01, 4.897644e-01],
+ [7.951999e-01, 4.455862e-01]]),
+ np.eye(3),
+ np.eye(2),
+ np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
+ [7.093648e-01, 6.797027e-01, 1.189977e-01],
+ [7.546867e-01, 6.550980e-01, 4.983641e-01]]),
+ np.ones((3, 2)),
+ None)
+ ]
+
+ min_decimal = (10, 10)
+
+ def _test_factory(case, dec):
+ """Checks if X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q) is true"""
+ a, b, q, r, e, s, knownfailure = case
+ if knownfailure:
+ pytest.xfail(reason=knownfailure)
+
+ x = solve_continuous_are(a, b, q, r, e, s)
+ res = a.conj().T.dot(x.dot(e)) + e.conj().T.dot(x.dot(a)) + q
+ out_fact = e.conj().T.dot(x).dot(b) + s
+ res -= out_fact.dot(solve(np.atleast_2d(r), out_fact.conj().T))
+ assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)
+
+ for ind, case in enumerate(cases):
+ _test_factory(case, min_decimal[ind])
+
+
+def test_solve_generalized_discrete_are():
+ mat20170120 = _load_data('gendare_20170120_data.npz')
+
+ cases = [
+ # Two random examples that differ only in the s term, used in the
+ # absence of any literature providing demanding examples.
+ (np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
+ [4.617139e-02, 6.948286e-01, 3.444608e-02],
+ [9.713178e-02, 3.170995e-01, 4.387444e-01]]),
+ np.array([[3.815585e-01, 1.868726e-01],
+ [7.655168e-01, 4.897644e-01],
+ [7.951999e-01, 4.455862e-01]]),
+ np.eye(3),
+ np.eye(2),
+ np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
+ [7.093648e-01, 6.797027e-01, 1.189977e-01],
+ [7.546867e-01, 6.550980e-01, 4.983641e-01]]),
+ np.zeros((3, 2)),
+ None),
+ (np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01],
+ [4.617139e-02, 6.948286e-01, 3.444608e-02],
+ [9.713178e-02, 3.170995e-01, 4.387444e-01]]),
+ np.array([[3.815585e-01, 1.868726e-01],
+ [7.655168e-01, 4.897644e-01],
+ [7.951999e-01, 4.455862e-01]]),
+ np.eye(3),
+ np.eye(2),
+ np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01],
+ [7.093648e-01, 6.797027e-01, 1.189977e-01],
+ [7.546867e-01, 6.550980e-01, 4.983641e-01]]),
+ np.ones((3, 2)),
+ "Presumed issue with OpenBLAS, see gh-16926"),
+ # user-reported (under PR-6616) 20-Jan-2017
+ # tests against the case where E is None but S is provided
+ (mat20170120['A'],
+ mat20170120['B'],
+ mat20170120['Q'],
+ mat20170120['R'],
+ None,
+ mat20170120['S'],
+ None),
+ ]
+
+ max_atol = (1.5e-11, 1.5e-11, 3.5e-16)
+
+ def _test_factory(case, atol):
+ """Checks if X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q) is true"""
+ a, b, q, r, e, s, knownfailure = case
+ if knownfailure:
+ pytest.xfail(reason=knownfailure)
+
+ x = solve_discrete_are(a, b, q, r, e, s)
+ if e is None:
+ e = np.eye(a.shape[0])
+ if s is None:
+ s = np.zeros_like(b)
+ res = a.conj().T.dot(x.dot(a)) - e.conj().T.dot(x.dot(e)) + q
+ res -= (a.conj().T.dot(x.dot(b)) + s).dot(
+ solve(r+b.conj().T.dot(x.dot(b)),
+ (b.conj().T.dot(x.dot(a)) + s.conj().T)
+ )
+ )
+ # changed from:
+ # assert_array_almost_equal(res, np.zeros_like(res), decimal=dec)
+ # in gh-17950 because of a Linux 32 bit fail.
+ assert_allclose(res, np.zeros_like(res), atol=atol)
+
+ for ind, case in enumerate(cases):
+ _test_factory(case, max_atol[ind])
+
+
+def test_are_validate_args():
+
+ def test_square_shape():
+ nsq = np.ones((3, 2))
+ sq = np.eye(3)
+ for x in (solve_continuous_are, solve_discrete_are):
+ assert_raises(ValueError, x, nsq, 1, 1, 1)
+ assert_raises(ValueError, x, sq, sq, nsq, 1)
+ assert_raises(ValueError, x, sq, sq, sq, nsq)
+ assert_raises(ValueError, x, sq, sq, sq, sq, nsq)
+
+ def test_compatible_sizes():
+ nsq = np.ones((3, 2))
+ sq = np.eye(4)
+ for x in (solve_continuous_are, solve_discrete_are):
+ assert_raises(ValueError, x, sq, nsq, 1, 1)
+ assert_raises(ValueError, x, sq, sq, sq, sq, sq, nsq)
+ assert_raises(ValueError, x, sq, sq, np.eye(3), sq)
+ assert_raises(ValueError, x, sq, sq, sq, np.eye(3))
+ assert_raises(ValueError, x, sq, sq, sq, sq, np.eye(3))
+
+ def test_symmetry():
+ nsym = np.arange(9).reshape(3, 3)
+ sym = np.eye(3)
+ for x in (solve_continuous_are, solve_discrete_are):
+ assert_raises(ValueError, x, sym, sym, nsym, sym)
+ assert_raises(ValueError, x, sym, sym, sym, nsym)
+
+ def test_singularity():
+ sing = np.full((3, 3), 1e12)
+ sing[2, 2] -= 1
+ sq = np.eye(3)
+ for x in (solve_continuous_are, solve_discrete_are):
+ assert_raises(ValueError, x, sq, sq, sq, sq, sing)
+
+ assert_raises(ValueError, solve_continuous_are, sq, sq, sq, sing)
+
+ def test_finiteness():
+ nm = np.full((2, 2), np.nan)
+ sq = np.eye(2)
+ for x in (solve_continuous_are, solve_discrete_are):
+ assert_raises(ValueError, x, nm, sq, sq, sq)
+ assert_raises(ValueError, x, sq, nm, sq, sq)
+ assert_raises(ValueError, x, sq, sq, nm, sq)
+ assert_raises(ValueError, x, sq, sq, sq, nm)
+ assert_raises(ValueError, x, sq, sq, sq, sq, nm)
+ assert_raises(ValueError, x, sq, sq, sq, sq, sq, nm)
+
+ # The nested checks above are not collected by pytest and must be
+ # invoked explicitly, otherwise this test silently does nothing.
+ test_square_shape()
+ test_compatible_sizes()
+ test_symmetry()
+ test_singularity()
+ test_finiteness()
+
+
+class TestSolveSylvester:
+
+ cases = [
+ # a, b, c all real.
+ (np.array([[1, 2], [0, 4]]),
+ np.array([[5, 6], [0, 8]]),
+ np.array([[9, 10], [11, 12]])),
+ # a, b, c all real, 4x4. a and b have non-trivial 2x2 blocks in their
+ # quasi-triangular form.
+ (np.array([[1.0, 0, 0, 0],
+ [0, 1.0, 2.0, 0.0],
+ [0, 0, 3.0, -4],
+ [0, 0, 2, 5]]),
+ np.array([[2.0, 0, 0, 1.0],
+ [0, 1.0, 0.0, 0.0],
+ [0, 0, 1.0, -1],
+ [0, 0, 1, 1]]),
+ np.array([[1.0, 0, 0, 0],
+ [0, 1.0, 0, 0],
+ [0, 0, 1.0, 0],
+ [0, 0, 0, 1.0]])),
+ # a, b, c all complex.
+ (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
+ np.array([[-1.0, 2j], [3.0, 4.0]]),
+ np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
+ # a and b real; c complex.
+ (np.array([[1.0, 2.0], [3.0, 5.0]]),
+ np.array([[-1.0, 0], [3.0, 4.0]]),
+ np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
+ # a and c complex; b real.
+ (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
+ np.array([[-1.0, 0], [3.0, 4.0]]),
+ np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])),
+ # a complex; b and c real.
+ (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]),
+ np.array([[-1.0, 0], [3.0, 4.0]]),
+ np.array([[2.0, 2.0], [-1.0, 2.0]])),
+ # not square matrices, real
+ (np.array([[8, 1, 6], [3, 5, 7], [4, 9, 2]]),
+ np.array([[2, 3], [4, 5]]),
+ np.array([[1, 2], [3, 4], [5, 6]])),
+ # not square matrices, complex
+ (np.array([[8, 1j, 6+2j], [3, 5, 7], [4, 9, 2]]),
+ np.array([[2, 3], [4, 5-1j]]),
+ np.array([[1, 2j], [3, 4j], [5j, 6+7j]])),
+ ]
+
+ def check_case(self, a, b, c):
+ x = solve_sylvester(a, b, c)
+ assert_array_almost_equal(np.dot(a, x) + np.dot(x, b), c)
+
+ def test_cases(self):
+ for case in self.cases:
+ self.check_case(case[0], case[1], case[2])
+
+ def test_trivial(self):
+ a = np.array([[1.0, 0.0], [0.0, 1.0]])
+ b = np.array([[1.0]])
+ c = np.array([2.0, 2.0]).reshape(-1, 1)
+ x = solve_sylvester(a, b, c)
+ assert_array_almost_equal(x, np.array([1.0, 1.0]).reshape(-1, 1))
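+
+
+ # Editorial sketch (illustrative, not from upstream SciPy): a minimal,
+ # self-contained check of the identity the Sylvester tests above verify,
+ # namely that solve_sylvester(a, b, c) returns x with a @ x + x @ b == c.
+ def test_sylvester_identity_sketch():
+ a = np.array([[1.0, 2.0], [0.0, 4.0]])
+ b = np.array([[5.0, 6.0], [0.0, 8.0]])
+ c = np.array([[9.0, 10.0], [11.0, 12.0]])
+ x = solve_sylvester(a, b, c)
+ assert_array_almost_equal(a @ x + x @ b, c)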
diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_special_matrices.py b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_special_matrices.py
new file mode 100644
index 0000000000000000000000000000000000000000..3edc8c176717fffe55268a12b2bdd6e609feb556
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/linalg/tests/test_special_matrices.py
@@ -0,0 +1,597 @@
+import pytest
+import numpy as np
+from numpy import arange, array, eye, copy, sqrt
+from numpy.testing import (assert_equal, assert_array_equal,
+ assert_array_almost_equal, assert_allclose)
+from pytest import raises as assert_raises
+
+from scipy.fft import fft
+from scipy.special import comb
+from scipy.linalg import (toeplitz, hankel, circulant, hadamard, leslie, dft,
+ companion, kron, block_diag,
+ helmert, hilbert, invhilbert, pascal, invpascal,
+ fiedler, fiedler_companion, eigvals,
+ convolution_matrix)
+from numpy.linalg import cond
+
+
+class TestToeplitz:
+
+ def test_basic(self):
+ y = toeplitz([1, 2, 3])
+ assert_array_equal(y, [[1, 2, 3], [2, 1, 2], [3, 2, 1]])
+ y = toeplitz([1, 2, 3], [1, 4, 5])
+ assert_array_equal(y, [[1, 4, 5], [2, 1, 4], [3, 2, 1]])
+
+ def test_complex_01(self):
+ data = (1.0 + arange(3.0)) * (1.0 + 1.0j)
+ x = copy(data)
+ t = toeplitz(x)
+ # Calling toeplitz should not change x.
+ assert_array_equal(x, data)
+ # According to the docstring, x should be the first column of t.
+ col0 = t[:, 0]
+ assert_array_equal(col0, data)
+ assert_array_equal(t[0, 1:], data[1:].conj())
+
+ def test_scalar_00(self):
+ """Scalar arguments still produce a 2D array."""
+ t = toeplitz(10)
+ assert_array_equal(t, [[10]])
+ t = toeplitz(10, 20)
+ assert_array_equal(t, [[10]])
+
+ def test_scalar_01(self):
+ c = array([1, 2, 3])
+ t = toeplitz(c, 1)
+ assert_array_equal(t, [[1], [2], [3]])
+
+ def test_scalar_02(self):
+ c = array([1, 2, 3])
+ t = toeplitz(c, array(1))
+ assert_array_equal(t, [[1], [2], [3]])
+
+ def test_scalar_03(self):
+ c = array([1, 2, 3])
+ t = toeplitz(c, array([1]))
+ assert_array_equal(t, [[1], [2], [3]])
+
+ def test_scalar_04(self):
+ r = array([10, 2, 3])
+ t = toeplitz(1, r)
+ assert_array_equal(t, [[1, 2, 3]])
+
+
+class TestHankel:
+ def test_basic(self):
+ y = hankel([1, 2, 3])
+ assert_array_equal(y, [[1, 2, 3], [2, 3, 0], [3, 0, 0]])
+ y = hankel([1, 2, 3], [3, 4, 5])
+ assert_array_equal(y, [[1, 2, 3], [2, 3, 4], [3, 4, 5]])
+
+
+class TestCirculant:
+ def test_basic(self):
+ y = circulant([1, 2, 3])
+ assert_array_equal(y, [[1, 3, 2], [2, 1, 3], [3, 2, 1]])
+
+
+class TestHadamard:
+
+ def test_basic(self):
+
+ y = hadamard(1)
+ assert_array_equal(y, [[1]])
+
+ y = hadamard(2, dtype=float)
+ assert_array_equal(y, [[1.0, 1.0], [1.0, -1.0]])
+
+ y = hadamard(4)
+ assert_array_equal(y, [[1, 1, 1, 1],
+ [1, -1, 1, -1],
+ [1, 1, -1, -1],
+ [1, -1, -1, 1]])
+
+ assert_raises(ValueError, hadamard, 0)
+ assert_raises(ValueError, hadamard, 5)
+
+
+class TestLeslie:
+
+ def test_bad_shapes(self):
+ assert_raises(ValueError, leslie, [[1, 1], [2, 2]], [3, 4, 5])
+ assert_raises(ValueError, leslie, [3, 4, 5], [[1, 1], [2, 2]])
+ assert_raises(ValueError, leslie, [1, 2], [1, 2])
+ assert_raises(ValueError, leslie, [1], [])
+
+ def test_basic(self):
+ a = leslie([1, 2, 3], [0.25, 0.5])
+ expected = array([[1.0, 2.0, 3.0],
+ [0.25, 0.0, 0.0],
+ [0.0, 0.5, 0.0]])
+ assert_array_equal(a, expected)
+
+
+class TestCompanion:
+
+ def test_bad_shapes(self):
+ assert_raises(ValueError, companion, [[1, 1], [2, 2]])
+ assert_raises(ValueError, companion, [0, 4, 5])
+ assert_raises(ValueError, companion, [1])
+ assert_raises(ValueError, companion, [])
+
+ def test_basic(self):
+ c = companion([1, 2, 3])
+ expected = array([
+ [-2.0, -3.0],
+ [1.0, 0.0]])
+ assert_array_equal(c, expected)
+
+ c = companion([2.0, 5.0, -10.0])
+ expected = array([
+ [-2.5, 5.0],
+ [1.0, 0.0]])
+ assert_array_equal(c, expected)
+
+
+class TestBlockDiag:
+ def test_basic(self):
+ x = block_diag(eye(2), [[1, 2], [3, 4], [5, 6]], [[1, 2, 3]])
+ assert_array_equal(x, [[1, 0, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 0, 0],
+ [0, 0, 1, 2, 0, 0, 0],
+ [0, 0, 3, 4, 0, 0, 0],
+ [0, 0, 5, 6, 0, 0, 0],
+ [0, 0, 0, 0, 1, 2, 3]])
+
+ def test_dtype(self):
+ x = block_diag([[1.5]])
+ assert_equal(x.dtype, float)
+
+ x = block_diag([[True]])
+ assert_equal(x.dtype, bool)
+
+ def test_mixed_dtypes(self):
+ actual = block_diag([[1]], [[1j]])
+ desired = np.array([[1, 0], [0, 1j]])
+ assert_array_equal(actual, desired)
+
+ def test_scalar_and_1d_args(self):
+ a = block_diag(1)
+ assert_equal(a.shape, (1, 1))
+ assert_array_equal(a, [[1]])
+
+ a = block_diag([2, 3], 4)
+ assert_array_equal(a, [[2, 3, 0], [0, 0, 4]])
+
+ def test_bad_arg(self):
+ assert_raises(ValueError, block_diag, [[[1]]])
+
+ def test_no_args(self):
+ a = block_diag()
+ assert_equal(a.ndim, 2)
+ assert_equal(a.nbytes, 0)
+
+ def test_empty_matrix_arg(self):
+ # regression test for gh-4596: check the shape of the result
+ # for empty matrix inputs. Empty matrices are no longer ignored
+ # (gh-4908); an empty list is viewed as a matrix with shape (1, 0).
+ a = block_diag([[1, 0], [0, 1]],
+ [],
+ [[2, 3], [4, 5], [6, 7]])
+ assert_array_equal(a, [[1, 0, 0, 0],
+ [0, 1, 0, 0],
+ [0, 0, 0, 0],
+ [0, 0, 2, 3],
+ [0, 0, 4, 5],
+ [0, 0, 6, 7]])
+
+ def test_zerosized_matrix_arg(self):
+ # test for gh-4908: check the shape of the result for
+ # zero-sized matrix inputs, i.e. matrices with shape (0,n) or (n,0).
+ # note that [[]] takes shape (1,0)
+ a = block_diag([[1, 0], [0, 1]],
+ [[]],
+ [[2, 3], [4, 5], [6, 7]],
+ np.zeros([0, 2], dtype='int32'))
+ assert_array_equal(a, [[1, 0, 0, 0, 0, 0],
+ [0, 1, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0],
+ [0, 0, 2, 3, 0, 0],
+ [0, 0, 4, 5, 0, 0],
+ [0, 0, 6, 7, 0, 0]])
+
+
+class TestKron:
+
+ def test_basic(self):
+
+ a = kron(array([[1, 2], [3, 4]]), array([[1, 1, 1]]))
+ assert_array_equal(a, array([[1, 1, 1, 2, 2, 2],
+ [3, 3, 3, 4, 4, 4]]))
+
+ m1 = array([[1, 2], [3, 4]])
+ m2 = array([[10], [11]])
+ a = kron(m1, m2)
+ expected = array([[10, 20],
+ [11, 22],
+ [30, 40],
+ [33, 44]])
+ assert_array_equal(a, expected)
+
+
+class TestHelmert:
+
+ def test_orthogonality(self):
+ for n in range(1, 7):
+ H = helmert(n, full=True)
+ Id = np.eye(n)
+ assert_allclose(H.dot(H.T), Id, atol=1e-12)
+ assert_allclose(H.T.dot(H), Id, atol=1e-12)
+
+ def test_subspace(self):
+ for n in range(2, 7):
+ H_full = helmert(n, full=True)
+ H_partial = helmert(n)
+ for U in H_full[1:, :].T, H_partial.T:
+ C = np.eye(n) - np.full((n, n), 1 / n)
+ assert_allclose(U.dot(U.T), C)
+ assert_allclose(U.T.dot(U), np.eye(n-1), atol=1e-12)
+
+
+class TestHilbert:
+
+ def test_basic(self):
+ h3 = array([[1.0, 1/2., 1/3.],
+ [1/2., 1/3., 1/4.],
+ [1/3., 1/4., 1/5.]])
+ assert_array_almost_equal(hilbert(3), h3)
+
+ assert_array_equal(hilbert(1), [[1.0]])
+
+ h0 = hilbert(0)
+ assert_equal(h0.shape, (0, 0))
+
+
+class TestInvHilbert:
+
+ def test_basic(self):
+ invh1 = array([[1]])
+ assert_array_equal(invhilbert(1, exact=True), invh1)
+ assert_array_equal(invhilbert(1), invh1)
+
+ invh2 = array([[4, -6],
+ [-6, 12]])
+ assert_array_equal(invhilbert(2, exact=True), invh2)
+ assert_array_almost_equal(invhilbert(2), invh2)
+
+ invh3 = array([[9, -36, 30],
+ [-36, 192, -180],
+ [30, -180, 180]])
+ assert_array_equal(invhilbert(3, exact=True), invh3)
+ assert_array_almost_equal(invhilbert(3), invh3)
+
+ invh4 = array([[16, -120, 240, -140],
+ [-120, 1200, -2700, 1680],
+ [240, -2700, 6480, -4200],
+ [-140, 1680, -4200, 2800]])
+ assert_array_equal(invhilbert(4, exact=True), invh4)
+ assert_array_almost_equal(invhilbert(4), invh4)
+
+ invh5 = array([[25, -300, 1050, -1400, 630],
+ [-300, 4800, -18900, 26880, -12600],
+ [1050, -18900, 79380, -117600, 56700],
+ [-1400, 26880, -117600, 179200, -88200],
+ [630, -12600, 56700, -88200, 44100]])
+ assert_array_equal(invhilbert(5, exact=True), invh5)
+ assert_array_almost_equal(invhilbert(5), invh5)
+
+ invh17 = array([
+ [289, -41616, 1976760, -46124400, 629598060, -5540462928,
+ 33374693352, -143034400080, 446982500250, -1033026222800,
+ 1774926873720, -2258997839280, 2099709530100, -1384423866000,
+ 613101997800, -163493866080, 19835652870],
+ [-41616, 7990272, -426980160, 10627061760, -151103534400,
+ 1367702848512, -8410422724704, 36616806420480, -115857864064800,
+ 270465047424000, -468580694662080, 600545887119360,
+ -561522320049600, 372133135180800, -165537539406000,
+ 44316454993920, -5395297580640],
+ [1976760, -426980160, 24337869120, -630981792000, 9228108708000,
+ -85267724461920, 532660105897920, -2348052711713280,
+ 7504429831470000, -17664748409880000, 30818191841236800,
+ -39732544853164800, 37341234283298400, -24857330514030000,
+ 11100752642520000, -2982128117299200, 364182586693200],
+ [-46124400, 10627061760, -630981792000, 16826181120000,
+ -251209625940000, 2358021022156800, -14914482965141760,
+ 66409571644416000, -214015221119700000, 507295338950400000,
+ -890303319857952000, 1153715376477081600, -1089119333262870000,
+ 727848632044800000, -326170262829600000, 87894302404608000,
+ -10763618673376800],
+ [629598060, -151103534400, 9228108708000,
+ -251209625940000, 3810012660090000, -36210360321495360,
+ 231343968720664800, -1038687206500944000, 3370739732635275000,
+ -8037460526495400000, 14178080368737885600, -18454939322943942000,
+ 17489975175339030000, -11728977435138600000, 5272370630081100000,
+ -1424711708039692800, 174908803442373000],
+ [-5540462928, 1367702848512, -85267724461920, 2358021022156800,
+ -36210360321495360, 347619459086355456, -2239409617216035264,
+ 10124803292907663360, -33052510749726468000,
+ 79217210949138662400, -140362995650505067440,
+ 183420385176741672960, -174433352415381259200,
+ 117339159519533952000, -52892422160973595200,
+ 14328529177999196160, -1763080738699119840],
+ [33374693352, -8410422724704, 532660105897920,
+ -14914482965141760, 231343968720664800, -2239409617216035264,
+ 14527452132196331328, -66072377044391477760,
+ 216799987176909536400, -521925895055522958000,
+ 928414062734059661760, -1217424500995626443520,
+ 1161358898976091015200, -783401860847777371200,
+ 354015418167362952000, -96120549902411274240,
+ 11851820521255194480],
+ [-143034400080, 36616806420480, -2348052711713280,
+ 66409571644416000, -1038687206500944000, 10124803292907663360,
+ -66072377044391477760, 302045152202932469760,
+ -995510145200094810000, 2405996923185123840000,
+ -4294704507885446054400, 5649058909023744614400,
+ -5403874060541811254400, 3654352703663101440000,
+ -1655137020003255360000, 450325202737117593600,
+ -55630994283442749600],
+ [446982500250, -115857864064800, 7504429831470000,
+ -214015221119700000, 3370739732635275000, -33052510749726468000,
+ 216799987176909536400, -995510145200094810000,
+ 3293967392206196062500, -7988661659013106500000,
+ 14303908928401362270000, -18866974090684772052000,
+ 18093328327706957325000, -12263364009096700500000,
+ 5565847995255512250000, -1517208935002984080000,
+ 187754605706619279900],
+ [-1033026222800, 270465047424000, -17664748409880000,
+ 507295338950400000, -8037460526495400000, 79217210949138662400,
+ -521925895055522958000, 2405996923185123840000,
+ -7988661659013106500000, 19434404971634224000000,
+ -34894474126569249192000, 46141453390504792320000,
+ -44349976506971935800000, 30121928988527376000000,
+ -13697025107665828500000, 3740200989399948902400,
+ -463591619028689580000],
+ [1774926873720, -468580694662080,
+ 30818191841236800, -890303319857952000, 14178080368737885600,
+ -140362995650505067440, 928414062734059661760,
+ -4294704507885446054400, 14303908928401362270000,
+ -34894474126569249192000, 62810053427824648545600,
+ -83243376594051600326400, 80177044485212743068000,
+ -54558343880470209780000, 24851882355348879230400,
+ -6797096028813368678400, 843736746632215035600],
+ [-2258997839280, 600545887119360, -39732544853164800,
+ 1153715376477081600, -18454939322943942000, 183420385176741672960,
+ -1217424500995626443520, 5649058909023744614400,
+ -18866974090684772052000, 46141453390504792320000,
+ -83243376594051600326400, 110552468520163390156800,
+ -106681852579497947388000, 72720410752415168870400,
+ -33177973900974346080000, 9087761081682520473600,
+ -1129631016152221783200],
+ [2099709530100, -561522320049600, 37341234283298400,
+ -1089119333262870000, 17489975175339030000,
+ -174433352415381259200, 1161358898976091015200,
+ -5403874060541811254400, 18093328327706957325000,
+ -44349976506971935800000, 80177044485212743068000,
+ -106681852579497947388000, 103125790826848015808400,
+ -70409051543137015800000, 32171029219823375700000,
+ -8824053728865840192000, 1098252376814660067000],
+ [-1384423866000, 372133135180800,
+ -24857330514030000, 727848632044800000, -11728977435138600000,
+ 117339159519533952000, -783401860847777371200,
+ 3654352703663101440000, -12263364009096700500000,
+ 30121928988527376000000, -54558343880470209780000,
+ 72720410752415168870400, -70409051543137015800000,
+ 48142941226076592000000, -22027500987368499000000,
+ 6049545098753157120000, -753830033789944188000],
+ [613101997800, -165537539406000,
+ 11100752642520000, -326170262829600000, 5272370630081100000,
+ -52892422160973595200, 354015418167362952000,
+ -1655137020003255360000, 5565847995255512250000,
+ -13697025107665828500000, 24851882355348879230400,
+ -33177973900974346080000, 32171029219823375700000,
+ -22027500987368499000000, 10091416708498869000000,
+ -2774765838662800128000, 346146444087219270000],
+ [-163493866080, 44316454993920, -2982128117299200,
+ 87894302404608000, -1424711708039692800,
+ 14328529177999196160, -96120549902411274240,
+ 450325202737117593600, -1517208935002984080000,
+ 3740200989399948902400, -6797096028813368678400,
+ 9087761081682520473600, -8824053728865840192000,
+ 6049545098753157120000, -2774765838662800128000,
+ 763806510427609497600, -95382575704033754400],
+ [19835652870, -5395297580640, 364182586693200, -10763618673376800,
+ 174908803442373000, -1763080738699119840, 11851820521255194480,
+ -55630994283442749600, 187754605706619279900,
+ -463591619028689580000, 843736746632215035600,
+ -1129631016152221783200, 1098252376814660067000,
+ -753830033789944188000, 346146444087219270000,
+ -95382575704033754400, 11922821963004219300]
+ ])
+ assert_array_equal(invhilbert(17, exact=True), invh17)
+ assert_allclose(invhilbert(17), invh17.astype(float), rtol=1e-12)
+
+ def test_inverse(self):
+ for n in range(1, 10):
+ a = hilbert(n)
+ b = invhilbert(n)
+ # The Hilbert matrix is increasingly badly conditioned,
+ # so take that into account in the test
+ c = cond(a)
+ assert_allclose(a.dot(b), eye(n), atol=1e-15*c, rtol=1e-15*c)
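+
+ # Editorial note (illustrative, not from upstream SciPy): scaling the
+ # tolerance by cond(a) reflects that a residual of roughly
+ # machine-epsilon * cond(a) is the best that can be expected when
+ # forming the product of an ill-conditioned matrix and its inverse in
+ # double precision.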
+
+
+class TestPascal:
+
+ cases = [
+ (1, array([[1]]), array([[1]])),
+ (2, array([[1, 1],
+ [1, 2]]),
+ array([[1, 0],
+ [1, 1]])),
+ (3, array([[1, 1, 1],
+ [1, 2, 3],
+ [1, 3, 6]]),
+ array([[1, 0, 0],
+ [1, 1, 0],
+ [1, 2, 1]])),
+ (4, array([[1, 1, 1, 1],
+ [1, 2, 3, 4],
+ [1, 3, 6, 10],
+ [1, 4, 10, 20]]),
+ array([[1, 0, 0, 0],
+ [1, 1, 0, 0],
+ [1, 2, 1, 0],
+ [1, 3, 3, 1]])),
+ ]
+
+ def check_case(self, n, sym, low):
+ assert_array_equal(pascal(n), sym)
+ assert_array_equal(pascal(n, kind='lower'), low)
+ assert_array_equal(pascal(n, kind='upper'), low.T)
+ assert_array_almost_equal(pascal(n, exact=False), sym)
+ assert_array_almost_equal(pascal(n, exact=False, kind='lower'), low)
+ assert_array_almost_equal(pascal(n, exact=False, kind='upper'), low.T)
+
+ def test_cases(self):
+ for n, sym, low in self.cases:
+ self.check_case(n, sym, low)
+
+ def test_big(self):
+ p = pascal(50)
+ assert p[-1, -1] == comb(98, 49, exact=True)
+
+ def test_threshold(self):
+ # Regression test. An early version of `pascal` returned an
+ # array of type np.uint64 for n=35, but that data type is too small
+ # to hold p[-1, -1]. The second assert_equal below would fail
+ # because p[-1, -1] overflowed.
+ p = pascal(34)
+ assert_equal(2*p.item(-1, -2), p.item(-1, -1), err_msg="n = 34")
+ p = pascal(35)
+ assert_equal(2.*p.item(-1, -2), 1.*p.item(-1, -1), err_msg="n = 35")
+
+
+def test_invpascal():
+
+ def check_invpascal(n, kind, exact):
+ ip = invpascal(n, kind=kind, exact=exact)
+ p = pascal(n, kind=kind, exact=exact)
+ # Matrix-multiply ip and p, and check that we get the identity matrix.
+ # We can't use the simple expression e = ip.dot(p), because when
+ # n < 35 and exact is True, p.dtype is np.uint64 and ip.dtype is
+ # np.int64. The product of those dtypes is np.float64, which loses
+ # precision when n is greater than 18. Instead we'll cast both to
+ # object arrays, and then multiply.
+ e = ip.astype(object).dot(p.astype(object))
+ assert_array_equal(e, eye(n), err_msg="n=%d kind=%r exact=%r" %
+ (n, kind, exact))
+
+ kinds = ['symmetric', 'lower', 'upper']
+
+ ns = [1, 2, 5, 18]
+ for n in ns:
+ for kind in kinds:
+ for exact in [True, False]:
+ check_invpascal(n, kind, exact)
+
+ ns = [19, 34, 35, 50]
+ for n in ns:
+ for kind in kinds:
+ check_invpascal(n, kind, True)
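+
+ # Editorial note (illustrative, not from upstream SciPy): the dtype issue
+ # that check_invpascal works around can be seen directly --
+ # np.result_type(np.uint64, np.int64) is float64, whose 53-bit mantissa
+ # cannot represent the exact integer products involved, hence the cast to
+ # object arrays above.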
+
+
+def test_dft():
+ m = dft(2)
+ expected = array([[1.0, 1.0], [1.0, -1.0]])
+ assert_array_almost_equal(m, expected)
+ m = dft(2, scale='n')
+ assert_array_almost_equal(m, expected/2.0)
+ m = dft(2, scale='sqrtn')
+ assert_array_almost_equal(m, expected/sqrt(2.0))
+
+ x = array([0, 1, 2, 3, 4, 5, 0, 1])
+ m = dft(8)
+ mx = m.dot(x)
+ fx = fft(x)
+ assert_array_almost_equal(mx, fx)
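+
+ # Editorial note (illustrative, not from upstream SciPy): dft(n) is the
+ # unscaled DFT matrix whose (m, k) entry is exp(-2j*pi*m*k/n), which is
+ # why multiplying it with a vector reproduces scipy.fft.fft of that
+ # vector, as checked above.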
+
+
+def test_fiedler():
+ f = fiedler([])
+ assert_equal(f.size, 0)
+ f = fiedler([123.])
+ assert_array_equal(f, np.array([[0.]]))
+ f = fiedler(np.arange(1, 7))
+ des = np.array([[0, 1, 2, 3, 4, 5],
+ [1, 0, 1, 2, 3, 4],
+ [2, 1, 0, 1, 2, 3],
+ [3, 2, 1, 0, 1, 2],
+ [4, 3, 2, 1, 0, 1],
+ [5, 4, 3, 2, 1, 0]])
+ assert_array_equal(f, des)
+
+
+def test_fiedler_companion():
+ fc = fiedler_companion([])
+ assert_equal(fc.size, 0)
+ fc = fiedler_companion([1.])
+ assert_equal(fc.size, 0)
+ fc = fiedler_companion([1., 2.])
+ assert_array_equal(fc, np.array([[-2.]]))
+ fc = fiedler_companion([1e-12, 2., 3.])
+ assert_array_almost_equal(fc, companion([1e-12, 2., 3.]))
+ with assert_raises(ValueError):
+ fiedler_companion([0, 1, 2])
+ fc = fiedler_companion([1., -16., 86., -176., 105.])
+ assert_array_almost_equal(eigvals(fc),
+ np.array([7., 5., 3., 1.]))
+
+
+class TestConvolutionMatrix:
+ """
+ Test convolution_matrix vs. numpy.convolve for various parameters.
+ """
+
+ def create_vector(self, n, cpx):
+ """Make a complex or real test vector of length n."""
+ x = np.linspace(-2.5, 2.2, n)
+ if cpx:
+ x = x + 1j*np.linspace(-1.5, 3.1, n)
+ return x
+
+ def test_bad_n(self):
+ # n must be a positive integer
+ with pytest.raises(ValueError, match='n must be a positive integer'):
+ convolution_matrix([1, 2, 3], 0)
+
+ def test_bad_first_arg(self):
+ # first arg must be a 1d array, otherwise ValueError
+ with pytest.raises(ValueError, match='one-dimensional'):
+ convolution_matrix(1, 4)
+
+ def test_empty_first_arg(self):
+ # first arg must have at least one value
+ with pytest.raises(ValueError, match=r'len\(a\)'):
+ convolution_matrix([], 4)
+
+ def test_bad_mode(self):
+ # mode must be in ('full', 'valid', 'same')
+ with pytest.raises(ValueError, match='mode.*must be one of'):
+ convolution_matrix((1, 1), 4, mode='invalid argument')
+
+ @pytest.mark.parametrize('cpx', [False, True])
+ @pytest.mark.parametrize('na', [1, 2, 9])
+ @pytest.mark.parametrize('nv', [1, 2, 9])
+ @pytest.mark.parametrize('mode', [None, 'full', 'valid', 'same'])
+ def test_against_numpy_convolve(self, cpx, na, nv, mode):
+ a = self.create_vector(na, cpx)
+ v = self.create_vector(nv, cpx)
+ if mode is None:
+ y1 = np.convolve(v, a)
+ A = convolution_matrix(a, nv)
+ else:
+ y1 = np.convolve(v, a, mode)
+ A = convolution_matrix(a, nv, mode)
+ y2 = A @ v
+ assert_array_almost_equal(y1, y2)
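+
+
+ # Editorial sketch (illustrative, not from upstream SciPy): the equivalence
+ # exercised by TestConvolutionMatrix above in its simplest form -- for a
+ # 1-D array `a` and a vector `v` of length n, convolution_matrix(a, n) @ v
+ # equals np.convolve(a, v) in the default 'full' mode.
+ def test_convolution_matrix_sketch():
+ a = np.array([1.0, 2.0, 3.0])
+ v = np.array([4.0, 5.0])
+ A = convolution_matrix(a, len(v))
+ assert_array_almost_equal(A @ v, np.convolve(a, v))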
diff --git a/venv/lib/python3.10/site-packages/scipy/optimize/_basinhopping.py b/venv/lib/python3.10/site-packages/scipy/optimize/_basinhopping.py
new file mode 100644
index 0000000000000000000000000000000000000000..d874a708b9a22ba72be1e63a18a082298e84bbe8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/scipy/optimize/_basinhopping.py
@@ -0,0 +1,753 @@
+"""
+basinhopping: The basinhopping global optimization algorithm
+"""
+import numpy as np
+import math
+import inspect
+import scipy.optimize
+from scipy._lib._util import check_random_state
+
+__all__ = ['basinhopping']
+
+
+_params = (inspect.Parameter('res_new', kind=inspect.Parameter.KEYWORD_ONLY),
+ inspect.Parameter('res_old', kind=inspect.Parameter.KEYWORD_ONLY))
+_new_accept_test_signature = inspect.Signature(parameters=_params)
+
+
+class Storage:
+ """
+ Class used to store the lowest energy structure
+ """
+ def __init__(self, minres):
+ self._add(minres)
+
+ def _add(self, minres):
+ self.minres = minres
+ self.minres.x = np.copy(minres.x)
+
+ def update(self, minres):
+ if minres.success and (minres.fun < self.minres.fun
+ or not self.minres.success):
+ self._add(minres)
+ return True
+ else:
+ return False
+
+ def get_lowest(self):
+ return self.minres
+
+
+class BasinHoppingRunner:
+ """This class implements the core of the basinhopping algorithm.
+
+ x0 : ndarray
+ The starting coordinates.
+ minimizer : callable
+ The local minimizer, with signature ``result = minimizer(x)``.
+ The return value is an `optimize.OptimizeResult` object.
+ step_taking : callable
+ This function displaces the coordinates randomly. Signature should
+ be ``x_new = step_taking(x)``. Note that `x` may be modified in-place.
+ accept_tests : list of callables
+ Each test is passed the kwargs `f_new`, `x_new`, `f_old` and
+ `x_old`. These tests will be used to judge whether or not to accept
+ the step. The acceptable return values are True, False, or ``"force
+ accept"``. If any of the tests return False then the step is rejected.
+ If ``"force accept"``, then this will override any other tests in
+ order to accept the step. This can be used, for example, to forcefully
+ escape from a local minimum that ``basinhopping`` is trapped in.
+ disp : bool, optional
+ Display status messages.
+
+ """
+ def __init__(self, x0, minimizer, step_taking, accept_tests, disp=False):
+ self.x = np.copy(x0)
+ self.minimizer = minimizer
+ self.step_taking = step_taking
+ self.accept_tests = accept_tests
+ self.disp = disp
+
+ self.nstep = 0
+
+ # initialize return object
+ self.res = scipy.optimize.OptimizeResult()
+ self.res.minimization_failures = 0
+
+ # do initial minimization
+ minres = minimizer(self.x)
+ if not minres.success:
+ self.res.minimization_failures += 1
+ if self.disp:
+ print("warning: basinhopping: local minimization failure")
+ self.x = np.copy(minres.x)
+ self.energy = minres.fun
+ self.incumbent_minres = minres # best minimize result found so far
+ if self.disp:
+ print("basinhopping step %d: f %g" % (self.nstep, self.energy))
+
+ # initialize storage class
+ self.storage = Storage(minres)
+
+ if hasattr(minres, "nfev"):
+ self.res.nfev = minres.nfev
+ if hasattr(minres, "njev"):
+ self.res.njev = minres.njev
+ if hasattr(minres, "nhev"):
+ self.res.nhev = minres.nhev
+
+ def _monte_carlo_step(self):
+ """Do one Monte Carlo iteration
+
+ Randomly displace the coordinates, minimize, and decide whether
+ or not to accept the new coordinates.
+ """
+ # Take a random step. Make a copy of x because the step_taking
+ # algorithm might change x in place
+ x_after_step = np.copy(self.x)
+ x_after_step = self.step_taking(x_after_step)
+
+ # do a local minimization
+ minres = self.minimizer(x_after_step)
+ x_after_quench = minres.x
+ energy_after_quench = minres.fun
+ if not minres.success:
+ self.res.minimization_failures += 1
+ if self.disp:
+ print("warning: basinhopping: local minimization failure")
+ if hasattr(minres, "nfev"):
+ self.res.nfev += minres.nfev
+ if hasattr(minres, "njev"):
+ self.res.njev += minres.njev
+ if hasattr(minres, "nhev"):
+ self.res.nhev += minres.nhev
+
+ # accept the move based on self.accept_tests. If any test is False,
+ # then reject the step. If any test returns the special string
+ # 'force accept', then accept the step regardless. This can be used
+ # to forcefully escape from a local minimum if normal basin hopping
+ # steps are not sufficient.
+ accept = True
+ for test in self.accept_tests:
+ if inspect.signature(test) == _new_accept_test_signature:
+ testres = test(res_new=minres, res_old=self.incumbent_minres)
+ else:
+ testres = test(f_new=energy_after_quench, x_new=x_after_quench,
+ f_old=self.energy, x_old=self.x)
+
+ if testres == 'force accept':
+ accept = True
+ break
+ elif testres is None:
+ raise ValueError("accept_tests must return True, False, or "
+ "'force accept'")
+ elif not testres:
+ accept = False
+
+ # Report the result of the acceptance test to the take step class.
+ # This is for adaptive step taking
+ if hasattr(self.step_taking, "report"):
+ self.step_taking.report(accept, f_new=energy_after_quench,
+ x_new=x_after_quench, f_old=self.energy,
+ x_old=self.x)
+
+ return accept, minres
+
+ def one_cycle(self):
+ """Do one cycle of the basinhopping algorithm
+ """
+ self.nstep += 1
+ new_global_min = False
+
+ accept, minres = self._monte_carlo_step()
+
+ if accept:
+ self.energy = minres.fun
+ self.x = np.copy(minres.x)
+ self.incumbent_minres = minres # best minimize result found so far
+ new_global_min = self.storage.update(minres)
+
+ # print some information
+ if self.disp:
+ self.print_report(minres.fun, accept)
+ if new_global_min:
+ print("found new global minimum on step %d with function"
+ " value %g" % (self.nstep, self.energy))
+
+ # save some variables as BasinHoppingRunner attributes
+ self.xtrial = minres.x
+ self.energy_trial = minres.fun
+ self.accept = accept
+
+ return new_global_min
+
+ def print_report(self, energy_trial, accept):
+ """print a status update"""
+ minres = self.storage.get_lowest()
+ print("basinhopping step %d: f %g trial_f %g accepted %d "
+ " lowest_f %g" % (self.nstep, self.energy, energy_trial,
+ accept, minres.fun))
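+
+ # Editorial sketch (illustrative, not part of the SciPy implementation):
+ # accept_tests entries may use either calling convention dispatched on in
+ # _monte_carlo_step above. A test using the newer keyword-only signature
+ # could look like
+ #
+ # def accept_small_step(*, res_new, res_old):
+ # # reject any step that increases the energy by more than 10
+ # return (res_new.fun - res_old.fun) < 10.0
+ #
+ # while the older convention receives f_new, x_new, f_old and x_old as
+ # keyword arguments and returns True, False or "force accept".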
+
+
+class AdaptiveStepsize:
+ """
+ Class to implement adaptive stepsize.
+
+ This class wraps the step taking class and modifies the stepsize to
+ ensure the true acceptance rate is as close as possible to the target.
+
+ Parameters
+ ----------
+ takestep : callable
+ The step taking routine. Must contain modifiable attribute
+ takestep.stepsize
+ accept_rate : float, optional
+ The target step acceptance rate
+ interval : int, optional
+ Interval for how often to update the stepsize
+ factor : float, optional
+ The step size is multiplied or divided by this factor upon each
+ update.
+ verbose : bool, optional
+ Print information about each update
+
+ """
+ def __init__(self, takestep, accept_rate=0.5, interval=50, factor=0.9,
+ verbose=True):
+ self.takestep = takestep
+ self.target_accept_rate = accept_rate
+ self.interval = interval
+ self.factor = factor
+ self.verbose = verbose
+
+ self.nstep = 0
+ self.nstep_tot = 0
+ self.naccept = 0
+
+ def __call__(self, x):
+ return self.take_step(x)
+
+ def _adjust_step_size(self):
+ old_stepsize = self.takestep.stepsize
+ accept_rate = float(self.naccept) / self.nstep
+ if accept_rate > self.target_accept_rate:
+ # We're accepting too many steps. This generally means we're
+ # trapped in a basin. Take bigger steps.
+ self.takestep.stepsize /= self.factor
+ else:
+ # We're not accepting enough steps. Take smaller steps.
+ self.takestep.stepsize *= self.factor
+ if self.verbose:
+ print("adaptive stepsize: acceptance rate {:f} target {:f} new "
+ "stepsize {:g} old stepsize {:g}".format(accept_rate,
+ self.target_accept_rate, self.takestep.stepsize,
+ old_stepsize))
+
+ def take_step(self, x):
+ self.nstep += 1
+ self.nstep_tot += 1
+ if self.nstep % self.interval == 0:
+ self._adjust_step_size()
+ return self.takestep(x)
+
+ def report(self, accept, **kwargs):
+ "called by basinhopping to report the result of the step"
+ if accept:
+ self.naccept += 1
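+
+ # Editorial note (illustrative, not part of the SciPy implementation):
+ # with the default factor of 0.9, an observed acceptance rate above the
+ # target divides the stepsize by 0.9 (roughly an 11% increase), while a
+ # rate at or below the target multiplies it by 0.9, nudging the acceptance
+ # rate toward the target every `interval` calls to take_step.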
+
+
+class RandomDisplacement:
+ """Add a random displacement of maximum size `stepsize` to each coordinate.
+
+ Calling this updates `x` in-place.
+
+ Parameters
+ ----------
+ stepsize : float, optional
+ Maximum stepsize in any dimension
+ random_gen : {None, int, `numpy.random.Generator`,
+ `numpy.random.RandomState`}, optional
+
+ If `random_gen` is None (or `np.random`), the `numpy.random.RandomState`
+ singleton is used.
+ If `random_gen` is an int, a new ``RandomState`` instance is used,
+ seeded with that value.
+ If `random_gen` is already a ``Generator`` or ``RandomState`` instance
+ then that instance is used.
+
+ """
+
+ def __init__(self, stepsize=0.5, random_gen=None):
+ self.stepsize = stepsize
+ self.random_gen = check_random_state(random_gen)
+
+ def __call__(self, x):
+ x += self.random_gen.uniform(-self.stepsize, self.stepsize,
+ np.shape(x))
+ return x
+
+
+class MinimizerWrapper:
+ """
+ wrap a minimizer function as a minimizer class
+ """
+ def __init__(self, minimizer, func=None, **kwargs):
+ self.minimizer = minimizer
+ self.func = func
+ self.kwargs = kwargs
+
+ def __call__(self, x0):
+ if self.func is None:
+ return self.minimizer(x0, **self.kwargs)
+ else:
+ return self.minimizer(self.func, x0, **self.kwargs)
+
+
+class Metropolis:
+ """Metropolis acceptance criterion.
+
+ Parameters
+ ----------
+ T : float
+ The "temperature" parameter for the accept or reject criterion.
+ random_gen : {None, int, `numpy.random.Generator`,
+ `numpy.random.RandomState`}, optional
+
+ If `random_gen` is None (or `np.random`), the `numpy.random.RandomState`
+ singleton is used.
+ If `random_gen` is an int, a new ``RandomState`` instance is used,
+ seeded with that value.
+ If `random_gen` is already a ``Generator`` or ``RandomState`` instance
+ then that instance is used.
+ Random number generator used for the acceptance test.
+
+ """
+
+ def __init__(self, T, random_gen=None):
+ # Avoid ZeroDivisionError since "MBH can be regarded as a special case
+ # of the BH framework with the Metropolis criterion, where temperature
+ # T = 0." (Reject all steps that increase energy.)
+ self.beta = 1.0 / T if T != 0 else float('inf')
+ self.random_gen = check_random_state(random_gen)
+
+ def accept_reject(self, res_new, res_old):
+ """
+ Assuming the local search underlying res_new was successful:
+ If new energy is lower than old, it will always be accepted.
+ If new is higher than old, there is a chance it will be accepted,
+ less likely for larger differences.
+ """
+ with np.errstate(invalid='ignore'):
+ # The energy values being fed to Metropolis are 1-length arrays, and if
+ # they are equal, their difference is 0, which gets multiplied by beta,
+ # which is inf, and array([0]) * float('inf') causes
+ #
+ # RuntimeWarning: invalid value encountered in multiply
+ #
+ # Ignore this warning so when the algorithm is on a flat plane, it always
+ # accepts the step, to try to move off the plane.
+ prod = -(res_new.fun - res_old.fun) * self.beta
+ w = math.exp(min(0, prod))
+
+ rand = self.random_gen.uniform()
+ return w >= rand and (res_new.success or not res_old.success)
+
+ def __call__(self, *, res_new, res_old):
+ """
+ res_new and res_old are mandatory keyword arguments
+ """
+ return bool(self.accept_reject(res_new, res_old))
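+
+ # Editorial note (illustrative, not part of the SciPy implementation): the
+ # criterion above accepts a worse minimum with probability
+ # exp(-(f_new - f_old) / T). For T = 1.0, f_old = 1.0 and f_new = 1.5 the
+ # acceptance probability is exp(-0.5), about 0.61, whereas any
+ # f_new <= f_old is always accepted (w == 1), subject to the success check
+ # on the return line of accept_reject.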
+
+
+def basinhopping(func, x0, niter=100, T=1.0, stepsize=0.5,
+ minimizer_kwargs=None, take_step=None, accept_test=None,
+ callback=None, interval=50, disp=False, niter_success=None,
+ seed=None, *, target_accept_rate=0.5, stepwise_factor=0.9):
+ """Find the global minimum of a function using the basin-hopping algorithm.
+
+ Basin-hopping is a two-phase method that combines a global stepping
+ algorithm with local minimization at each step. Designed to mimic
+ the natural process of energy minimization of clusters of atoms, it works
+ well for similar problems with "funnel-like, but rugged" energy landscapes
+ [5]_.
+
+ As the step-taking, step acceptance, and minimization methods are all
+ customizable, this function can also be used to implement other two-phase
+ methods.
+
+ Parameters
+ ----------
+ func : callable ``f(x, *args)``
+ Function to be optimized. ``args`` can be passed as an optional item
+ in the dict `minimizer_kwargs`
+ x0 : array_like
+ Initial guess.
+ niter : integer, optional
+ The number of basin-hopping iterations. There will be a total of
+ ``niter + 1`` runs of the local minimizer.
+ T : float, optional
+ The "temperature" parameter for the acceptance or rejection criterion.
+ Higher "temperatures" mean that larger jumps in function value will be
+ accepted. For best results `T` should be comparable to the
+ separation (in function value) between local minima.
+ stepsize : float, optional
+ Maximum step size for use in the random displacement.
+ minimizer_kwargs : dict, optional
+ Extra keyword arguments to be passed to the local minimizer
+ `scipy.optimize.minimize`. Some important options could be:
+
+ method : str
+ The minimization method (e.g. ``"L-BFGS-B"``)
+ args : tuple
+ Extra arguments passed to the objective function (`func`) and
+ its derivatives (Jacobian, Hessian).
+
+ take_step : callable ``take_step(x)``, optional
+ Replace the default step-taking routine with this routine. The default
+ step-taking routine is a random displacement of the coordinates, but
+ other step-taking algorithms may be better for some systems.
+ `take_step` can optionally have the attribute ``take_step.stepsize``.
+ If this attribute exists, then `basinhopping` will adjust
+ ``take_step.stepsize`` in order to try to optimize the global minimum
+ search.
+ accept_test : callable, ``accept_test(f_new=f_new, x_new=x_new, f_old=f_old, x_old=x_old)``, optional
+ Define a test which will be used to judge whether to accept the
+ step. This will be used in addition to the Metropolis test based on
+ "temperature" `T`. The acceptable return values are True,
+ False, or ``"force accept"``. If any of the tests return False
+ then the step is rejected. If ``"force accept"`` is returned, it
+ overrides any other tests in order to accept the step. This can be
+ used, for example, to forcefully escape from a local minimum that
+ `basinhopping` is trapped in.
+ callback : callable, ``callback(x, f, accept)``, optional
+ A callback function which will be called for all minima found. ``x``
+ and ``f`` are the coordinates and function value of the trial minimum,
+ and ``accept`` is whether that minimum was accepted. This can
+ be used, for example, to save the lowest N minima found. Also,
+ `callback` can be used to specify a user defined stop criterion by
+ optionally returning True to stop the `basinhopping` routine.
+ interval : integer, optional
+ Interval for how often to update the `stepsize`.
+ disp : bool, optional
+ Set to True to print status messages
+ niter_success : integer, optional
+ Stop the run if the global minimum candidate remains the same for this
+ number of iterations.
+ seed : {None, int, `numpy.random.Generator`, `numpy.random.RandomState`}, optional
+
+ If `seed` is None (or `np.random`), the `numpy.random.RandomState`
+ singleton is used.
+ If `seed` is an int, a new ``RandomState`` instance is used,
+ seeded with `seed`.
+ If `seed` is already a ``Generator`` or ``RandomState`` instance then
+ that instance is used.
+ Specify `seed` for repeatable minimizations. The random numbers
+ generated with this seed only affect the default Metropolis
+ `accept_test` and the default `take_step`. If you supply your own
+ `take_step` and `accept_test`, and these functions use random
+ number generation, then those functions are responsible for the state
+ of their random number generator.
+ target_accept_rate : float, optional
+ The target acceptance rate that is used to adjust the `stepsize`.
+ If the current acceptance rate is greater than the target,
+ then the `stepsize` is increased. Otherwise, it is decreased.
+ Range is (0, 1). Default is 0.5.
+
+ .. versionadded:: 1.8.0
+
+ stepwise_factor : float, optional
+ The `stepsize` is multiplied or divided by this stepwise factor upon
+ each update. Range is (0, 1). Default is 0.9.
+
+ .. versionadded:: 1.8.0
+
+ Returns
+ -------
+ res : OptimizeResult
+ The optimization result represented as an `OptimizeResult` object.
+ Important attributes are: ``x`` the solution array, ``fun`` the value
+ of the function at the solution, and ``message`` which describes the
+ cause of the termination. The ``OptimizeResult`` object returned by the
+ selected minimizer at the lowest minimum is also contained within this
+ object and can be accessed through the ``lowest_optimization_result``
+ attribute. See `OptimizeResult` for a description of other attributes.
+
+ See Also
+ --------
+ minimize :
+ The local minimization function called once for each basinhopping step.
+ `minimizer_kwargs` is passed to this routine.
+
+ Notes
+ -----
+ Basin-hopping is a stochastic algorithm which attempts to find the global
+ minimum of a smooth scalar function of one or more variables [1]_ [2]_ [3]_
+ [4]_. The algorithm in its current form was described by David Wales and
+ Jonathan Doye [2]_ http://www-wales.ch.cam.ac.uk/.
+
+ The algorithm is iterative with each cycle composed of the following
+ features
+
+ 1) random perturbation of the coordinates
+
+ 2) local minimization
+
+ 3) accept or reject the new coordinates based on the minimized function
+ value
+
+ The acceptance test used here is the Metropolis criterion of standard Monte
+ Carlo algorithms, although there are many other possibilities [3]_.
+
+ This global minimization method has been shown to be extremely efficient
+ for a wide variety of problems in physics and chemistry. It is
+ particularly useful when the function has many minima separated by large
+ barriers. See the `Cambridge Cluster Database
+