diff --git a/.gitattributes b/.gitattributes index 6cc6ee045a883fcdcb9bf6c21a1e58ac3176f971..041dd7d3e8884145f4d54a69cccdc153e4a2a826 100644 --- a/.gitattributes +++ b/.gitattributes @@ -158,3 +158,5 @@ env-llmeval/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_host.so env-llmeval/lib/python3.10/site-packages/tokenizers/tokenizers.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text llmeval-env/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so filter=lfs diff=lfs merge=lfs -text env-llmeval/lib/python3.10/site-packages/scipy/sparse/_sparsetools.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +env-llmeval/lib/python3.10/site-packages/scipy/linalg/_flapack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +env-llmeval/lib/python3.10/site-packages/scipy/misc/face.dat filter=lfs diff=lfs merge=lfs -text diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/_basic.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/_basic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d14845ed38eec1f396f09a0102cca2f744c53e6c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/_basic.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/_decomp.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/_decomp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ccdb5f680734c1d5190d5f78379865da1e781d26 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/_decomp.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/_decomp_qr.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/_decomp_qr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..79ae751ea24f30d0715e2c15d4cc52e7c617703a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/_decomp_qr.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/_expm_frechet.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/_expm_frechet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa2a41c0a72c964b9670439c12aab2bdb71c58b5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/_expm_frechet.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/_matfuncs.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/_matfuncs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c343d83186f6ca614f1158955d7b0077085079a7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/_matfuncs.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/_matfuncs_inv_ssq.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/_matfuncs_inv_ssq.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1837e151564789f9ad5cf2584c051994b246166 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/_matfuncs_inv_ssq.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/_matfuncs_sqrtm.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/_matfuncs_sqrtm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ce46c68464ccb9c91fa9b5cce810fb017f6c3d6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/_matfuncs_sqrtm.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/_procrustes.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/_procrustes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a12127ee230dd3d367762327a2e92574b5de1798 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/_procrustes.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/basic.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/basic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06aa72f59f32aa9fbac9ca370c3d1ed303dd6394 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/basic.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/blas.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/blas.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d7a9f1a5b6e8a2cfb8753e2d7f796679f0d22f60 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/blas.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/decomp.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/decomp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a9593991e9b92849ceab22cb3b3da96f05b7e0ec Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/decomp.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/decomp_cholesky.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/decomp_cholesky.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ef864ccaf9d71536120830001c21cff5db2e731 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/decomp_cholesky.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/decomp_svd.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/decomp_svd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e061b850fa3d6188bf95da27e3936abc74bde6fb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/decomp_svd.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/matfuncs.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/matfuncs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17c560fe27a42f1c6bbeb8e22792ea8ccaf904d3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/matfuncs.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/special_matrices.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/special_matrices.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6a06e7a81adedbec2e737bfca5fe96cf09e77cf0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/__pycache__/special_matrices.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_blas_subroutines.h b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_blas_subroutines.h new file mode 100644 index 0000000000000000000000000000000000000000..d1d9308b8e8e9e6ebf30bb6acd5b028c1c929d7b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_blas_subroutines.h @@ -0,0 +1,164 @@ +/* +This file was generated by _generate_pyx.py. +Do not edit this file directly. +*/ + +#include "fortran_defs.h" +#include "npy_cblas.h" + +#ifdef __cplusplus +extern "C" { +#endif + +void BLAS_FUNC(caxpy)(int *n, npy_complex64 *ca, npy_complex64 *cx, int *incx, npy_complex64 *cy, int *incy); +void BLAS_FUNC(ccopy)(int *n, npy_complex64 *cx, int *incx, npy_complex64 *cy, int *incy); +void (cdotcwrp_)(npy_complex64 *out, int *n, npy_complex64 *cx, int *incx, npy_complex64 *cy, int *incy); +void (cdotuwrp_)(npy_complex64 *out, int *n, npy_complex64 *cx, int *incx, npy_complex64 *cy, int *incy); +void BLAS_FUNC(cgbmv)(char *trans, int *m, int *n, int *kl, int *ku, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *x, int *incx, npy_complex64 *beta, npy_complex64 *y, int *incy); +void BLAS_FUNC(cgemm)(char *transa, char *transb, int *m, int *n, int *k, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *beta, npy_complex64 *c, int *ldc); +void BLAS_FUNC(cgemv)(char *trans, int *m, int *n, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *x, int *incx, npy_complex64 *beta, npy_complex64 *y, int *incy); +void BLAS_FUNC(cgerc)(int *m, int *n, npy_complex64 *alpha, npy_complex64 *x, int *incx, npy_complex64 *y, int *incy, npy_complex64 *a, int *lda); +void BLAS_FUNC(cgeru)(int *m, int *n, npy_complex64 *alpha, npy_complex64 *x, int *incx, npy_complex64 *y, int *incy, npy_complex64 *a, int *lda); +void BLAS_FUNC(chbmv)(char *uplo, int *n, int *k, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *x, int *incx, npy_complex64 *beta, npy_complex64 *y, int *incy); +void BLAS_FUNC(chemm)(char *side, char *uplo, int *m, int *n, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *beta, npy_complex64 *c, int *ldc); +void BLAS_FUNC(chemv)(char *uplo, int *n, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *x, int *incx, npy_complex64 *beta, npy_complex64 *y, int *incy); +void BLAS_FUNC(cher)(char *uplo, int *n, float *alpha, npy_complex64 *x, int *incx, npy_complex64 *a, int *lda); +void BLAS_FUNC(cher2)(char *uplo, int *n, npy_complex64 *alpha, npy_complex64 *x, int *incx, npy_complex64 *y, int *incy, npy_complex64 *a, int *lda); +void BLAS_FUNC(cher2k)(char *uplo, char *trans, int *n, int *k, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, float *beta, npy_complex64 *c, int *ldc); +void BLAS_FUNC(cherk)(char *uplo, char *trans, int *n, int *k, float *alpha, npy_complex64 *a, int *lda, float *beta, npy_complex64 *c, int *ldc); +void BLAS_FUNC(chpmv)(char *uplo, int *n, npy_complex64 *alpha, npy_complex64 *ap, 
npy_complex64 *x, int *incx, npy_complex64 *beta, npy_complex64 *y, int *incy); +void BLAS_FUNC(chpr)(char *uplo, int *n, float *alpha, npy_complex64 *x, int *incx, npy_complex64 *ap); +void BLAS_FUNC(chpr2)(char *uplo, int *n, npy_complex64 *alpha, npy_complex64 *x, int *incx, npy_complex64 *y, int *incy, npy_complex64 *ap); +void BLAS_FUNC(crotg)(npy_complex64 *ca, npy_complex64 *cb, float *c, npy_complex64 *s); +void BLAS_FUNC(cscal)(int *n, npy_complex64 *ca, npy_complex64 *cx, int *incx); +void BLAS_FUNC(csrot)(int *n, npy_complex64 *cx, int *incx, npy_complex64 *cy, int *incy, float *c, float *s); +void BLAS_FUNC(csscal)(int *n, float *sa, npy_complex64 *cx, int *incx); +void BLAS_FUNC(cswap)(int *n, npy_complex64 *cx, int *incx, npy_complex64 *cy, int *incy); +void BLAS_FUNC(csymm)(char *side, char *uplo, int *m, int *n, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *beta, npy_complex64 *c, int *ldc); +void BLAS_FUNC(csyr2k)(char *uplo, char *trans, int *n, int *k, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *beta, npy_complex64 *c, int *ldc); +void BLAS_FUNC(csyrk)(char *uplo, char *trans, int *n, int *k, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *beta, npy_complex64 *c, int *ldc); +void BLAS_FUNC(ctbmv)(char *uplo, char *trans, char *diag, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *x, int *incx); +void BLAS_FUNC(ctbsv)(char *uplo, char *trans, char *diag, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *x, int *incx); +void BLAS_FUNC(ctpmv)(char *uplo, char *trans, char *diag, int *n, npy_complex64 *ap, npy_complex64 *x, int *incx); +void BLAS_FUNC(ctpsv)(char *uplo, char *trans, char *diag, int *n, npy_complex64 *ap, npy_complex64 *x, int *incx); +void BLAS_FUNC(ctrmm)(char *side, char *uplo, char *transa, char *diag, int *m, int *n, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb); +void BLAS_FUNC(ctrmv)(char *uplo, char *trans, char *diag, int *n, npy_complex64 *a, int *lda, npy_complex64 *x, int *incx); +void BLAS_FUNC(ctrsm)(char *side, char *uplo, char *transa, char *diag, int *m, int *n, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb); +void BLAS_FUNC(ctrsv)(char *uplo, char *trans, char *diag, int *n, npy_complex64 *a, int *lda, npy_complex64 *x, int *incx); +double BLAS_FUNC(dasum)(int *n, double *dx, int *incx); +void BLAS_FUNC(daxpy)(int *n, double *da, double *dx, int *incx, double *dy, int *incy); +double BLAS_FUNC(dcabs1)(npy_complex128 *z); +void BLAS_FUNC(dcopy)(int *n, double *dx, int *incx, double *dy, int *incy); +double BLAS_FUNC(ddot)(int *n, double *dx, int *incx, double *dy, int *incy); +void BLAS_FUNC(dgbmv)(char *trans, int *m, int *n, int *kl, int *ku, double *alpha, double *a, int *lda, double *x, int *incx, double *beta, double *y, int *incy); +void BLAS_FUNC(dgemm)(char *transa, char *transb, int *m, int *n, int *k, double *alpha, double *a, int *lda, double *b, int *ldb, double *beta, double *c, int *ldc); +void BLAS_FUNC(dgemv)(char *trans, int *m, int *n, double *alpha, double *a, int *lda, double *x, int *incx, double *beta, double *y, int *incy); +void BLAS_FUNC(dger)(int *m, int *n, double *alpha, double *x, int *incx, double *y, int *incy, double *a, int *lda); +double BLAS_FUNC(dnrm2)(int *n, double *x, int *incx); +void BLAS_FUNC(drot)(int *n, double *dx, int *incx, double *dy, int *incy, double *c, double *s); +void BLAS_FUNC(drotg)(double *da, 
double *db, double *c, double *s); +void BLAS_FUNC(drotm)(int *n, double *dx, int *incx, double *dy, int *incy, double *dparam); +void BLAS_FUNC(drotmg)(double *dd1, double *dd2, double *dx1, double *dy1, double *dparam); +void BLAS_FUNC(dsbmv)(char *uplo, int *n, int *k, double *alpha, double *a, int *lda, double *x, int *incx, double *beta, double *y, int *incy); +void BLAS_FUNC(dscal)(int *n, double *da, double *dx, int *incx); +double BLAS_FUNC(dsdot)(int *n, float *sx, int *incx, float *sy, int *incy); +void BLAS_FUNC(dspmv)(char *uplo, int *n, double *alpha, double *ap, double *x, int *incx, double *beta, double *y, int *incy); +void BLAS_FUNC(dspr)(char *uplo, int *n, double *alpha, double *x, int *incx, double *ap); +void BLAS_FUNC(dspr2)(char *uplo, int *n, double *alpha, double *x, int *incx, double *y, int *incy, double *ap); +void BLAS_FUNC(dswap)(int *n, double *dx, int *incx, double *dy, int *incy); +void BLAS_FUNC(dsymm)(char *side, char *uplo, int *m, int *n, double *alpha, double *a, int *lda, double *b, int *ldb, double *beta, double *c, int *ldc); +void BLAS_FUNC(dsymv)(char *uplo, int *n, double *alpha, double *a, int *lda, double *x, int *incx, double *beta, double *y, int *incy); +void BLAS_FUNC(dsyr)(char *uplo, int *n, double *alpha, double *x, int *incx, double *a, int *lda); +void BLAS_FUNC(dsyr2)(char *uplo, int *n, double *alpha, double *x, int *incx, double *y, int *incy, double *a, int *lda); +void BLAS_FUNC(dsyr2k)(char *uplo, char *trans, int *n, int *k, double *alpha, double *a, int *lda, double *b, int *ldb, double *beta, double *c, int *ldc); +void BLAS_FUNC(dsyrk)(char *uplo, char *trans, int *n, int *k, double *alpha, double *a, int *lda, double *beta, double *c, int *ldc); +void BLAS_FUNC(dtbmv)(char *uplo, char *trans, char *diag, int *n, int *k, double *a, int *lda, double *x, int *incx); +void BLAS_FUNC(dtbsv)(char *uplo, char *trans, char *diag, int *n, int *k, double *a, int *lda, double *x, int *incx); +void BLAS_FUNC(dtpmv)(char *uplo, char *trans, char *diag, int *n, double *ap, double *x, int *incx); +void BLAS_FUNC(dtpsv)(char *uplo, char *trans, char *diag, int *n, double *ap, double *x, int *incx); +void BLAS_FUNC(dtrmm)(char *side, char *uplo, char *transa, char *diag, int *m, int *n, double *alpha, double *a, int *lda, double *b, int *ldb); +void BLAS_FUNC(dtrmv)(char *uplo, char *trans, char *diag, int *n, double *a, int *lda, double *x, int *incx); +void BLAS_FUNC(dtrsm)(char *side, char *uplo, char *transa, char *diag, int *m, int *n, double *alpha, double *a, int *lda, double *b, int *ldb); +void BLAS_FUNC(dtrsv)(char *uplo, char *trans, char *diag, int *n, double *a, int *lda, double *x, int *incx); +double BLAS_FUNC(dzasum)(int *n, npy_complex128 *zx, int *incx); +double BLAS_FUNC(dznrm2)(int *n, npy_complex128 *x, int *incx); +int BLAS_FUNC(icamax)(int *n, npy_complex64 *cx, int *incx); +int BLAS_FUNC(idamax)(int *n, double *dx, int *incx); +int BLAS_FUNC(isamax)(int *n, float *sx, int *incx); +int BLAS_FUNC(izamax)(int *n, npy_complex128 *zx, int *incx); +int BLAS_FUNC(lsame)(char *ca, char *cb); +float BLAS_FUNC(sasum)(int *n, float *sx, int *incx); +void BLAS_FUNC(saxpy)(int *n, float *sa, float *sx, int *incx, float *sy, int *incy); +float BLAS_FUNC(scasum)(int *n, npy_complex64 *cx, int *incx); +float BLAS_FUNC(scnrm2)(int *n, npy_complex64 *x, int *incx); +void BLAS_FUNC(scopy)(int *n, float *sx, int *incx, float *sy, int *incy); +float BLAS_FUNC(sdot)(int *n, float *sx, int *incx, float *sy, int *incy); +float 
BLAS_FUNC(sdsdot)(int *n, float *sb, float *sx, int *incx, float *sy, int *incy); +void BLAS_FUNC(sgbmv)(char *trans, int *m, int *n, int *kl, int *ku, float *alpha, float *a, int *lda, float *x, int *incx, float *beta, float *y, int *incy); +void BLAS_FUNC(sgemm)(char *transa, char *transb, int *m, int *n, int *k, float *alpha, float *a, int *lda, float *b, int *ldb, float *beta, float *c, int *ldc); +void BLAS_FUNC(sgemv)(char *trans, int *m, int *n, float *alpha, float *a, int *lda, float *x, int *incx, float *beta, float *y, int *incy); +void BLAS_FUNC(sger)(int *m, int *n, float *alpha, float *x, int *incx, float *y, int *incy, float *a, int *lda); +float BLAS_FUNC(snrm2)(int *n, float *x, int *incx); +void BLAS_FUNC(srot)(int *n, float *sx, int *incx, float *sy, int *incy, float *c, float *s); +void BLAS_FUNC(srotg)(float *sa, float *sb, float *c, float *s); +void BLAS_FUNC(srotm)(int *n, float *sx, int *incx, float *sy, int *incy, float *sparam); +void BLAS_FUNC(srotmg)(float *sd1, float *sd2, float *sx1, float *sy1, float *sparam); +void BLAS_FUNC(ssbmv)(char *uplo, int *n, int *k, float *alpha, float *a, int *lda, float *x, int *incx, float *beta, float *y, int *incy); +void BLAS_FUNC(sscal)(int *n, float *sa, float *sx, int *incx); +void BLAS_FUNC(sspmv)(char *uplo, int *n, float *alpha, float *ap, float *x, int *incx, float *beta, float *y, int *incy); +void BLAS_FUNC(sspr)(char *uplo, int *n, float *alpha, float *x, int *incx, float *ap); +void BLAS_FUNC(sspr2)(char *uplo, int *n, float *alpha, float *x, int *incx, float *y, int *incy, float *ap); +void BLAS_FUNC(sswap)(int *n, float *sx, int *incx, float *sy, int *incy); +void BLAS_FUNC(ssymm)(char *side, char *uplo, int *m, int *n, float *alpha, float *a, int *lda, float *b, int *ldb, float *beta, float *c, int *ldc); +void BLAS_FUNC(ssymv)(char *uplo, int *n, float *alpha, float *a, int *lda, float *x, int *incx, float *beta, float *y, int *incy); +void BLAS_FUNC(ssyr)(char *uplo, int *n, float *alpha, float *x, int *incx, float *a, int *lda); +void BLAS_FUNC(ssyr2)(char *uplo, int *n, float *alpha, float *x, int *incx, float *y, int *incy, float *a, int *lda); +void BLAS_FUNC(ssyr2k)(char *uplo, char *trans, int *n, int *k, float *alpha, float *a, int *lda, float *b, int *ldb, float *beta, float *c, int *ldc); +void BLAS_FUNC(ssyrk)(char *uplo, char *trans, int *n, int *k, float *alpha, float *a, int *lda, float *beta, float *c, int *ldc); +void BLAS_FUNC(stbmv)(char *uplo, char *trans, char *diag, int *n, int *k, float *a, int *lda, float *x, int *incx); +void BLAS_FUNC(stbsv)(char *uplo, char *trans, char *diag, int *n, int *k, float *a, int *lda, float *x, int *incx); +void BLAS_FUNC(stpmv)(char *uplo, char *trans, char *diag, int *n, float *ap, float *x, int *incx); +void BLAS_FUNC(stpsv)(char *uplo, char *trans, char *diag, int *n, float *ap, float *x, int *incx); +void BLAS_FUNC(strmm)(char *side, char *uplo, char *transa, char *diag, int *m, int *n, float *alpha, float *a, int *lda, float *b, int *ldb); +void BLAS_FUNC(strmv)(char *uplo, char *trans, char *diag, int *n, float *a, int *lda, float *x, int *incx); +void BLAS_FUNC(strsm)(char *side, char *uplo, char *transa, char *diag, int *m, int *n, float *alpha, float *a, int *lda, float *b, int *ldb); +void BLAS_FUNC(strsv)(char *uplo, char *trans, char *diag, int *n, float *a, int *lda, float *x, int *incx); +void BLAS_FUNC(zaxpy)(int *n, npy_complex128 *za, npy_complex128 *zx, int *incx, npy_complex128 *zy, int *incy); +void BLAS_FUNC(zcopy)(int *n, npy_complex128 
*zx, int *incx, npy_complex128 *zy, int *incy); +void (zdotcwrp_)(npy_complex128 *out, int *n, npy_complex128 *zx, int *incx, npy_complex128 *zy, int *incy); +void (zdotuwrp_)(npy_complex128 *out, int *n, npy_complex128 *zx, int *incx, npy_complex128 *zy, int *incy); +void BLAS_FUNC(zdrot)(int *n, npy_complex128 *cx, int *incx, npy_complex128 *cy, int *incy, double *c, double *s); +void BLAS_FUNC(zdscal)(int *n, double *da, npy_complex128 *zx, int *incx); +void BLAS_FUNC(zgbmv)(char *trans, int *m, int *n, int *kl, int *ku, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *x, int *incx, npy_complex128 *beta, npy_complex128 *y, int *incy); +void BLAS_FUNC(zgemm)(char *transa, char *transb, int *m, int *n, int *k, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *beta, npy_complex128 *c, int *ldc); +void BLAS_FUNC(zgemv)(char *trans, int *m, int *n, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *x, int *incx, npy_complex128 *beta, npy_complex128 *y, int *incy); +void BLAS_FUNC(zgerc)(int *m, int *n, npy_complex128 *alpha, npy_complex128 *x, int *incx, npy_complex128 *y, int *incy, npy_complex128 *a, int *lda); +void BLAS_FUNC(zgeru)(int *m, int *n, npy_complex128 *alpha, npy_complex128 *x, int *incx, npy_complex128 *y, int *incy, npy_complex128 *a, int *lda); +void BLAS_FUNC(zhbmv)(char *uplo, int *n, int *k, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *x, int *incx, npy_complex128 *beta, npy_complex128 *y, int *incy); +void BLAS_FUNC(zhemm)(char *side, char *uplo, int *m, int *n, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *beta, npy_complex128 *c, int *ldc); +void BLAS_FUNC(zhemv)(char *uplo, int *n, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *x, int *incx, npy_complex128 *beta, npy_complex128 *y, int *incy); +void BLAS_FUNC(zher)(char *uplo, int *n, double *alpha, npy_complex128 *x, int *incx, npy_complex128 *a, int *lda); +void BLAS_FUNC(zher2)(char *uplo, int *n, npy_complex128 *alpha, npy_complex128 *x, int *incx, npy_complex128 *y, int *incy, npy_complex128 *a, int *lda); +void BLAS_FUNC(zher2k)(char *uplo, char *trans, int *n, int *k, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, double *beta, npy_complex128 *c, int *ldc); +void BLAS_FUNC(zherk)(char *uplo, char *trans, int *n, int *k, double *alpha, npy_complex128 *a, int *lda, double *beta, npy_complex128 *c, int *ldc); +void BLAS_FUNC(zhpmv)(char *uplo, int *n, npy_complex128 *alpha, npy_complex128 *ap, npy_complex128 *x, int *incx, npy_complex128 *beta, npy_complex128 *y, int *incy); +void BLAS_FUNC(zhpr)(char *uplo, int *n, double *alpha, npy_complex128 *x, int *incx, npy_complex128 *ap); +void BLAS_FUNC(zhpr2)(char *uplo, int *n, npy_complex128 *alpha, npy_complex128 *x, int *incx, npy_complex128 *y, int *incy, npy_complex128 *ap); +void BLAS_FUNC(zrotg)(npy_complex128 *ca, npy_complex128 *cb, double *c, npy_complex128 *s); +void BLAS_FUNC(zscal)(int *n, npy_complex128 *za, npy_complex128 *zx, int *incx); +void BLAS_FUNC(zswap)(int *n, npy_complex128 *zx, int *incx, npy_complex128 *zy, int *incy); +void BLAS_FUNC(zsymm)(char *side, char *uplo, int *m, int *n, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *beta, npy_complex128 *c, int *ldc); +void BLAS_FUNC(zsyr2k)(char *uplo, char *trans, int *n, int *k, npy_complex128 *alpha, npy_complex128 *a, int *lda, 
npy_complex128 *b, int *ldb, npy_complex128 *beta, npy_complex128 *c, int *ldc); +void BLAS_FUNC(zsyrk)(char *uplo, char *trans, int *n, int *k, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *beta, npy_complex128 *c, int *ldc); +void BLAS_FUNC(ztbmv)(char *uplo, char *trans, char *diag, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *x, int *incx); +void BLAS_FUNC(ztbsv)(char *uplo, char *trans, char *diag, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *x, int *incx); +void BLAS_FUNC(ztpmv)(char *uplo, char *trans, char *diag, int *n, npy_complex128 *ap, npy_complex128 *x, int *incx); +void BLAS_FUNC(ztpsv)(char *uplo, char *trans, char *diag, int *n, npy_complex128 *ap, npy_complex128 *x, int *incx); +void BLAS_FUNC(ztrmm)(char *side, char *uplo, char *transa, char *diag, int *m, int *n, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb); +void BLAS_FUNC(ztrmv)(char *uplo, char *trans, char *diag, int *n, npy_complex128 *a, int *lda, npy_complex128 *x, int *incx); +void BLAS_FUNC(ztrsm)(char *side, char *uplo, char *transa, char *diag, int *m, int *n, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb); +void BLAS_FUNC(ztrsv)(char *uplo, char *trans, char *diag, int *n, npy_complex128 *a, int *lda, npy_complex128 *x, int *incx); + +#ifdef __cplusplus +} +#endif diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_cythonized_array_utils.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_cythonized_array_utils.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..9a3d9c8de436abe431b2bbc74d990c2d74526242 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_cythonized_array_utils.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_cythonized_array_utils.pxd b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_cythonized_array_utils.pxd new file mode 100644 index 0000000000000000000000000000000000000000..ccec61c078e57ba7b6a310ec57189fcf236c972d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_cythonized_array_utils.pxd @@ -0,0 +1,40 @@ +cimport numpy as cnp + +ctypedef fused lapack_t: + float + double + (float complex) + (double complex) + +ctypedef fused lapack_cz_t: + (float complex) + (double complex) + +ctypedef fused lapack_sd_t: + float + double + +ctypedef fused np_numeric_t: + cnp.int8_t + cnp.int16_t + cnp.int32_t + cnp.int64_t + cnp.uint8_t + cnp.uint16_t + cnp.uint32_t + cnp.uint64_t + cnp.float32_t + cnp.float64_t + cnp.longdouble_t + cnp.complex64_t + cnp.complex128_t + +ctypedef fused np_complex_numeric_t: + cnp.complex64_t + cnp.complex128_t + + +cdef void swap_c_and_f_layout(lapack_t *a, lapack_t *b, int r, int c) noexcept nogil +cdef (int, int) band_check_internal_c(np_numeric_t[:, ::1]A) noexcept nogil +cdef bint is_sym_her_real_c_internal(np_numeric_t[:, ::1]A) noexcept nogil +cdef bint is_sym_her_complex_c_internal(np_complex_numeric_t[:, ::1]A) noexcept nogil diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_decomp.py b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_decomp.py new file mode 100644 index 0000000000000000000000000000000000000000..2918295ec5b21d2a560a5e7af20d8b32c719e73f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_decomp.py @@ -0,0 +1,1621 @@ +# +# Author: Pearu Peterson, March 2002 +# 
+# additions by Travis Oliphant, March 2002 +# additions by Eric Jones, June 2002 +# additions by Johannes Loehnert, June 2006 +# additions by Bart Vandereycken, June 2006 +# additions by Andrew D Straw, May 2007 +# additions by Tiziano Zito, November 2008 +# +# April 2010: Functions for LU, QR, SVD, Schur, and Cholesky decompositions +# were moved to their own files. Still in this file are functions for +# eigenstuff and for the Hessenberg form. + +__all__ = ['eig', 'eigvals', 'eigh', 'eigvalsh', + 'eig_banded', 'eigvals_banded', + 'eigh_tridiagonal', 'eigvalsh_tridiagonal', 'hessenberg', 'cdf2rdf'] + +import warnings + +import numpy +from numpy import (array, isfinite, inexact, nonzero, iscomplexobj, + flatnonzero, conj, asarray, argsort, empty, + iscomplex, zeros, einsum, eye, inf) +# Local imports +from scipy._lib._util import _asarray_validated +from ._misc import LinAlgError, _datacopied, norm +from .lapack import get_lapack_funcs, _compute_lwork +from scipy._lib.deprecation import _NoValue, _deprecate_positional_args + + +_I = numpy.array(1j, dtype='F') + + +def _make_complex_eigvecs(w, vin, dtype): + """ + Produce complex-valued eigenvectors from LAPACK DGGEV real-valued output + """ + # - see LAPACK man page DGGEV at ALPHAI + v = numpy.array(vin, dtype=dtype) + m = (w.imag > 0) + m[:-1] |= (w.imag[1:] < 0) # workaround for LAPACK bug, cf. ticket #709 + for i in flatnonzero(m): + v.imag[:, i] = vin[:, i+1] + conj(v[:, i], v[:, i+1]) + return v + + +def _make_eigvals(alpha, beta, homogeneous_eigvals): + if homogeneous_eigvals: + if beta is None: + return numpy.vstack((alpha, numpy.ones_like(alpha))) + else: + return numpy.vstack((alpha, beta)) + else: + if beta is None: + return alpha + else: + w = numpy.empty_like(alpha) + alpha_zero = (alpha == 0) + beta_zero = (beta == 0) + beta_nonzero = ~beta_zero + w[beta_nonzero] = alpha[beta_nonzero]/beta[beta_nonzero] + # Use numpy.inf for complex values too since + # 1/numpy.inf = 0, i.e., it correctly behaves as projective + # infinity. 
+ w[~alpha_zero & beta_zero] = numpy.inf + if numpy.all(alpha.imag == 0): + w[alpha_zero & beta_zero] = numpy.nan + else: + w[alpha_zero & beta_zero] = complex(numpy.nan, numpy.nan) + return w + + +def _geneig(a1, b1, left, right, overwrite_a, overwrite_b, + homogeneous_eigvals): + ggev, = get_lapack_funcs(('ggev',), (a1, b1)) + cvl, cvr = left, right + res = ggev(a1, b1, lwork=-1) + lwork = res[-2][0].real.astype(numpy.int_) + if ggev.typecode in 'cz': + alpha, beta, vl, vr, work, info = ggev(a1, b1, cvl, cvr, lwork, + overwrite_a, overwrite_b) + w = _make_eigvals(alpha, beta, homogeneous_eigvals) + else: + alphar, alphai, beta, vl, vr, work, info = ggev(a1, b1, cvl, cvr, + lwork, overwrite_a, + overwrite_b) + alpha = alphar + _I * alphai + w = _make_eigvals(alpha, beta, homogeneous_eigvals) + _check_info(info, 'generalized eig algorithm (ggev)') + + only_real = numpy.all(w.imag == 0.0) + if not (ggev.typecode in 'cz' or only_real): + t = w.dtype.char + if left: + vl = _make_complex_eigvecs(w, vl, t) + if right: + vr = _make_complex_eigvecs(w, vr, t) + + # the eigenvectors returned by the lapack function are NOT normalized + for i in range(vr.shape[0]): + if right: + vr[:, i] /= norm(vr[:, i]) + if left: + vl[:, i] /= norm(vl[:, i]) + + if not (left or right): + return w + if left: + if right: + return w, vl, vr + return w, vl + return w, vr + + +def eig(a, b=None, left=False, right=True, overwrite_a=False, + overwrite_b=False, check_finite=True, homogeneous_eigvals=False): + """ + Solve an ordinary or generalized eigenvalue problem of a square matrix. + + Find eigenvalues w and right or left eigenvectors of a general matrix:: + + a vr[:,i] = w[i] b vr[:,i] + a.H vl[:,i] = w[i].conj() b.H vl[:,i] + + where ``.H`` is the Hermitian conjugation. + + Parameters + ---------- + a : (M, M) array_like + A complex or real matrix whose eigenvalues and eigenvectors + will be computed. + b : (M, M) array_like, optional + Right-hand side matrix in a generalized eigenvalue problem. + Default is None, identity matrix is assumed. + left : bool, optional + Whether to calculate and return left eigenvectors. Default is False. + right : bool, optional + Whether to calculate and return right eigenvectors. Default is True. + overwrite_a : bool, optional + Whether to overwrite `a`; may improve performance. Default is False. + overwrite_b : bool, optional + Whether to overwrite `b`; may improve performance. Default is False. + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + homogeneous_eigvals : bool, optional + If True, return the eigenvalues in homogeneous coordinates. + In this case ``w`` is a (2, M) array so that:: + + w[1,i] a vr[:,i] = w[0,i] b vr[:,i] + + Default is False. + + Returns + ------- + w : (M,) or (2, M) double or complex ndarray + The eigenvalues, each repeated according to its + multiplicity. The shape is (M,) unless + ``homogeneous_eigvals=True``. + vl : (M, M) double or complex ndarray + The left eigenvector corresponding to the eigenvalue + ``w[i]`` is the column ``vl[:,i]``. Only returned if ``left=True``. + The left eigenvector is not normalized. + vr : (M, M) double or complex ndarray + The normalized right eigenvector corresponding to the eigenvalue + ``w[i]`` is the column ``vr[:,i]``. Only returned if ``right=True``. 
+ + Raises + ------ + LinAlgError + If eigenvalue computation does not converge. + + See Also + -------- + eigvals : eigenvalues of general arrays + eigh : Eigenvalues and right eigenvectors for symmetric/Hermitian arrays. + eig_banded : eigenvalues and right eigenvectors for symmetric/Hermitian + band matrices + eigh_tridiagonal : eigenvalues and right eiegenvectors for + symmetric/Hermitian tridiagonal matrices + + Examples + -------- + >>> import numpy as np + >>> from scipy import linalg + >>> a = np.array([[0., -1.], [1., 0.]]) + >>> linalg.eigvals(a) + array([0.+1.j, 0.-1.j]) + + >>> b = np.array([[0., 1.], [1., 1.]]) + >>> linalg.eigvals(a, b) + array([ 1.+0.j, -1.+0.j]) + + >>> a = np.array([[3., 0., 0.], [0., 8., 0.], [0., 0., 7.]]) + >>> linalg.eigvals(a, homogeneous_eigvals=True) + array([[3.+0.j, 8.+0.j, 7.+0.j], + [1.+0.j, 1.+0.j, 1.+0.j]]) + + >>> a = np.array([[0., -1.], [1., 0.]]) + >>> linalg.eigvals(a) == linalg.eig(a)[0] + array([ True, True]) + >>> linalg.eig(a, left=True, right=False)[1] # normalized left eigenvector + array([[-0.70710678+0.j , -0.70710678-0.j ], + [-0. +0.70710678j, -0. -0.70710678j]]) + >>> linalg.eig(a, left=False, right=True)[1] # normalized right eigenvector + array([[0.70710678+0.j , 0.70710678-0.j ], + [0. -0.70710678j, 0. +0.70710678j]]) + + + + """ + a1 = _asarray_validated(a, check_finite=check_finite) + if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]: + raise ValueError('expected square matrix') + overwrite_a = overwrite_a or (_datacopied(a1, a)) + if b is not None: + b1 = _asarray_validated(b, check_finite=check_finite) + overwrite_b = overwrite_b or _datacopied(b1, b) + if len(b1.shape) != 2 or b1.shape[0] != b1.shape[1]: + raise ValueError('expected square matrix') + if b1.shape != a1.shape: + raise ValueError('a and b must have the same shape') + return _geneig(a1, b1, left, right, overwrite_a, overwrite_b, + homogeneous_eigvals) + + geev, geev_lwork = get_lapack_funcs(('geev', 'geev_lwork'), (a1,)) + compute_vl, compute_vr = left, right + + lwork = _compute_lwork(geev_lwork, a1.shape[0], + compute_vl=compute_vl, + compute_vr=compute_vr) + + if geev.typecode in 'cz': + w, vl, vr, info = geev(a1, lwork=lwork, + compute_vl=compute_vl, + compute_vr=compute_vr, + overwrite_a=overwrite_a) + w = _make_eigvals(w, None, homogeneous_eigvals) + else: + wr, wi, vl, vr, info = geev(a1, lwork=lwork, + compute_vl=compute_vl, + compute_vr=compute_vr, + overwrite_a=overwrite_a) + t = {'f': 'F', 'd': 'D'}[wr.dtype.char] + w = wr + _I * wi + w = _make_eigvals(w, None, homogeneous_eigvals) + + _check_info(info, 'eig algorithm (geev)', + positive='did not converge (only eigenvalues ' + 'with order >= %d have converged)') + + only_real = numpy.all(w.imag == 0.0) + if not (geev.typecode in 'cz' or only_real): + t = w.dtype.char + if left: + vl = _make_complex_eigvecs(w, vl, t) + if right: + vr = _make_complex_eigvecs(w, vr, t) + if not (left or right): + return w + if left: + if right: + return w, vl, vr + return w, vl + return w, vr + + +@_deprecate_positional_args(version="1.14.0") +def eigh(a, b=None, *, lower=True, eigvals_only=False, overwrite_a=False, + overwrite_b=False, turbo=_NoValue, eigvals=_NoValue, type=1, + check_finite=True, subset_by_index=None, subset_by_value=None, + driver=None): + """ + Solve a standard or generalized eigenvalue problem for a complex + Hermitian or real symmetric matrix. 
+ + Find eigenvalues array ``w`` and optionally eigenvectors array ``v`` of + array ``a``, where ``b`` is positive definite such that for every + eigenvalue λ (i-th entry of w) and its eigenvector ``vi`` (i-th column of + ``v``) satisfies:: + + a @ vi = λ * b @ vi + vi.conj().T @ a @ vi = λ + vi.conj().T @ b @ vi = 1 + + In the standard problem, ``b`` is assumed to be the identity matrix. + + Parameters + ---------- + a : (M, M) array_like + A complex Hermitian or real symmetric matrix whose eigenvalues and + eigenvectors will be computed. + b : (M, M) array_like, optional + A complex Hermitian or real symmetric definite positive matrix in. + If omitted, identity matrix is assumed. + lower : bool, optional + Whether the pertinent array data is taken from the lower or upper + triangle of ``a`` and, if applicable, ``b``. (Default: lower) + eigvals_only : bool, optional + Whether to calculate only eigenvalues and no eigenvectors. + (Default: both are calculated) + subset_by_index : iterable, optional + If provided, this two-element iterable defines the start and the end + indices of the desired eigenvalues (ascending order and 0-indexed). + To return only the second smallest to fifth smallest eigenvalues, + ``[1, 4]`` is used. ``[n-3, n-1]`` returns the largest three. Only + available with "evr", "evx", and "gvx" drivers. The entries are + directly converted to integers via ``int()``. + subset_by_value : iterable, optional + If provided, this two-element iterable defines the half-open interval + ``(a, b]`` that, if any, only the eigenvalues between these values + are returned. Only available with "evr", "evx", and "gvx" drivers. Use + ``np.inf`` for the unconstrained ends. + driver : str, optional + Defines which LAPACK driver should be used. Valid options are "ev", + "evd", "evr", "evx" for standard problems and "gv", "gvd", "gvx" for + generalized (where b is not None) problems. See the Notes section. + The default for standard problems is "evr". For generalized problems, + "gvd" is used for full set, and "gvx" for subset requested cases. + type : int, optional + For the generalized problems, this keyword specifies the problem type + to be solved for ``w`` and ``v`` (only takes 1, 2, 3 as possible + inputs):: + + 1 => a @ v = w @ b @ v + 2 => a @ b @ v = w @ v + 3 => b @ a @ v = w @ v + + This keyword is ignored for standard problems. + overwrite_a : bool, optional + Whether to overwrite data in ``a`` (may improve performance). Default + is False. + overwrite_b : bool, optional + Whether to overwrite data in ``b`` (may improve performance). Default + is False. + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + turbo : bool, optional, deprecated + .. deprecated:: 1.5.0 + `eigh` keyword argument `turbo` is deprecated in favour of + ``driver=gvd`` keyword instead and will be removed in SciPy + 1.14.0. + eigvals : tuple (lo, hi), optional, deprecated + .. deprecated:: 1.5.0 + `eigh` keyword argument `eigvals` is deprecated in favour of + `subset_by_index` keyword instead and will be removed in SciPy + 1.14.0. + + Returns + ------- + w : (N,) ndarray + The N (N<=M) selected eigenvalues, in ascending order, each + repeated according to its multiplicity. + v : (M, N) ndarray + The normalized eigenvector corresponding to the eigenvalue ``w[i]`` is + the column ``v[:,i]``. 
Only returned if ``eigvals_only=False``. + + Raises + ------ + LinAlgError + If eigenvalue computation does not converge, an error occurred, or + b matrix is not definite positive. Note that if input matrices are + not symmetric or Hermitian, no error will be reported but results will + be wrong. + + See Also + -------- + eigvalsh : eigenvalues of symmetric or Hermitian arrays + eig : eigenvalues and right eigenvectors for non-symmetric arrays + eigh_tridiagonal : eigenvalues and right eiegenvectors for + symmetric/Hermitian tridiagonal matrices + + Notes + ----- + This function does not check the input array for being Hermitian/symmetric + in order to allow for representing arrays with only their upper/lower + triangular parts. Also, note that even though not taken into account, + finiteness check applies to the whole array and unaffected by "lower" + keyword. + + This function uses LAPACK drivers for computations in all possible keyword + combinations, prefixed with ``sy`` if arrays are real and ``he`` if + complex, e.g., a float array with "evr" driver is solved via + "syevr", complex arrays with "gvx" driver problem is solved via "hegvx" + etc. + + As a brief summary, the slowest and the most robust driver is the + classical ``ev`` which uses symmetric QR. ``evr`` is seen as + the optimal choice for the most general cases. However, there are certain + occasions that ``evd`` computes faster at the expense of more + memory usage. ``evx``, while still being faster than ``ev``, + often performs worse than the rest except when very few eigenvalues are + requested for large arrays though there is still no performance guarantee. + + + For the generalized problem, normalization with respect to the given + type argument:: + + type 1 and 3 : v.conj().T @ a @ v = w + type 2 : inv(v).conj().T @ a @ inv(v) = w + + type 1 or 2 : v.conj().T @ b @ v = I + type 3 : v.conj().T @ inv(b) @ v = I + + + Examples + -------- + >>> import numpy as np + >>> from scipy.linalg import eigh + >>> A = np.array([[6, 3, 1, 5], [3, 0, 5, 1], [1, 5, 6, 2], [5, 1, 2, 2]]) + >>> w, v = eigh(A) + >>> np.allclose(A @ v - v @ np.diag(w), np.zeros((4, 4))) + True + + Request only the eigenvalues + + >>> w = eigh(A, eigvals_only=True) + + Request eigenvalues that are less than 10. + + >>> A = np.array([[34, -4, -10, -7, 2], + ... [-4, 7, 2, 12, 0], + ... [-10, 2, 44, 2, -19], + ... [-7, 12, 2, 79, -34], + ... [2, 0, -19, -34, 29]]) + >>> eigh(A, eigvals_only=True, subset_by_value=[-np.inf, 10]) + array([6.69199443e-07, 9.11938152e+00]) + + Request the second smallest eigenvalue and its eigenvector + + >>> w, v = eigh(A, subset_by_index=[1, 1]) + >>> w + array([9.11938152]) + >>> v.shape # only a single column is returned + (5, 1) + + """ + if turbo is not _NoValue: + warnings.warn("Keyword argument 'turbo' is deprecated in favour of '" + "driver=gvd' keyword instead and will be removed in " + "SciPy 1.14.0.", + DeprecationWarning, stacklevel=2) + if eigvals is not _NoValue: + warnings.warn("Keyword argument 'eigvals' is deprecated in favour of " + "'subset_by_index' keyword instead and will be removed " + "in SciPy 1.14.0.", + DeprecationWarning, stacklevel=2) + + # set lower + uplo = 'L' if lower else 'U' + # Set job for Fortran routines + _job = 'N' if eigvals_only else 'V' + + drv_str = [None, "ev", "evd", "evr", "evx", "gv", "gvd", "gvx"] + if driver not in drv_str: + raise ValueError('"{}" is unknown. Possible values are "None", "{}".' 
+ ''.format(driver, '", "'.join(drv_str[1:]))) + + a1 = _asarray_validated(a, check_finite=check_finite) + if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]: + raise ValueError('expected square "a" matrix') + overwrite_a = overwrite_a or (_datacopied(a1, a)) + cplx = True if iscomplexobj(a1) else False + n = a1.shape[0] + drv_args = {'overwrite_a': overwrite_a} + + if b is not None: + b1 = _asarray_validated(b, check_finite=check_finite) + overwrite_b = overwrite_b or _datacopied(b1, b) + if len(b1.shape) != 2 or b1.shape[0] != b1.shape[1]: + raise ValueError('expected square "b" matrix') + + if b1.shape != a1.shape: + raise ValueError(f"wrong b dimensions {b1.shape}, should be {a1.shape}") + + if type not in [1, 2, 3]: + raise ValueError('"type" keyword only accepts 1, 2, and 3.') + + cplx = True if iscomplexobj(b1) else (cplx or False) + drv_args.update({'overwrite_b': overwrite_b, 'itype': type}) + + # backwards-compatibility handling + subset_by_index = subset_by_index if (eigvals in (None, _NoValue)) else eigvals + + subset = (subset_by_index is not None) or (subset_by_value is not None) + + # Both subsets can't be given + if subset_by_index and subset_by_value: + raise ValueError('Either index or value subset can be requested.') + + # Take turbo into account if all conditions are met otherwise ignore + if turbo not in (None, _NoValue) and b is not None: + driver = 'gvx' if subset else 'gvd' + + # Check indices if given + if subset_by_index: + lo, hi = (int(x) for x in subset_by_index) + if not (0 <= lo <= hi < n): + raise ValueError('Requested eigenvalue indices are not valid. ' + f'Valid range is [0, {n-1}] and start <= end, but ' + f'start={lo}, end={hi} is given') + # fortran is 1-indexed + drv_args.update({'range': 'I', 'il': lo + 1, 'iu': hi + 1}) + + if subset_by_value: + lo, hi = subset_by_value + if not (-inf <= lo < hi <= inf): + raise ValueError('Requested eigenvalue bounds are not valid. 
' + 'Valid range is (-inf, inf) and low < high, but ' + f'low={lo}, high={hi} is given') + + drv_args.update({'range': 'V', 'vl': lo, 'vu': hi}) + + # fix prefix for lapack routines + pfx = 'he' if cplx else 'sy' + + # decide on the driver if not given + # first early exit on incompatible choice + if driver: + if b is None and (driver in ["gv", "gvd", "gvx"]): + raise ValueError(f'{driver} requires input b array to be supplied ' + 'for generalized eigenvalue problems.') + if (b is not None) and (driver in ['ev', 'evd', 'evr', 'evx']): + raise ValueError(f'"{driver}" does not accept input b array ' + 'for standard eigenvalue problems.') + if subset and (driver in ["ev", "evd", "gv", "gvd"]): + raise ValueError(f'"{driver}" cannot compute subsets of eigenvalues') + + # Default driver is evr and gvd + else: + driver = "evr" if b is None else ("gvx" if subset else "gvd") + + lwork_spec = { + 'syevd': ['lwork', 'liwork'], + 'syevr': ['lwork', 'liwork'], + 'heevd': ['lwork', 'liwork', 'lrwork'], + 'heevr': ['lwork', 'lrwork', 'liwork'], + } + + if b is None: # Standard problem + drv, drvlw = get_lapack_funcs((pfx + driver, pfx+driver+'_lwork'), + [a1]) + clw_args = {'n': n, 'lower': lower} + if driver == 'evd': + clw_args.update({'compute_v': 0 if _job == "N" else 1}) + + lw = _compute_lwork(drvlw, **clw_args) + # Multiple lwork vars + if isinstance(lw, tuple): + lwork_args = dict(zip(lwork_spec[pfx+driver], lw)) + else: + lwork_args = {'lwork': lw} + + drv_args.update({'lower': lower, 'compute_v': 0 if _job == "N" else 1}) + w, v, *other_args, info = drv(a=a1, **drv_args, **lwork_args) + + else: # Generalized problem + # 'gvd' doesn't have lwork query + if driver == "gvd": + drv = get_lapack_funcs(pfx + "gvd", [a1, b1]) + lwork_args = {} + else: + drv, drvlw = get_lapack_funcs((pfx + driver, pfx+driver+'_lwork'), + [a1, b1]) + # generalized drivers use uplo instead of lower + lw = _compute_lwork(drvlw, n, uplo=uplo) + lwork_args = {'lwork': lw} + + drv_args.update({'uplo': uplo, 'jobz': _job}) + + w, v, *other_args, info = drv(a=a1, b=b1, **drv_args, **lwork_args) + + # m is always the first extra argument + w = w[:other_args[0]] if subset else w + v = v[:, :other_args[0]] if (subset and not eigvals_only) else v + + # Check if we had a successful exit + if info == 0: + if eigvals_only: + return w + else: + return w, v + else: + if info < -1: + raise LinAlgError('Illegal value in argument {} of internal {}' + ''.format(-info, drv.typecode + pfx + driver)) + elif info > n: + raise LinAlgError(f'The leading minor of order {info-n} of B is not ' + 'positive definite. The factorization of B ' + 'could not be completed and no eigenvalues ' + 'or eigenvectors were computed.') + else: + drv_err = {'ev': 'The algorithm failed to converge; {} ' + 'off-diagonal elements of an intermediate ' + 'tridiagonal form did not converge to zero.', + 'evx': '{} eigenvectors failed to converge.', + 'evd': 'The algorithm failed to compute an eigenvalue ' + 'while working on the submatrix lying in rows ' + 'and columns {0}/{1} through mod({0},{1}).', + 'evr': 'Internal Error.' 
+ } + if driver in ['ev', 'gv']: + msg = drv_err['ev'].format(info) + elif driver in ['evx', 'gvx']: + msg = drv_err['evx'].format(info) + elif driver in ['evd', 'gvd']: + if eigvals_only: + msg = drv_err['ev'].format(info) + else: + msg = drv_err['evd'].format(info, n+1) + else: + msg = drv_err['evr'] + + raise LinAlgError(msg) + + +_conv_dict = {0: 0, 1: 1, 2: 2, + 'all': 0, 'value': 1, 'index': 2, + 'a': 0, 'v': 1, 'i': 2} + + +def _check_select(select, select_range, max_ev, max_len): + """Check that select is valid, convert to Fortran style.""" + if isinstance(select, str): + select = select.lower() + try: + select = _conv_dict[select] + except KeyError as e: + raise ValueError('invalid argument for select') from e + vl, vu = 0., 1. + il = iu = 1 + if select != 0: # (non-all) + sr = asarray(select_range) + if sr.ndim != 1 or sr.size != 2 or sr[1] < sr[0]: + raise ValueError('select_range must be a 2-element array-like ' + 'in nondecreasing order') + if select == 1: # (value) + vl, vu = sr + if max_ev == 0: + max_ev = max_len + else: # 2 (index) + if sr.dtype.char.lower() not in 'hilqp': + raise ValueError( + f'when using select="i", select_range must ' + f'contain integers, got dtype {sr.dtype} ({sr.dtype.char})' + ) + # translate Python (0 ... N-1) into Fortran (1 ... N) with + 1 + il, iu = sr + 1 + if min(il, iu) < 1 or max(il, iu) > max_len: + raise ValueError('select_range out of bounds') + max_ev = iu - il + 1 + return select, vl, vu, il, iu, max_ev + + +def eig_banded(a_band, lower=False, eigvals_only=False, overwrite_a_band=False, + select='a', select_range=None, max_ev=0, check_finite=True): + """ + Solve real symmetric or complex Hermitian band matrix eigenvalue problem. + + Find eigenvalues w and optionally right eigenvectors v of a:: + + a v[:,i] = w[i] v[:,i] + v.H v = identity + + The matrix a is stored in a_band either in lower diagonal or upper + diagonal ordered form: + + a_band[u + i - j, j] == a[i,j] (if upper form; i <= j) + a_band[ i - j, j] == a[i,j] (if lower form; i >= j) + + where u is the number of bands above the diagonal. + + Example of a_band (shape of a is (6,6), u=2):: + + upper form: + * * a02 a13 a24 a35 + * a01 a12 a23 a34 a45 + a00 a11 a22 a33 a44 a55 + + lower form: + a00 a11 a22 a33 a44 a55 + a10 a21 a32 a43 a54 * + a20 a31 a42 a53 * * + + Cells marked with * are not used. + + Parameters + ---------- + a_band : (u+1, M) array_like + The bands of the M by M matrix a. + lower : bool, optional + Is the matrix in the lower form. (Default is upper form) + eigvals_only : bool, optional + Compute only the eigenvalues and no eigenvectors. + (Default: calculate also eigenvectors) + overwrite_a_band : bool, optional + Discard data in a_band (may enhance performance) + select : {'a', 'v', 'i'}, optional + Which eigenvalues to calculate + + ====== ======================================== + select calculated + ====== ======================================== + 'a' All eigenvalues + 'v' Eigenvalues in the interval (min, max] + 'i' Eigenvalues with indices min <= i <= max + ====== ======================================== + select_range : (min, max), optional + Range of selected eigenvalues + max_ev : int, optional + For select=='v', maximum number of eigenvalues expected. + For other values of select, has no meaning. + + In doubt, leave this parameter untouched. + + check_finite : bool, optional + Whether to check that the input matrix contains only finite numbers. 
+ Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + w : (M,) ndarray + The eigenvalues, in ascending order, each repeated according to its + multiplicity. + v : (M, M) float or complex ndarray + The normalized eigenvector corresponding to the eigenvalue w[i] is + the column v[:,i]. Only returned if ``eigvals_only=False``. + + Raises + ------ + LinAlgError + If eigenvalue computation does not converge. + + See Also + -------- + eigvals_banded : eigenvalues for symmetric/Hermitian band matrices + eig : eigenvalues and right eigenvectors of general arrays. + eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays + eigh_tridiagonal : eigenvalues and right eigenvectors for + symmetric/Hermitian tridiagonal matrices + + Examples + -------- + >>> import numpy as np + >>> from scipy.linalg import eig_banded + >>> A = np.array([[1, 5, 2, 0], [5, 2, 5, 2], [2, 5, 3, 5], [0, 2, 5, 4]]) + >>> Ab = np.array([[1, 2, 3, 4], [5, 5, 5, 0], [2, 2, 0, 0]]) + >>> w, v = eig_banded(Ab, lower=True) + >>> np.allclose(A @ v - v @ np.diag(w), np.zeros((4, 4))) + True + >>> w = eig_banded(Ab, lower=True, eigvals_only=True) + >>> w + array([-4.26200532, -2.22987175, 3.95222349, 12.53965359]) + + Request only the eigenvalues between ``[-3, 4]`` + + >>> w, v = eig_banded(Ab, lower=True, select='v', select_range=[-3, 4]) + >>> w + array([-2.22987175, 3.95222349]) + + """ + if eigvals_only or overwrite_a_band: + a1 = _asarray_validated(a_band, check_finite=check_finite) + overwrite_a_band = overwrite_a_band or (_datacopied(a1, a_band)) + else: + a1 = array(a_band) + if issubclass(a1.dtype.type, inexact) and not isfinite(a1).all(): + raise ValueError("array must not contain infs or NaNs") + overwrite_a_band = 1 + + if len(a1.shape) != 2: + raise ValueError('expected a 2-D array') + select, vl, vu, il, iu, max_ev = _check_select( + select, select_range, max_ev, a1.shape[1]) + del select_range + if select == 0: + if a1.dtype.char in 'GFD': + # FIXME: implement this somewhen, for now go with builtin values + # FIXME: calc optimal lwork by calling ?hbevd(lwork=-1) + # or by using calc_lwork.f ??? 
+ # lwork = calc_lwork.hbevd(bevd.typecode, a1.shape[0], lower) + internal_name = 'hbevd' + else: # a1.dtype.char in 'fd': + # FIXME: implement this somewhen, for now go with builtin values + # see above + # lwork = calc_lwork.sbevd(bevd.typecode, a1.shape[0], lower) + internal_name = 'sbevd' + bevd, = get_lapack_funcs((internal_name,), (a1,)) + w, v, info = bevd(a1, compute_v=not eigvals_only, + lower=lower, overwrite_ab=overwrite_a_band) + else: # select in [1, 2] + if eigvals_only: + max_ev = 1 + # calculate optimal abstol for dsbevx (see manpage) + if a1.dtype.char in 'fF': # single precision + lamch, = get_lapack_funcs(('lamch',), (array(0, dtype='f'),)) + else: + lamch, = get_lapack_funcs(('lamch',), (array(0, dtype='d'),)) + abstol = 2 * lamch('s') + if a1.dtype.char in 'GFD': + internal_name = 'hbevx' + else: # a1.dtype.char in 'gfd' + internal_name = 'sbevx' + bevx, = get_lapack_funcs((internal_name,), (a1,)) + w, v, m, ifail, info = bevx( + a1, vl, vu, il, iu, compute_v=not eigvals_only, mmax=max_ev, + range=select, lower=lower, overwrite_ab=overwrite_a_band, + abstol=abstol) + # crop off w and v + w = w[:m] + if not eigvals_only: + v = v[:, :m] + _check_info(info, internal_name) + + if eigvals_only: + return w + return w, v + + +def eigvals(a, b=None, overwrite_a=False, check_finite=True, + homogeneous_eigvals=False): + """ + Compute eigenvalues from an ordinary or generalized eigenvalue problem. + + Find eigenvalues of a general matrix:: + + a vr[:,i] = w[i] b vr[:,i] + + Parameters + ---------- + a : (M, M) array_like + A complex or real matrix whose eigenvalues and eigenvectors + will be computed. + b : (M, M) array_like, optional + Right-hand side matrix in a generalized eigenvalue problem. + If omitted, identity matrix is assumed. + overwrite_a : bool, optional + Whether to overwrite data in a (may improve performance) + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities + or NaNs. + homogeneous_eigvals : bool, optional + If True, return the eigenvalues in homogeneous coordinates. + In this case ``w`` is a (2, M) array so that:: + + w[1,i] a vr[:,i] = w[0,i] b vr[:,i] + + Default is False. + + Returns + ------- + w : (M,) or (2, M) double or complex ndarray + The eigenvalues, each repeated according to its multiplicity + but not in any specific order. The shape is (M,) unless + ``homogeneous_eigvals=True``. + + Raises + ------ + LinAlgError + If eigenvalue computation does not converge + + See Also + -------- + eig : eigenvalues and right eigenvectors of general arrays. 
+ eigvalsh : eigenvalues of symmetric or Hermitian arrays + eigvals_banded : eigenvalues for symmetric/Hermitian band matrices + eigvalsh_tridiagonal : eigenvalues of symmetric/Hermitian tridiagonal + matrices + + Examples + -------- + >>> import numpy as np + >>> from scipy import linalg + >>> a = np.array([[0., -1.], [1., 0.]]) + >>> linalg.eigvals(a) + array([0.+1.j, 0.-1.j]) + + >>> b = np.array([[0., 1.], [1., 1.]]) + >>> linalg.eigvals(a, b) + array([ 1.+0.j, -1.+0.j]) + + >>> a = np.array([[3., 0., 0.], [0., 8., 0.], [0., 0., 7.]]) + >>> linalg.eigvals(a, homogeneous_eigvals=True) + array([[3.+0.j, 8.+0.j, 7.+0.j], + [1.+0.j, 1.+0.j, 1.+0.j]]) + + """ + return eig(a, b=b, left=0, right=0, overwrite_a=overwrite_a, + check_finite=check_finite, + homogeneous_eigvals=homogeneous_eigvals) + + +@_deprecate_positional_args(version="1.14.0") +def eigvalsh(a, b=None, *, lower=True, overwrite_a=False, + overwrite_b=False, turbo=_NoValue, eigvals=_NoValue, type=1, + check_finite=True, subset_by_index=None, subset_by_value=None, + driver=None): + """ + Solves a standard or generalized eigenvalue problem for a complex + Hermitian or real symmetric matrix. + + Find eigenvalues array ``w`` of array ``a``, where ``b`` is positive + definite such that for every eigenvalue λ (i-th entry of w) and its + eigenvector vi (i-th column of v) satisfies:: + + a @ vi = λ * b @ vi + vi.conj().T @ a @ vi = λ + vi.conj().T @ b @ vi = 1 + + In the standard problem, b is assumed to be the identity matrix. + + Parameters + ---------- + a : (M, M) array_like + A complex Hermitian or real symmetric matrix whose eigenvalues will + be computed. + b : (M, M) array_like, optional + A complex Hermitian or real symmetric definite positive matrix in. + If omitted, identity matrix is assumed. + lower : bool, optional + Whether the pertinent array data is taken from the lower or upper + triangle of ``a`` and, if applicable, ``b``. (Default: lower) + overwrite_a : bool, optional + Whether to overwrite data in ``a`` (may improve performance). Default + is False. + overwrite_b : bool, optional + Whether to overwrite data in ``b`` (may improve performance). Default + is False. + type : int, optional + For the generalized problems, this keyword specifies the problem type + to be solved for ``w`` and ``v`` (only takes 1, 2, 3 as possible + inputs):: + + 1 => a @ v = w @ b @ v + 2 => a @ b @ v = w @ v + 3 => b @ a @ v = w @ v + + This keyword is ignored for standard problems. + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + subset_by_index : iterable, optional + If provided, this two-element iterable defines the start and the end + indices of the desired eigenvalues (ascending order and 0-indexed). + To return only the second smallest to fifth smallest eigenvalues, + ``[1, 4]`` is used. ``[n-3, n-1]`` returns the largest three. Only + available with "evr", "evx", and "gvx" drivers. The entries are + directly converted to integers via ``int()``. + subset_by_value : iterable, optional + If provided, this two-element iterable defines the half-open interval + ``(a, b]`` that, if any, only the eigenvalues between these values + are returned. Only available with "evr", "evx", and "gvx" drivers. Use + ``np.inf`` for the unconstrained ends. + driver : str, optional + Defines which LAPACK driver should be used. 
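As a minimal usage sketch of the `subset_by_index` and `driver` keywords documented here (the matrix is the same one used in the Examples section further down; the full list of valid driver names continues directly after this sketch):

import numpy as np
from scipy.linalg import eigvalsh

A = np.array([[6., 3., 1., 5.],
              [3., 0., 5., 1.],
              [1., 5., 6., 2.],
              [5., 1., 2., 2.]])
w_all = eigvalsh(A)                                        # all eigenvalues, ascending
w_two = eigvalsh(A, subset_by_index=[0, 1], driver='evr')  # two smallest only
assert np.allclose(w_two, w_all[:2])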
Valid options are "ev", + "evd", "evr", "evx" for standard problems and "gv", "gvd", "gvx" for + generalized (where b is not None) problems. See the Notes section of + `scipy.linalg.eigh`. + turbo : bool, optional, deprecated + .. deprecated:: 1.5.0 + 'eigvalsh' keyword argument `turbo` is deprecated in favor of + ``driver=gvd`` option and will be removed in SciPy 1.14.0. + + eigvals : tuple (lo, hi), optional + .. deprecated:: 1.5.0 + 'eigvalsh' keyword argument `eigvals` is deprecated in favor of + `subset_by_index` option and will be removed in SciPy 1.14.0. + + Returns + ------- + w : (N,) ndarray + The N (N<=M) selected eigenvalues, in ascending order, each + repeated according to its multiplicity. + + Raises + ------ + LinAlgError + If eigenvalue computation does not converge, an error occurred, or + b matrix is not definite positive. Note that if input matrices are + not symmetric or Hermitian, no error will be reported but results will + be wrong. + + See Also + -------- + eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays + eigvals : eigenvalues of general arrays + eigvals_banded : eigenvalues for symmetric/Hermitian band matrices + eigvalsh_tridiagonal : eigenvalues of symmetric/Hermitian tridiagonal + matrices + + Notes + ----- + This function does not check the input array for being Hermitian/symmetric + in order to allow for representing arrays with only their upper/lower + triangular parts. + + This function serves as a one-liner shorthand for `scipy.linalg.eigh` with + the option ``eigvals_only=True`` to get the eigenvalues and not the + eigenvectors. Here it is kept as a legacy convenience. It might be + beneficial to use the main function to have full control and to be a bit + more pythonic. + + Examples + -------- + For more examples see `scipy.linalg.eigh`. + + >>> import numpy as np + >>> from scipy.linalg import eigvalsh + >>> A = np.array([[6, 3, 1, 5], [3, 0, 5, 1], [1, 5, 6, 2], [5, 1, 2, 2]]) + >>> w = eigvalsh(A) + >>> w + array([-3.74637491, -0.76263923, 6.08502336, 12.42399079]) + + """ + return eigh(a, b=b, lower=lower, eigvals_only=True, + overwrite_a=overwrite_a, overwrite_b=overwrite_b, + turbo=turbo, eigvals=eigvals, type=type, + check_finite=check_finite, subset_by_index=subset_by_index, + subset_by_value=subset_by_value, driver=driver) + + +def eigvals_banded(a_band, lower=False, overwrite_a_band=False, + select='a', select_range=None, check_finite=True): + """ + Solve real symmetric or complex Hermitian band matrix eigenvalue problem. + + Find eigenvalues w of a:: + + a v[:,i] = w[i] v[:,i] + v.H v = identity + + The matrix a is stored in a_band either in lower diagonal or upper + diagonal ordered form: + + a_band[u + i - j, j] == a[i,j] (if upper form; i <= j) + a_band[ i - j, j] == a[i,j] (if lower form; i >= j) + + where u is the number of bands above the diagonal. + + Example of a_band (shape of a is (6,6), u=2):: + + upper form: + * * a02 a13 a24 a35 + * a01 a12 a23 a34 a45 + a00 a11 a22 a33 a44 a55 + + lower form: + a00 a11 a22 a33 a44 a55 + a10 a21 a32 a43 a54 * + a20 a31 a42 a53 * * + + Cells marked with * are not used. + + Parameters + ---------- + a_band : (u+1, M) array_like + The bands of the M by M matrix a. + lower : bool, optional + Is the matrix in the lower form. 
(Default is upper form) + overwrite_a_band : bool, optional + Discard data in a_band (may enhance performance) + select : {'a', 'v', 'i'}, optional + Which eigenvalues to calculate + + ====== ======================================== + select calculated + ====== ======================================== + 'a' All eigenvalues + 'v' Eigenvalues in the interval (min, max] + 'i' Eigenvalues with indices min <= i <= max + ====== ======================================== + select_range : (min, max), optional + Range of selected eigenvalues + check_finite : bool, optional + Whether to check that the input matrix contains only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + w : (M,) ndarray + The eigenvalues, in ascending order, each repeated according to its + multiplicity. + + Raises + ------ + LinAlgError + If eigenvalue computation does not converge. + + See Also + -------- + eig_banded : eigenvalues and right eigenvectors for symmetric/Hermitian + band matrices + eigvalsh_tridiagonal : eigenvalues of symmetric/Hermitian tridiagonal + matrices + eigvals : eigenvalues of general arrays + eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays + eig : eigenvalues and right eigenvectors for non-symmetric arrays + + Examples + -------- + >>> import numpy as np + >>> from scipy.linalg import eigvals_banded + >>> A = np.array([[1, 5, 2, 0], [5, 2, 5, 2], [2, 5, 3, 5], [0, 2, 5, 4]]) + >>> Ab = np.array([[1, 2, 3, 4], [5, 5, 5, 0], [2, 2, 0, 0]]) + >>> w = eigvals_banded(Ab, lower=True) + >>> w + array([-4.26200532, -2.22987175, 3.95222349, 12.53965359]) + """ + return eig_banded(a_band, lower=lower, eigvals_only=1, + overwrite_a_band=overwrite_a_band, select=select, + select_range=select_range, check_finite=check_finite) + + +def eigvalsh_tridiagonal(d, e, select='a', select_range=None, + check_finite=True, tol=0., lapack_driver='auto'): + """ + Solve eigenvalue problem for a real symmetric tridiagonal matrix. + + Find eigenvalues `w` of ``a``:: + + a v[:,i] = w[i] v[:,i] + v.H v = identity + + For a real symmetric matrix ``a`` with diagonal elements `d` and + off-diagonal elements `e`. + + Parameters + ---------- + d : ndarray, shape (ndim,) + The diagonal elements of the array. + e : ndarray, shape (ndim-1,) + The off-diagonal elements of the array. + select : {'a', 'v', 'i'}, optional + Which eigenvalues to calculate + + ====== ======================================== + select calculated + ====== ======================================== + 'a' All eigenvalues + 'v' Eigenvalues in the interval (min, max] + 'i' Eigenvalues with indices min <= i <= max + ====== ======================================== + select_range : (min, max), optional + Range of selected eigenvalues + check_finite : bool, optional + Whether to check that the input matrix contains only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + tol : float + The absolute tolerance to which each eigenvalue is required + (only used when ``lapack_driver='stebz'``). + An eigenvalue (or cluster) is considered to have converged if it + lies in an interval of this width. If <= 0. (default), + the value ``eps*|a|`` is used where eps is the machine precision, + and ``|a|`` is the 1-norm of the matrix ``a``. 
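Before the remaining keywords of `eigvalsh_tridiagonal` are listed below, here is a small sketch of the shared `select`/`select_range` interface, reusing the `eigvals_banded` band matrix from the docstring above (the index range chosen is arbitrary):

import numpy as np
from scipy.linalg import eigvals_banded

Ab = np.array([[1., 2., 3., 4.],   # lower-form band storage, u = 2
               [5., 5., 5., 0.],
               [2., 2., 0., 0.]])
w_all = eigvals_banded(Ab, lower=True)
w_two = eigvals_banded(Ab, lower=True, select='i', select_range=[0, 1])
assert np.allclose(w_two, w_all[:2])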
+ lapack_driver : str + LAPACK function to use, can be 'auto', 'stemr', 'stebz', 'sterf', + or 'stev'. When 'auto' (default), it will use 'stemr' if ``select='a'`` + and 'stebz' otherwise. 'sterf' and 'stev' can only be used when + ``select='a'``. + + Returns + ------- + w : (M,) ndarray + The eigenvalues, in ascending order, each repeated according to its + multiplicity. + + Raises + ------ + LinAlgError + If eigenvalue computation does not converge. + + See Also + -------- + eigh_tridiagonal : eigenvalues and right eiegenvectors for + symmetric/Hermitian tridiagonal matrices + + Examples + -------- + >>> import numpy as np + >>> from scipy.linalg import eigvalsh_tridiagonal, eigvalsh + >>> d = 3*np.ones(4) + >>> e = -1*np.ones(3) + >>> w = eigvalsh_tridiagonal(d, e) + >>> A = np.diag(d) + np.diag(e, k=1) + np.diag(e, k=-1) + >>> w2 = eigvalsh(A) # Verify with other eigenvalue routines + >>> np.allclose(w - w2, np.zeros(4)) + True + """ + return eigh_tridiagonal( + d, e, eigvals_only=True, select=select, select_range=select_range, + check_finite=check_finite, tol=tol, lapack_driver=lapack_driver) + + +def eigh_tridiagonal(d, e, eigvals_only=False, select='a', select_range=None, + check_finite=True, tol=0., lapack_driver='auto'): + """ + Solve eigenvalue problem for a real symmetric tridiagonal matrix. + + Find eigenvalues `w` and optionally right eigenvectors `v` of ``a``:: + + a v[:,i] = w[i] v[:,i] + v.H v = identity + + For a real symmetric matrix ``a`` with diagonal elements `d` and + off-diagonal elements `e`. + + Parameters + ---------- + d : ndarray, shape (ndim,) + The diagonal elements of the array. + e : ndarray, shape (ndim-1,) + The off-diagonal elements of the array. + eigvals_only : bool, optional + Compute only the eigenvalues and no eigenvectors. + (Default: calculate also eigenvectors) + select : {'a', 'v', 'i'}, optional + Which eigenvalues to calculate + + ====== ======================================== + select calculated + ====== ======================================== + 'a' All eigenvalues + 'v' Eigenvalues in the interval (min, max] + 'i' Eigenvalues with indices min <= i <= max + ====== ======================================== + select_range : (min, max), optional + Range of selected eigenvalues + check_finite : bool, optional + Whether to check that the input matrix contains only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + tol : float + The absolute tolerance to which each eigenvalue is required + (only used when 'stebz' is the `lapack_driver`). + An eigenvalue (or cluster) is considered to have converged if it + lies in an interval of this width. If <= 0. (default), + the value ``eps*|a|`` is used where eps is the machine precision, + and ``|a|`` is the 1-norm of the matrix ``a``. + lapack_driver : str + LAPACK function to use, can be 'auto', 'stemr', 'stebz', 'sterf', + or 'stev'. When 'auto' (default), it will use 'stemr' if ``select='a'`` + and 'stebz' otherwise. When 'stebz' is used to find the eigenvalues and + ``eigvals_only=False``, then a second LAPACK call (to ``?STEIN``) is + used to find the corresponding eigenvectors. 'sterf' can only be + used when ``eigvals_only=True`` and ``select='a'``. 'stev' can only + be used when ``select='a'``. + + Returns + ------- + w : (M,) ndarray + The eigenvalues, in ascending order, each repeated according to its + multiplicity. 
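The `lapack_driver` option documented above can be cross-checked with a short sketch; under the stated defaults the 'stemr' and 'stebz' paths should agree to rounding error (the tridiagonal matrix is arbitrary, and the Returns entries of `eigh_tridiagonal` continue below):

import numpy as np
from scipy.linalg import eigvalsh_tridiagonal

d = np.full(5, 2.)     # diagonal elements
e = np.full(4, -1.)    # off-diagonal elements
w_stemr = eigvalsh_tridiagonal(d, e, lapack_driver='stemr')
w_stebz = eigvalsh_tridiagonal(d, e, lapack_driver='stebz')
assert np.allclose(w_stemr, w_stebz)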
+ v : (M, M) ndarray + The normalized eigenvector corresponding to the eigenvalue ``w[i]`` is + the column ``v[:,i]``. Only returned if ``eigvals_only=False``. + + Raises + ------ + LinAlgError + If eigenvalue computation does not converge. + + See Also + -------- + eigvalsh_tridiagonal : eigenvalues of symmetric/Hermitian tridiagonal + matrices + eig : eigenvalues and right eigenvectors for non-symmetric arrays + eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays + eig_banded : eigenvalues and right eigenvectors for symmetric/Hermitian + band matrices + + Notes + ----- + This function makes use of LAPACK ``S/DSTEMR`` routines. + + Examples + -------- + >>> import numpy as np + >>> from scipy.linalg import eigh_tridiagonal + >>> d = 3*np.ones(4) + >>> e = -1*np.ones(3) + >>> w, v = eigh_tridiagonal(d, e) + >>> A = np.diag(d) + np.diag(e, k=1) + np.diag(e, k=-1) + >>> np.allclose(A @ v - v @ np.diag(w), np.zeros((4, 4))) + True + """ + d = _asarray_validated(d, check_finite=check_finite) + e = _asarray_validated(e, check_finite=check_finite) + for check in (d, e): + if check.ndim != 1: + raise ValueError('expected a 1-D array') + if check.dtype.char in 'GFD': # complex + raise TypeError('Only real arrays currently supported') + if d.size != e.size + 1: + raise ValueError(f'd ({d.size}) must have one more element than e ({e.size})') + select, vl, vu, il, iu, _ = _check_select( + select, select_range, 0, d.size) + if not isinstance(lapack_driver, str): + raise TypeError('lapack_driver must be str') + drivers = ('auto', 'stemr', 'sterf', 'stebz', 'stev') + if lapack_driver not in drivers: + raise ValueError(f'lapack_driver must be one of {drivers}, ' + f'got {lapack_driver}') + if lapack_driver == 'auto': + lapack_driver = 'stemr' if select == 0 else 'stebz' + + # Quick exit for 1x1 case + if len(d) == 1: + if select == 1 and (not (vl < d[0] <= vu)): # request by value + w = array([]) + v = empty([1, 0], dtype=d.dtype) + else: # all and request by index + w = array([d[0]], dtype=d.dtype) + v = array([[1.]], dtype=d.dtype) + + if eigvals_only: + return w + else: + return w, v + + func, = get_lapack_funcs((lapack_driver,), (d, e)) + compute_v = not eigvals_only + if lapack_driver == 'sterf': + if select != 0: + raise ValueError('sterf can only be used when select == "a"') + if not eigvals_only: + raise ValueError('sterf can only be used when eigvals_only is ' + 'True') + w, info = func(d, e) + m = len(w) + elif lapack_driver == 'stev': + if select != 0: + raise ValueError('stev can only be used when select == "a"') + w, v, info = func(d, e, compute_v=compute_v) + m = len(w) + elif lapack_driver == 'stebz': + tol = float(tol) + internal_name = 'stebz' + stebz, = get_lapack_funcs((internal_name,), (d, e)) + # If getting eigenvectors, needs to be block-ordered (B) instead of + # matrix-ordered (E), and we will reorder later + order = 'E' if eigvals_only else 'B' + m, w, iblock, isplit, info = stebz(d, e, select, vl, vu, il, iu, tol, + order) + else: # 'stemr' + # ?STEMR annoyingly requires size N instead of N-1 + e_ = empty(e.size+1, e.dtype) + e_[:-1] = e + stemr_lwork, = get_lapack_funcs(('stemr_lwork',), (d, e)) + lwork, liwork, info = stemr_lwork(d, e_, select, vl, vu, il, iu, + compute_v=compute_v) + _check_info(info, 'stemr_lwork') + m, w, v, info = func(d, e_, select, vl, vu, il, iu, + compute_v=compute_v, lwork=lwork, liwork=liwork) + _check_info(info, lapack_driver + ' (eigh_tridiagonal)') + w = w[:m] + if eigvals_only: + return w + else: + # Do we still need to 
compute the eigenvalues? + if lapack_driver == 'stebz': + func, = get_lapack_funcs(('stein',), (d, e)) + v, info = func(d, e, w, iblock, isplit) + _check_info(info, 'stein (eigh_tridiagonal)', + positive='%d eigenvectors failed to converge') + # Convert block-order to matrix-order + order = argsort(w) + w, v = w[order], v[:, order] + else: + v = v[:, :m] + return w, v + + +def _check_info(info, driver, positive='did not converge (LAPACK info=%d)'): + """Check info return value.""" + if info < 0: + raise ValueError('illegal value in argument %d of internal %s' + % (-info, driver)) + if info > 0 and positive: + raise LinAlgError(("%s " + positive) % (driver, info,)) + + +def hessenberg(a, calc_q=False, overwrite_a=False, check_finite=True): + """ + Compute Hessenberg form of a matrix. + + The Hessenberg decomposition is:: + + A = Q H Q^H + + where `Q` is unitary/orthogonal and `H` has only zero elements below + the first sub-diagonal. + + Parameters + ---------- + a : (M, M) array_like + Matrix to bring into Hessenberg form. + calc_q : bool, optional + Whether to compute the transformation matrix. Default is False. + overwrite_a : bool, optional + Whether to overwrite `a`; may improve performance. + Default is False. + check_finite : bool, optional + Whether to check that the input matrix contains only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + H : (M, M) ndarray + Hessenberg form of `a`. + Q : (M, M) ndarray + Unitary/orthogonal similarity transformation matrix ``A = Q H Q^H``. + Only returned if ``calc_q=True``. + + Examples + -------- + >>> import numpy as np + >>> from scipy.linalg import hessenberg + >>> A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]]) + >>> H, Q = hessenberg(A, calc_q=True) + >>> H + array([[ 2. , -11.65843866, 1.42005301, 0.25349066], + [ -9.94987437, 14.53535354, -5.31022304, 2.43081618], + [ 0. , -1.83299243, 0.38969961, -0.51527034], + [ 0. , 0. 
, -3.83189513, 1.07494686]]) + >>> np.allclose(Q @ H @ Q.conj().T - A, np.zeros((4, 4))) + True + """ + a1 = _asarray_validated(a, check_finite=check_finite) + if len(a1.shape) != 2 or (a1.shape[0] != a1.shape[1]): + raise ValueError('expected square matrix') + overwrite_a = overwrite_a or (_datacopied(a1, a)) + + # if 2x2 or smaller: already in Hessenberg + if a1.shape[0] <= 2: + if calc_q: + return a1, eye(a1.shape[0]) + return a1 + + gehrd, gebal, gehrd_lwork = get_lapack_funcs(('gehrd', 'gebal', + 'gehrd_lwork'), (a1,)) + ba, lo, hi, pivscale, info = gebal(a1, permute=0, overwrite_a=overwrite_a) + _check_info(info, 'gebal (hessenberg)', positive=False) + n = len(a1) + + lwork = _compute_lwork(gehrd_lwork, ba.shape[0], lo=lo, hi=hi) + + hq, tau, info = gehrd(ba, lo=lo, hi=hi, lwork=lwork, overwrite_a=1) + _check_info(info, 'gehrd (hessenberg)', positive=False) + h = numpy.triu(hq, -1) + if not calc_q: + return h + + # use orghr/unghr to compute q + orghr, orghr_lwork = get_lapack_funcs(('orghr', 'orghr_lwork'), (a1,)) + lwork = _compute_lwork(orghr_lwork, n, lo=lo, hi=hi) + + q, info = orghr(a=hq, tau=tau, lo=lo, hi=hi, lwork=lwork, overwrite_a=1) + _check_info(info, 'orghr (hessenberg)', positive=False) + return h, q + + +def cdf2rdf(w, v): + """ + Converts complex eigenvalues ``w`` and eigenvectors ``v`` to real + eigenvalues in a block diagonal form ``wr`` and the associated real + eigenvectors ``vr``, such that:: + + vr @ wr = X @ vr + + continues to hold, where ``X`` is the original array for which ``w`` and + ``v`` are the eigenvalues and eigenvectors. + + .. versionadded:: 1.1.0 + + Parameters + ---------- + w : (..., M) array_like + Complex or real eigenvalues, an array or stack of arrays + + Conjugate pairs must not be interleaved, else the wrong result + will be produced. So ``[1+1j, 1, 1-1j]`` will give a correct result, + but ``[1+1j, 2+1j, 1-1j, 2-1j]`` will not. + + v : (..., M, M) array_like + Complex or real eigenvectors, a square array or stack of square arrays. + + Returns + ------- + wr : (..., M, M) ndarray + Real diagonal block form of eigenvalues + vr : (..., M, M) ndarray + Real eigenvectors associated with ``wr`` + + See Also + -------- + eig : Eigenvalues and right eigenvectors for non-symmetric arrays + rsf2csf : Convert real Schur form to complex Schur form + + Notes + ----- + ``w``, ``v`` must be the eigenstructure for some *real* matrix ``X``. + For example, obtained by ``w, v = scipy.linalg.eig(X)`` or + ``w, v = numpy.linalg.eig(X)`` in which case ``X`` can also represent + stacked arrays. + + .. versionadded:: 1.1.0 + + Examples + -------- + >>> import numpy as np + >>> X = np.array([[1, 2, 3], [0, 4, 5], [0, -5, 4]]) + >>> X + array([[ 1, 2, 3], + [ 0, 4, 5], + [ 0, -5, 4]]) + + >>> from scipy import linalg + >>> w, v = linalg.eig(X) + >>> w + array([ 1.+0.j, 4.+5.j, 4.-5.j]) + >>> v + array([[ 1.00000+0.j , -0.01906-0.40016j, -0.01906+0.40016j], + [ 0.00000+0.j , 0.00000-0.64788j, 0.00000+0.64788j], + [ 0.00000+0.j , 0.64788+0.j , 0.64788-0.j ]]) + + >>> wr, vr = linalg.cdf2rdf(w, v) + >>> wr + array([[ 1., 0., 0.], + [ 0., 4., 5.], + [ 0., -5., 4.]]) + >>> vr + array([[ 1. , 0.40016, -0.01906], + [ 0. , 0.64788, 0. ], + [ 0. , 0. , 0.64788]]) + + >>> vr @ wr + array([[ 1. , 1.69593, 1.9246 ], + [ 0. , 2.59153, 3.23942], + [ 0. , -3.23942, 2.59153]]) + >>> X @ vr + array([[ 1. , 1.69593, 1.9246 ], + [ 0. , 2.59153, 3.23942], + [ 0. 
, -3.23942, 2.59153]]) + """ + w, v = _asarray_validated(w), _asarray_validated(v) + + # check dimensions + if w.ndim < 1: + raise ValueError('expected w to be at least 1D') + if v.ndim < 2: + raise ValueError('expected v to be at least 2D') + if v.ndim != w.ndim + 1: + raise ValueError('expected eigenvectors array to have exactly one ' + 'dimension more than eigenvalues array') + + # check shapes + n = w.shape[-1] + M = w.shape[:-1] + if v.shape[-2] != v.shape[-1]: + raise ValueError('expected v to be a square matrix or stacked square ' + 'matrices: v.shape[-2] = v.shape[-1]') + if v.shape[-1] != n: + raise ValueError('expected the same number of eigenvalues as ' + 'eigenvectors') + + # get indices for each first pair of complex eigenvalues + complex_mask = iscomplex(w) + n_complex = complex_mask.sum(axis=-1) + + # check if all complex eigenvalues have conjugate pairs + if not (n_complex % 2 == 0).all(): + raise ValueError('expected complex-conjugate pairs of eigenvalues') + + # find complex indices + idx = nonzero(complex_mask) + idx_stack = idx[:-1] + idx_elem = idx[-1] + + # filter them to conjugate indices, assuming pairs are not interleaved + j = idx_elem[0::2] + k = idx_elem[1::2] + stack_ind = () + for i in idx_stack: + # should never happen, assuming nonzero orders by the last axis + assert (i[0::2] == i[1::2]).all(), \ + "Conjugate pair spanned different arrays!" + stack_ind += (i[0::2],) + + # all eigenvalues to diagonal form + wr = zeros(M + (n, n), dtype=w.real.dtype) + di = range(n) + wr[..., di, di] = w.real + + # complex eigenvalues to real block diagonal form + wr[stack_ind + (j, k)] = w[stack_ind + (j,)].imag + wr[stack_ind + (k, j)] = w[stack_ind + (k,)].imag + + # compute real eigenvectors associated with real block diagonal eigenvalues + u = zeros(M + (n, n), dtype=numpy.cdouble) + u[..., di, di] = 1.0 + u[stack_ind + (j, j)] = 0.5j + u[stack_ind + (j, k)] = 0.5 + u[stack_ind + (k, j)] = -0.5j + u[stack_ind + (k, k)] = 0.5 + + # multiply matrices v and u (equivalent to v @ u) + vr = einsum('...ij,...jk->...ik', v, u).real + + return wr, vr diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_decomp_cholesky.py b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_decomp_cholesky.py new file mode 100644 index 0000000000000000000000000000000000000000..76de9de3fab7358d1e9d66efa22fafa043b961ca --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_decomp_cholesky.py @@ -0,0 +1,356 @@ +"""Cholesky decomposition functions.""" + +from numpy import asarray_chkfinite, asarray, atleast_2d + +# Local imports +from ._misc import LinAlgError, _datacopied +from .lapack import get_lapack_funcs + +__all__ = ['cholesky', 'cho_factor', 'cho_solve', 'cholesky_banded', + 'cho_solve_banded'] + + +def _cholesky(a, lower=False, overwrite_a=False, clean=True, + check_finite=True): + """Common code for cholesky() and cho_factor().""" + + a1 = asarray_chkfinite(a) if check_finite else asarray(a) + a1 = atleast_2d(a1) + + # Dimension check + if a1.ndim != 2: + raise ValueError(f'Input array needs to be 2D but received a {a1.ndim}d-array.') + # Squareness check + if a1.shape[0] != a1.shape[1]: + raise ValueError('Input array is expected to be square but has ' + f'the shape: {a1.shape}.') + + # Quick return for square empty array + if a1.size == 0: + return a1.copy(), lower + + overwrite_a = overwrite_a or _datacopied(a1, a) + potrf, = get_lapack_funcs(('potrf',), (a1,)) + c, info = potrf(a1, lower=lower, overwrite_a=overwrite_a, clean=clean) + if info > 0: + 
raise LinAlgError("%d-th leading minor of the array is not positive " + "definite" % info) + if info < 0: + raise ValueError(f'LAPACK reported an illegal value in {-info}-th argument' + 'on entry to "POTRF".') + return c, lower + + +def cholesky(a, lower=False, overwrite_a=False, check_finite=True): + """ + Compute the Cholesky decomposition of a matrix. + + Returns the Cholesky decomposition, :math:`A = L L^*` or + :math:`A = U^* U` of a Hermitian positive-definite matrix A. + + Parameters + ---------- + a : (M, M) array_like + Matrix to be decomposed + lower : bool, optional + Whether to compute the upper- or lower-triangular Cholesky + factorization. Default is upper-triangular. + overwrite_a : bool, optional + Whether to overwrite data in `a` (may improve performance). + check_finite : bool, optional + Whether to check that the input matrix contains only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + c : (M, M) ndarray + Upper- or lower-triangular Cholesky factor of `a`. + + Raises + ------ + LinAlgError : if decomposition fails. + + Examples + -------- + >>> import numpy as np + >>> from scipy.linalg import cholesky + >>> a = np.array([[1,-2j],[2j,5]]) + >>> L = cholesky(a, lower=True) + >>> L + array([[ 1.+0.j, 0.+0.j], + [ 0.+2.j, 1.+0.j]]) + >>> L @ L.T.conj() + array([[ 1.+0.j, 0.-2.j], + [ 0.+2.j, 5.+0.j]]) + + """ + c, lower = _cholesky(a, lower=lower, overwrite_a=overwrite_a, clean=True, + check_finite=check_finite) + return c + + +def cho_factor(a, lower=False, overwrite_a=False, check_finite=True): + """ + Compute the Cholesky decomposition of a matrix, to use in cho_solve + + Returns a matrix containing the Cholesky decomposition, + ``A = L L*`` or ``A = U* U`` of a Hermitian positive-definite matrix `a`. + The return value can be directly used as the first parameter to cho_solve. + + .. warning:: + The returned matrix also contains random data in the entries not + used by the Cholesky decomposition. If you need to zero these + entries, use the function `cholesky` instead. + + Parameters + ---------- + a : (M, M) array_like + Matrix to be decomposed + lower : bool, optional + Whether to compute the upper or lower triangular Cholesky factorization + (Default: upper-triangular) + overwrite_a : bool, optional + Whether to overwrite data in a (may improve performance) + check_finite : bool, optional + Whether to check that the input matrix contains only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + c : (M, M) ndarray + Matrix whose upper or lower triangle contains the Cholesky factor + of `a`. Other parts of the matrix contain random data. + lower : bool + Flag indicating whether the factor is in the lower or upper triangle + + Raises + ------ + LinAlgError + Raised if decomposition fails. + + See Also + -------- + cho_solve : Solve a linear set equations using the Cholesky factorization + of a matrix. + + Examples + -------- + >>> import numpy as np + >>> from scipy.linalg import cho_factor + >>> A = np.array([[9, 3, 1, 5], [3, 7, 5, 1], [1, 5, 9, 2], [5, 1, 2, 6]]) + >>> c, low = cho_factor(A) + >>> c + array([[3. , 1. , 0.33333333, 1.66666667], + [3. , 2.44948974, 1.90515869, -0.27216553], + [1. , 5. , 2.29330749, 0.8559528 ], + [5. , 1. , 2. , 1.55418563]]) + >>> np.allclose(np.triu(c).T @ np. 
triu(c) - A, np.zeros((4, 4))) + True + + """ + c, lower = _cholesky(a, lower=lower, overwrite_a=overwrite_a, clean=False, + check_finite=check_finite) + return c, lower + + +def cho_solve(c_and_lower, b, overwrite_b=False, check_finite=True): + """Solve the linear equations A x = b, given the Cholesky factorization of A. + + Parameters + ---------- + (c, lower) : tuple, (array, bool) + Cholesky factorization of a, as given by cho_factor + b : array + Right-hand side + overwrite_b : bool, optional + Whether to overwrite data in b (may improve performance) + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + x : array + The solution to the system A x = b + + See Also + -------- + cho_factor : Cholesky factorization of a matrix + + Examples + -------- + >>> import numpy as np + >>> from scipy.linalg import cho_factor, cho_solve + >>> A = np.array([[9, 3, 1, 5], [3, 7, 5, 1], [1, 5, 9, 2], [5, 1, 2, 6]]) + >>> c, low = cho_factor(A) + >>> x = cho_solve((c, low), [1, 1, 1, 1]) + >>> np.allclose(A @ x - [1, 1, 1, 1], np.zeros(4)) + True + + """ + (c, lower) = c_and_lower + if check_finite: + b1 = asarray_chkfinite(b) + c = asarray_chkfinite(c) + else: + b1 = asarray(b) + c = asarray(c) + if c.ndim != 2 or c.shape[0] != c.shape[1]: + raise ValueError("The factored matrix c is not square.") + if c.shape[1] != b1.shape[0]: + raise ValueError(f"incompatible dimensions ({c.shape} and {b1.shape})") + + overwrite_b = overwrite_b or _datacopied(b1, b) + + potrs, = get_lapack_funcs(('potrs',), (c, b1)) + x, info = potrs(c, b1, lower=lower, overwrite_b=overwrite_b) + if info != 0: + raise ValueError('illegal value in %dth argument of internal potrs' + % -info) + return x + + +def cholesky_banded(ab, overwrite_ab=False, lower=False, check_finite=True): + """ + Cholesky decompose a banded Hermitian positive-definite matrix + + The matrix a is stored in ab either in lower-diagonal or upper- + diagonal ordered form:: + + ab[u + i - j, j] == a[i,j] (if upper form; i <= j) + ab[ i - j, j] == a[i,j] (if lower form; i >= j) + + Example of ab (shape of a is (6,6), u=2):: + + upper form: + * * a02 a13 a24 a35 + * a01 a12 a23 a34 a45 + a00 a11 a22 a33 a44 a55 + + lower form: + a00 a11 a22 a33 a44 a55 + a10 a21 a32 a43 a54 * + a20 a31 a42 a53 * * + + Parameters + ---------- + ab : (u + 1, M) array_like + Banded matrix + overwrite_ab : bool, optional + Discard data in ab (may enhance performance) + lower : bool, optional + Is the matrix in the lower form. (Default is upper form) + check_finite : bool, optional + Whether to check that the input matrix contains only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + c : (u + 1, M) ndarray + Cholesky factorization of a, in the same banded format as ab + + See Also + -------- + cho_solve_banded : + Solve a linear set equations, given the Cholesky factorization + of a banded Hermitian. 
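As a hedged sketch of the upper-form band storage described above (``ab[u + i - j, j] == a[i, j]`` for ``i <= j``), build `ab` from an arbitrary positive-definite banded matrix and compare the banded factor's diagonal with the dense Cholesky factor; the docstring's own Examples follow:

import numpy as np
from scipy.linalg import cholesky, cholesky_banded

n, u = 6, 2
A = (np.diag(np.full(n, 4.))
     + np.diag(np.full(n - 1, 1.), 1) + np.diag(np.full(n - 1, 1.), -1)
     + np.diag(np.full(n - 2, .5), 2) + np.diag(np.full(n - 2, .5), -2))

ab = np.zeros((u + 1, n))
for i in range(n):
    for j in range(i, min(i + u + 1, n)):
        ab[u + i - j, j] = A[i, j]        # upper-form mapping from the docstring

c = cholesky_banded(ab)                   # upper form is the default
U = cholesky(A)                           # dense upper-triangular factor
assert np.allclose(c[u, :], np.diag(U))   # row u of c holds the factor's diagonal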
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.linalg import cholesky_banded + >>> from numpy import allclose, zeros, diag + >>> Ab = np.array([[0, 0, 1j, 2, 3j], [0, -1, -2, 3, 4], [9, 8, 7, 6, 9]]) + >>> A = np.diag(Ab[0,2:], k=2) + np.diag(Ab[1,1:], k=1) + >>> A = A + A.conj().T + np.diag(Ab[2, :]) + >>> c = cholesky_banded(Ab) + >>> C = np.diag(c[0, 2:], k=2) + np.diag(c[1, 1:], k=1) + np.diag(c[2, :]) + >>> np.allclose(C.conj().T @ C - A, np.zeros((5, 5))) + True + + """ + if check_finite: + ab = asarray_chkfinite(ab) + else: + ab = asarray(ab) + + pbtrf, = get_lapack_funcs(('pbtrf',), (ab,)) + c, info = pbtrf(ab, lower=lower, overwrite_ab=overwrite_ab) + if info > 0: + raise LinAlgError("%d-th leading minor not positive definite" % info) + if info < 0: + raise ValueError('illegal value in %d-th argument of internal pbtrf' + % -info) + return c + + +def cho_solve_banded(cb_and_lower, b, overwrite_b=False, check_finite=True): + """ + Solve the linear equations ``A x = b``, given the Cholesky factorization of + the banded Hermitian ``A``. + + Parameters + ---------- + (cb, lower) : tuple, (ndarray, bool) + `cb` is the Cholesky factorization of A, as given by cholesky_banded. + `lower` must be the same value that was given to cholesky_banded. + b : array_like + Right-hand side + overwrite_b : bool, optional + If True, the function will overwrite the values in `b`. + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + x : array + The solution to the system A x = b + + See Also + -------- + cholesky_banded : Cholesky factorization of a banded matrix + + Notes + ----- + + .. versionadded:: 0.8.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy.linalg import cholesky_banded, cho_solve_banded + >>> Ab = np.array([[0, 0, 1j, 2, 3j], [0, -1, -2, 3, 4], [9, 8, 7, 6, 9]]) + >>> A = np.diag(Ab[0,2:], k=2) + np.diag(Ab[1,1:], k=1) + >>> A = A + A.conj().T + np.diag(Ab[2, :]) + >>> c = cholesky_banded(Ab) + >>> x = cho_solve_banded((c, False), np.ones(5)) + >>> np.allclose(A @ x - np.ones(5), np.zeros(5)) + True + + """ + (cb, lower) = cb_and_lower + if check_finite: + cb = asarray_chkfinite(cb) + b = asarray_chkfinite(b) + else: + cb = asarray(cb) + b = asarray(b) + + # Validate shapes. 
+ if cb.shape[-1] != b.shape[0]: + raise ValueError("shapes of cb and b are not compatible.") + + pbtrs, = get_lapack_funcs(('pbtrs',), (cb, b)) + x, info = pbtrs(cb, b, lower=lower, overwrite_b=overwrite_b) + if info > 0: + raise LinAlgError("%dth leading minor not positive definite" % info) + if info < 0: + raise ValueError('illegal value in %dth argument of internal pbtrs' + % -info) + return x diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_decomp_ldl.py b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_decomp_ldl.py new file mode 100644 index 0000000000000000000000000000000000000000..336df1d5fb416f635c91afe3cc2cfb3c340239fc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_decomp_ldl.py @@ -0,0 +1,353 @@ +from warnings import warn + +import numpy as np +from numpy import (atleast_2d, arange, zeros_like, imag, diag, + iscomplexobj, tril, triu, argsort, empty_like) +from scipy._lib._util import ComplexWarning +from ._decomp import _asarray_validated +from .lapack import get_lapack_funcs, _compute_lwork + +__all__ = ['ldl'] + + +def ldl(A, lower=True, hermitian=True, overwrite_a=False, check_finite=True): + """ Computes the LDLt or Bunch-Kaufman factorization of a symmetric/ + hermitian matrix. + + This function returns a block diagonal matrix D consisting blocks of size + at most 2x2 and also a possibly permuted unit lower triangular matrix + ``L`` such that the factorization ``A = L D L^H`` or ``A = L D L^T`` + holds. If `lower` is False then (again possibly permuted) upper + triangular matrices are returned as outer factors. + + The permutation array can be used to triangularize the outer factors + simply by a row shuffle, i.e., ``lu[perm, :]`` is an upper/lower + triangular matrix. This is also equivalent to multiplication with a + permutation matrix ``P.dot(lu)``, where ``P`` is a column-permuted + identity matrix ``I[:, perm]``. + + Depending on the value of the boolean `lower`, only upper or lower + triangular part of the input array is referenced. Hence, a triangular + matrix on entry would give the same result as if the full matrix is + supplied. + + Parameters + ---------- + A : array_like + Square input array + lower : bool, optional + This switches between the lower and upper triangular outer factors of + the factorization. Lower triangular (``lower=True``) is the default. + hermitian : bool, optional + For complex-valued arrays, this defines whether ``A = A.conj().T`` or + ``A = A.T`` is assumed. For real-valued arrays, this switch has no + effect. + overwrite_a : bool, optional + Allow overwriting data in `A` (may enhance performance). The default + is False. + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + lu : ndarray + The (possibly) permuted upper/lower triangular outer factor of the + factorization. + d : ndarray + The block diagonal multiplier of the factorization. + perm : ndarray + The row-permutation index array that brings lu into triangular form. + + Raises + ------ + ValueError + If input array is not square. + ComplexWarning + If a complex-valued array with nonzero imaginary parts on the + diagonal is given and hermitian is set to True. 
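A brief complex Hermitian sketch of the factorization contract stated above (matrix chosen arbitrarily); the docstring continues with See Also and Examples below:

import numpy as np
from scipy.linalg import ldl

A = np.array([[4.0, 1 + 2j, 0.5],
              [1 - 2j, 3.0, -1j],
              [0.5, 1j, 2.0]])
lu, d, perm = ldl(A)                            # lower outer factor by default
assert np.allclose(lu @ d @ lu.conj().T, A)     # A = L D L^H
assert np.allclose(np.triu(lu[perm, :], 1), 0)  # row shuffle triangularizes lu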
+ + See Also + -------- + cholesky, lu + + Notes + ----- + This function uses ``?SYTRF`` routines for symmetric matrices and + ``?HETRF`` routines for Hermitian matrices from LAPACK. See [1]_ for + the algorithm details. + + Depending on the `lower` keyword value, only lower or upper triangular + part of the input array is referenced. Moreover, this keyword also defines + the structure of the outer factors of the factorization. + + .. versionadded:: 1.1.0 + + References + ---------- + .. [1] J.R. Bunch, L. Kaufman, Some stable methods for calculating + inertia and solving symmetric linear systems, Math. Comput. Vol.31, + 1977. :doi:`10.2307/2005787` + + Examples + -------- + Given an upper triangular array ``a`` that represents the full symmetric + array with its entries, obtain ``l``, 'd' and the permutation vector `perm`: + + >>> import numpy as np + >>> from scipy.linalg import ldl + >>> a = np.array([[2, -1, 3], [0, 2, 0], [0, 0, 1]]) + >>> lu, d, perm = ldl(a, lower=0) # Use the upper part + >>> lu + array([[ 0. , 0. , 1. ], + [ 0. , 1. , -0.5], + [ 1. , 1. , 1.5]]) + >>> d + array([[-5. , 0. , 0. ], + [ 0. , 1.5, 0. ], + [ 0. , 0. , 2. ]]) + >>> perm + array([2, 1, 0]) + >>> lu[perm, :] + array([[ 1. , 1. , 1.5], + [ 0. , 1. , -0.5], + [ 0. , 0. , 1. ]]) + >>> lu.dot(d).dot(lu.T) + array([[ 2., -1., 3.], + [-1., 2., 0.], + [ 3., 0., 1.]]) + + """ + a = atleast_2d(_asarray_validated(A, check_finite=check_finite)) + if a.shape[0] != a.shape[1]: + raise ValueError('The input array "a" should be square.') + # Return empty arrays for empty square input + if a.size == 0: + return empty_like(a), empty_like(a), np.array([], dtype=int) + + n = a.shape[0] + r_or_c = complex if iscomplexobj(a) else float + + # Get the LAPACK routine + if r_or_c is complex and hermitian: + s, sl = 'hetrf', 'hetrf_lwork' + if np.any(imag(diag(a))): + warn('scipy.linalg.ldl():\nThe imaginary parts of the diagonal' + 'are ignored. Use "hermitian=False" for factorization of' + 'complex symmetric arrays.', ComplexWarning, stacklevel=2) + else: + s, sl = 'sytrf', 'sytrf_lwork' + + solver, solver_lwork = get_lapack_funcs((s, sl), (a,)) + lwork = _compute_lwork(solver_lwork, n, lower=lower) + ldu, piv, info = solver(a, lwork=lwork, lower=lower, + overwrite_a=overwrite_a) + if info < 0: + raise ValueError(f'{s.upper()} exited with the internal error "illegal value ' + f'in argument number {-info}". See LAPACK documentation ' + 'for the error codes.') + + swap_arr, pivot_arr = _ldl_sanitize_ipiv(piv, lower=lower) + d, lu = _ldl_get_d_and_l(ldu, pivot_arr, lower=lower, hermitian=hermitian) + lu, perm = _ldl_construct_tri_factor(lu, swap_arr, pivot_arr, lower=lower) + + return lu, d, perm + + +def _ldl_sanitize_ipiv(a, lower=True): + """ + This helper function takes the rather strangely encoded permutation array + returned by the LAPACK routines ?(HE/SY)TRF and converts it into + regularized permutation and diagonal pivot size format. + + Since FORTRAN uses 1-indexing and LAPACK uses different start points for + upper and lower formats there are certain offsets in the indices used + below. + + Let's assume a result where the matrix is 6x6 and there are two 2x2 + and two 1x1 blocks reported by the routine. 
To ease the coding efforts, + we still populate a 6-sized array and fill zeros as the following :: + + pivots = [2, 0, 2, 0, 1, 1] + + This denotes a diagonal matrix of the form :: + + [x x ] + [x x ] + [ x x ] + [ x x ] + [ x ] + [ x] + + In other words, we write 2 when the 2x2 block is first encountered and + automatically write 0 to the next entry and skip the next spin of the + loop. Thus, a separate counter or array appends to keep track of block + sizes are avoided. If needed, zeros can be filtered out later without + losing the block structure. + + Parameters + ---------- + a : ndarray + The permutation array ipiv returned by LAPACK + lower : bool, optional + The switch to select whether upper or lower triangle is chosen in + the LAPACK call. + + Returns + ------- + swap_ : ndarray + The array that defines the row/column swap operations. For example, + if row two is swapped with row four, the result is [0, 3, 2, 3]. + pivots : ndarray + The array that defines the block diagonal structure as given above. + + """ + n = a.size + swap_ = arange(n) + pivots = zeros_like(swap_, dtype=int) + skip_2x2 = False + + # Some upper/lower dependent offset values + # range (s)tart, r(e)nd, r(i)ncrement + x, y, rs, re, ri = (1, 0, 0, n, 1) if lower else (-1, -1, n-1, -1, -1) + + for ind in range(rs, re, ri): + # If previous spin belonged already to a 2x2 block + if skip_2x2: + skip_2x2 = False + continue + + cur_val = a[ind] + # do we have a 1x1 block or not? + if cur_val > 0: + if cur_val != ind+1: + # Index value != array value --> permutation required + swap_[ind] = swap_[cur_val-1] + pivots[ind] = 1 + # Not. + elif cur_val < 0 and cur_val == a[ind+x]: + # first neg entry of 2x2 block identifier + if -cur_val != ind+2: + # Index value != array value --> permutation required + swap_[ind+x] = swap_[-cur_val-1] + pivots[ind+y] = 2 + skip_2x2 = True + else: # Doesn't make sense, give up + raise ValueError('While parsing the permutation array ' + 'in "scipy.linalg.ldl", invalid entries ' + 'found. The array syntax is invalid.') + return swap_, pivots + + +def _ldl_get_d_and_l(ldu, pivs, lower=True, hermitian=True): + """ + Helper function to extract the diagonal and triangular matrices for + LDL.T factorization. + + Parameters + ---------- + ldu : ndarray + The compact output returned by the LAPACK routing + pivs : ndarray + The sanitized array of {0, 1, 2} denoting the sizes of the pivots. For + every 2 there is a succeeding 0. + lower : bool, optional + If set to False, upper triangular part is considered. + hermitian : bool, optional + If set to False a symmetric complex array is assumed. + + Returns + ------- + d : ndarray + The block diagonal matrix. + lu : ndarray + The upper/lower triangular matrix + """ + is_c = iscomplexobj(ldu) + d = diag(diag(ldu)) + n = d.shape[0] + blk_i = 0 # block index + + # row/column offsets for selecting sub-, super-diagonal + x, y = (1, 0) if lower else (0, 1) + + lu = tril(ldu, -1) if lower else triu(ldu, 1) + diag_inds = arange(n) + lu[diag_inds, diag_inds] = 1 + + for blk in pivs[pivs != 0]: + # increment the block index and check for 2s + # if 2 then copy the off diagonals depending on uplo + inc = blk_i + blk + + if blk == 2: + d[blk_i+x, blk_i+y] = ldu[blk_i+x, blk_i+y] + # If Hermitian matrix is factorized, the cross-offdiagonal element + # should be conjugated. + if is_c and hermitian: + d[blk_i+y, blk_i+x] = ldu[blk_i+x, blk_i+y].conj() + else: + d[blk_i+y, blk_i+x] = ldu[blk_i+x, blk_i+y] + + lu[blk_i+x, blk_i+y] = 0. 
+ blk_i = inc + + return d, lu + + +def _ldl_construct_tri_factor(lu, swap_vec, pivs, lower=True): + """ + Helper function to construct explicit outer factors of LDL factorization. + + If lower is True the permuted factors are multiplied as L(1)*L(2)*...*L(k). + Otherwise, the permuted factors are multiplied as L(k)*...*L(2)*L(1). See + LAPACK documentation for more details. + + Parameters + ---------- + lu : ndarray + The triangular array that is extracted from LAPACK routine call with + ones on the diagonals. + swap_vec : ndarray + The array that defines the row swapping indices. If the kth entry is m + then rows k,m are swapped. Notice that the mth entry is not necessarily + k to avoid undoing the swapping. + pivs : ndarray + The array that defines the block diagonal structure returned by + _ldl_sanitize_ipiv(). + lower : bool, optional + The boolean to switch between lower and upper triangular structure. + + Returns + ------- + lu : ndarray + The square outer factor which satisfies the L * D * L.T = A + perm : ndarray + The permutation vector that brings the lu to the triangular form + + Notes + ----- + Note that the original argument "lu" is overwritten. + + """ + n = lu.shape[0] + perm = arange(n) + # Setup the reading order of the permutation matrix for upper/lower + rs, re, ri = (n-1, -1, -1) if lower else (0, n, 1) + + for ind in range(rs, re, ri): + s_ind = swap_vec[ind] + if s_ind != ind: + # Column start and end positions + col_s = ind if lower else 0 + col_e = n if lower else ind+1 + + # If we stumble upon a 2x2 block include both cols in the perm. + if pivs[ind] == (0 if lower else 2): + col_s += -1 if lower else 0 + col_e += 0 if lower else 1 + lu[[s_ind, ind], col_s:col_e] = lu[[ind, s_ind], col_s:col_e] + perm[[s_ind, ind]] = perm[[ind, s_ind]] + + return lu, argsort(perm) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_decomp_qz.py b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_decomp_qz.py new file mode 100644 index 0000000000000000000000000000000000000000..39361f172df7f1985c7ed0fbc4d919b5c4545725 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_decomp_qz.py @@ -0,0 +1,449 @@ +import warnings + +import numpy as np +from numpy import asarray_chkfinite +from ._misc import LinAlgError, _datacopied, LinAlgWarning +from .lapack import get_lapack_funcs + + +__all__ = ['qz', 'ordqz'] + +_double_precision = ['i', 'l', 'd'] + + +def _select_function(sort): + if callable(sort): + # assume the user knows what they're doing + sfunction = sort + elif sort == 'lhp': + sfunction = _lhp + elif sort == 'rhp': + sfunction = _rhp + elif sort == 'iuc': + sfunction = _iuc + elif sort == 'ouc': + sfunction = _ouc + else: + raise ValueError("sort parameter must be None, a callable, or " + "one of ('lhp','rhp','iuc','ouc')") + + return sfunction + + +def _lhp(x, y): + out = np.empty_like(x, dtype=bool) + nonzero = (y != 0) + # handles (x, y) = (0, 0) too + out[~nonzero] = False + out[nonzero] = (np.real(x[nonzero]/y[nonzero]) < 0.0) + return out + + +def _rhp(x, y): + out = np.empty_like(x, dtype=bool) + nonzero = (y != 0) + # handles (x, y) = (0, 0) too + out[~nonzero] = False + out[nonzero] = (np.real(x[nonzero]/y[nonzero]) > 0.0) + return out + + +def _iuc(x, y): + out = np.empty_like(x, dtype=bool) + nonzero = (y != 0) + # handles (x, y) = (0, 0) too + out[~nonzero] = False + out[nonzero] = (abs(x[nonzero]/y[nonzero]) < 1.0) + return out + + +def _ouc(x, y): + out = np.empty_like(x, dtype=bool) + xzero = (x == 0) + yzero = (y == 0) + 
out[xzero & yzero] = False + out[~xzero & yzero] = True + out[~yzero] = (abs(x[~yzero]/y[~yzero]) > 1.0) + return out + + +def _qz(A, B, output='real', lwork=None, sort=None, overwrite_a=False, + overwrite_b=False, check_finite=True): + if sort is not None: + # Disabled due to segfaults on win32, see ticket 1717. + raise ValueError("The 'sort' input of qz() has to be None and will be " + "removed in a future release. Use ordqz instead.") + + if output not in ['real', 'complex', 'r', 'c']: + raise ValueError("argument must be 'real', or 'complex'") + + if check_finite: + a1 = asarray_chkfinite(A) + b1 = asarray_chkfinite(B) + else: + a1 = np.asarray(A) + b1 = np.asarray(B) + + a_m, a_n = a1.shape + b_m, b_n = b1.shape + if not (a_m == a_n == b_m == b_n): + raise ValueError("Array dimensions must be square and agree") + + typa = a1.dtype.char + if output in ['complex', 'c'] and typa not in ['F', 'D']: + if typa in _double_precision: + a1 = a1.astype('D') + typa = 'D' + else: + a1 = a1.astype('F') + typa = 'F' + typb = b1.dtype.char + if output in ['complex', 'c'] and typb not in ['F', 'D']: + if typb in _double_precision: + b1 = b1.astype('D') + typb = 'D' + else: + b1 = b1.astype('F') + typb = 'F' + + overwrite_a = overwrite_a or (_datacopied(a1, A)) + overwrite_b = overwrite_b or (_datacopied(b1, B)) + + gges, = get_lapack_funcs(('gges',), (a1, b1)) + + if lwork is None or lwork == -1: + # get optimal work array size + result = gges(lambda x: None, a1, b1, lwork=-1) + lwork = result[-2][0].real.astype(int) + + def sfunction(x): + return None + result = gges(sfunction, a1, b1, lwork=lwork, overwrite_a=overwrite_a, + overwrite_b=overwrite_b, sort_t=0) + + info = result[-1] + if info < 0: + raise ValueError(f"Illegal value in argument {-info} of gges") + elif info > 0 and info <= a_n: + warnings.warn("The QZ iteration failed. (a,b) are not in Schur " + "form, but ALPHAR(j), ALPHAI(j), and BETA(j) should be " + f"correct for J={info-1},...,N", LinAlgWarning, + stacklevel=3) + elif info == a_n+1: + raise LinAlgError("Something other than QZ iteration failed") + elif info == a_n+2: + raise LinAlgError("After reordering, roundoff changed values of some " + "complex eigenvalues so that leading eigenvalues " + "in the Generalized Schur form no longer satisfy " + "sort=True. This could also be due to scaling.") + elif info == a_n+3: + raise LinAlgError("Reordering failed in tgsen") + + return result, gges.typecode + + +def qz(A, B, output='real', lwork=None, sort=None, overwrite_a=False, + overwrite_b=False, check_finite=True): + """ + QZ decomposition for generalized eigenvalues of a pair of matrices. + + The QZ, or generalized Schur, decomposition for a pair of n-by-n + matrices (A,B) is:: + + (A,B) = (Q @ AA @ Z*, Q @ BB @ Z*) + + where AA, BB is in generalized Schur form if BB is upper-triangular + with non-negative diagonal and AA is upper-triangular, or for real QZ + decomposition (``output='real'``) block upper triangular with 1x1 + and 2x2 blocks. In this case, the 1x1 blocks correspond to real + generalized eigenvalues and 2x2 blocks are 'standardized' by making + the corresponding elements of BB have the form:: + + [ a 0 ] + [ 0 b ] + + and the pair of corresponding 2x2 blocks in AA and BB will have a complex + conjugate pair of generalized eigenvalues. If (``output='complex'``) or + A and B are complex matrices, Z' denotes the conjugate-transpose of Z. + Q and Z are unitary matrices. 
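The identities stated above can be spot-checked with a short sketch (test matrices are arbitrary); for the real decomposition the conjugate transpose reduces to a plain transpose:

import numpy as np
from scipy.linalg import qz

rng = np.random.default_rng(0)
A = rng.standard_normal((4, 4))
B = rng.standard_normal((4, 4))

AA, BB, Q, Z = qz(A, B)                  # real QZ: AA is quasi-upper triangular
assert np.allclose(Q @ AA @ Z.T, A)
assert np.allclose(Q @ BB @ Z.T, B)
assert np.allclose(Q @ Q.T, np.eye(4))   # Q and Z are orthogonal here
assert np.allclose(Z @ Z.T, np.eye(4))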
+ + Parameters + ---------- + A : (N, N) array_like + 2-D array to decompose + B : (N, N) array_like + 2-D array to decompose + output : {'real', 'complex'}, optional + Construct the real or complex QZ decomposition for real matrices. + Default is 'real'. + lwork : int, optional + Work array size. If None or -1, it is automatically computed. + sort : {None, callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional + NOTE: THIS INPUT IS DISABLED FOR NOW. Use ordqz instead. + + Specifies whether the upper eigenvalues should be sorted. A callable + may be passed that, given a eigenvalue, returns a boolean denoting + whether the eigenvalue should be sorted to the top-left (True). For + real matrix pairs, the sort function takes three real arguments + (alphar, alphai, beta). The eigenvalue + ``x = (alphar + alphai*1j)/beta``. For complex matrix pairs or + output='complex', the sort function takes two complex arguments + (alpha, beta). The eigenvalue ``x = (alpha/beta)``. Alternatively, + string parameters may be used: + + - 'lhp' Left-hand plane (x.real < 0.0) + - 'rhp' Right-hand plane (x.real > 0.0) + - 'iuc' Inside the unit circle (x*x.conjugate() < 1.0) + - 'ouc' Outside the unit circle (x*x.conjugate() > 1.0) + + Defaults to None (no sorting). + overwrite_a : bool, optional + Whether to overwrite data in a (may improve performance) + overwrite_b : bool, optional + Whether to overwrite data in b (may improve performance) + check_finite : bool, optional + If true checks the elements of `A` and `B` are finite numbers. If + false does no checking and passes matrix through to + underlying algorithm. + + Returns + ------- + AA : (N, N) ndarray + Generalized Schur form of A. + BB : (N, N) ndarray + Generalized Schur form of B. + Q : (N, N) ndarray + The left Schur vectors. + Z : (N, N) ndarray + The right Schur vectors. + + See Also + -------- + ordqz + + Notes + ----- + Q is transposed versus the equivalent function in Matlab. + + .. versionadded:: 0.11.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy.linalg import qz + + >>> A = np.array([[1, 2, -1], [5, 5, 5], [2, 4, -8]]) + >>> B = np.array([[1, 1, -3], [3, 1, -1], [5, 6, -2]]) + + Compute the decomposition. The QZ decomposition is not unique, so + depending on the underlying library that is used, there may be + differences in the signs of coefficients in the following output. + + >>> AA, BB, Q, Z = qz(A, B) + >>> AA + array([[-1.36949157, -4.05459025, 7.44389431], + [ 0. , 7.65653432, 5.13476017], + [ 0. , -0.65978437, 2.4186015 ]]) # may vary + >>> BB + array([[ 1.71890633, -1.64723705, -0.72696385], + [ 0. , 8.6965692 , -0. ], + [ 0. , 0. , 2.27446233]]) # may vary + >>> Q + array([[-0.37048362, 0.1903278 , 0.90912992], + [-0.90073232, 0.16534124, -0.40167593], + [ 0.22676676, 0.96769706, -0.11017818]]) # may vary + >>> Z + array([[-0.67660785, 0.63528924, -0.37230283], + [ 0.70243299, 0.70853819, -0.06753907], + [ 0.22088393, -0.30721526, -0.92565062]]) # may vary + + Verify the QZ decomposition. With real output, we only need the + transpose of ``Z`` in the following expressions. + + >>> Q @ AA @ Z.T # Should be A + array([[ 1., 2., -1.], + [ 5., 5., 5.], + [ 2., 4., -8.]]) + >>> Q @ BB @ Z.T # Should be B + array([[ 1., 1., -3.], + [ 3., 1., -1.], + [ 5., 6., -2.]]) + + Repeat the decomposition, but with ``output='complex'``. + + >>> AA, BB, Q, Z = qz(A, B, output='complex') + + For conciseness in the output, we use ``np.set_printoptions()`` to set + the output precision of NumPy arrays to 3 and display tiny values as 0. 
+ + >>> np.set_printoptions(precision=3, suppress=True) + >>> AA + array([[-1.369+0.j , 2.248+4.237j, 4.861-5.022j], + [ 0. +0.j , 7.037+2.922j, 0.794+4.932j], + [ 0. +0.j , 0. +0.j , 2.655-1.103j]]) # may vary + >>> BB + array([[ 1.719+0.j , -1.115+1.j , -0.763-0.646j], + [ 0. +0.j , 7.24 +0.j , -3.144+3.322j], + [ 0. +0.j , 0. +0.j , 2.732+0.j ]]) # may vary + >>> Q + array([[ 0.326+0.175j, -0.273-0.029j, -0.886-0.052j], + [ 0.794+0.426j, -0.093+0.134j, 0.402-0.02j ], + [-0.2 -0.107j, -0.816+0.482j, 0.151-0.167j]]) # may vary + >>> Z + array([[ 0.596+0.32j , -0.31 +0.414j, 0.393-0.347j], + [-0.619-0.332j, -0.479+0.314j, 0.154-0.393j], + [-0.195-0.104j, 0.576+0.27j , 0.715+0.187j]]) # may vary + + With complex arrays, we must use ``Z.conj().T`` in the following + expressions to verify the decomposition. + + >>> Q @ AA @ Z.conj().T # Should be A + array([[ 1.-0.j, 2.-0.j, -1.-0.j], + [ 5.+0.j, 5.+0.j, 5.-0.j], + [ 2.+0.j, 4.+0.j, -8.+0.j]]) + >>> Q @ BB @ Z.conj().T # Should be B + array([[ 1.+0.j, 1.+0.j, -3.+0.j], + [ 3.-0.j, 1.-0.j, -1.+0.j], + [ 5.+0.j, 6.+0.j, -2.+0.j]]) + + """ + # output for real + # AA, BB, sdim, alphar, alphai, beta, vsl, vsr, work, info + # output for complex + # AA, BB, sdim, alpha, beta, vsl, vsr, work, info + result, _ = _qz(A, B, output=output, lwork=lwork, sort=sort, + overwrite_a=overwrite_a, overwrite_b=overwrite_b, + check_finite=check_finite) + return result[0], result[1], result[-4], result[-3] + + +def ordqz(A, B, sort='lhp', output='real', overwrite_a=False, + overwrite_b=False, check_finite=True): + """QZ decomposition for a pair of matrices with reordering. + + Parameters + ---------- + A : (N, N) array_like + 2-D array to decompose + B : (N, N) array_like + 2-D array to decompose + sort : {callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional + Specifies whether the upper eigenvalues should be sorted. A + callable may be passed that, given an ordered pair ``(alpha, + beta)`` representing the eigenvalue ``x = (alpha/beta)``, + returns a boolean denoting whether the eigenvalue should be + sorted to the top-left (True). For the real matrix pairs + ``beta`` is real while ``alpha`` can be complex, and for + complex matrix pairs both ``alpha`` and ``beta`` can be + complex. The callable must be able to accept a NumPy + array. Alternatively, string parameters may be used: + + - 'lhp' Left-hand plane (x.real < 0.0) + - 'rhp' Right-hand plane (x.real > 0.0) + - 'iuc' Inside the unit circle (x*x.conjugate() < 1.0) + - 'ouc' Outside the unit circle (x*x.conjugate() > 1.0) + + With the predefined sorting functions, an infinite eigenvalue + (i.e., ``alpha != 0`` and ``beta = 0``) is considered to lie in + neither the left-hand nor the right-hand plane, but it is + considered to lie outside the unit circle. For the eigenvalue + ``(alpha, beta) = (0, 0)``, the predefined sorting functions + all return `False`. + output : str {'real','complex'}, optional + Construct the real or complex QZ decomposition for real matrices. + Default is 'real'. + overwrite_a : bool, optional + If True, the contents of A are overwritten. + overwrite_b : bool, optional + If True, the contents of B are overwritten. + check_finite : bool, optional + If true checks the elements of `A` and `B` are finite numbers. If + false does no checking and passes matrix through to + underlying algorithm. + + Returns + ------- + AA : (N, N) ndarray + Generalized Schur form of A. + BB : (N, N) ndarray + Generalized Schur form of B. + alpha : (N,) ndarray + alpha = alphar + alphai * 1j. See notes. 
+ beta : (N,) ndarray + See notes. + Q : (N, N) ndarray + The left Schur vectors. + Z : (N, N) ndarray + The right Schur vectors. + + See Also + -------- + qz + + Notes + ----- + On exit, ``(ALPHAR(j) + ALPHAI(j)*i)/BETA(j), j=1,...,N``, will be the + generalized eigenvalues. ``ALPHAR(j) + ALPHAI(j)*i`` and + ``BETA(j),j=1,...,N`` are the diagonals of the complex Schur form (S,T) + that would result if the 2-by-2 diagonal blocks of the real generalized + Schur form of (A,B) were further reduced to triangular form using complex + unitary transformations. If ALPHAI(j) is zero, then the jth eigenvalue is + real; if positive, then the ``j``\\ th and ``(j+1)``\\ st eigenvalues are a + complex conjugate pair, with ``ALPHAI(j+1)`` negative. + + .. versionadded:: 0.17.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy.linalg import ordqz + >>> A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]]) + >>> B = np.array([[0, 6, 0, 0], [5, 0, 2, 1], [5, 2, 6, 6], [4, 7, 7, 7]]) + >>> AA, BB, alpha, beta, Q, Z = ordqz(A, B, sort='lhp') + + Since we have sorted for left half plane eigenvalues, negatives come first + + >>> (alpha/beta).real < 0 + array([ True, True, False, False], dtype=bool) + + """ + (AA, BB, _, *ab, Q, Z, _, _), typ = _qz(A, B, output=output, sort=None, + overwrite_a=overwrite_a, + overwrite_b=overwrite_b, + check_finite=check_finite) + + if typ == 's': + alpha, beta = ab[0] + ab[1]*np.complex64(1j), ab[2] + elif typ == 'd': + alpha, beta = ab[0] + ab[1]*1.j, ab[2] + else: + alpha, beta = ab + + sfunction = _select_function(sort) + select = sfunction(alpha, beta) + + tgsen = get_lapack_funcs('tgsen', (AA, BB)) + # the real case needs 4n + 16 lwork + lwork = 4*AA.shape[0] + 16 if typ in 'sd' else 1 + AAA, BBB, *ab, QQ, ZZ, _, _, _, _, info = tgsen(select, AA, BB, Q, Z, + ijob=0, + lwork=lwork, liwork=1) + + # Once more for tgsen output + if typ == 's': + alpha, beta = ab[0] + ab[1]*np.complex64(1j), ab[2] + elif typ == 'd': + alpha, beta = ab[0] + ab[1]*1.j, ab[2] + else: + alpha, beta = ab + + if info < 0: + raise ValueError(f"Illegal value in argument {-info} of tgsen") + elif info == 1: + raise ValueError("Reordering of (A, B) failed because the transformed" + " matrix pair (A, B) would be too far from " + "generalized Schur form; the problem is very " + "ill-conditioned. (A, B) may have been partially " + "reordered.") + + return AAA, BBB, alpha, beta, QQ, ZZ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_decomp_schur.py b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_decomp_schur.py new file mode 100644 index 0000000000000000000000000000000000000000..ed41e4f9ea7e42bc241a7dfcb810fe8ed024e27c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_decomp_schur.py @@ -0,0 +1,300 @@ +"""Schur decomposition functions.""" +import numpy +from numpy import asarray_chkfinite, single, asarray, array +from numpy.linalg import norm + + +# Local imports. +from ._misc import LinAlgError, _datacopied +from .lapack import get_lapack_funcs +from ._decomp import eigvals + +__all__ = ['schur', 'rsf2csf'] + +_double_precision = ['i', 'l', 'd'] + + +def schur(a, output='real', lwork=None, overwrite_a=False, sort=None, + check_finite=True): + """ + Compute Schur decomposition of a matrix. + + The Schur decomposition is:: + + A = Z T Z^H + + where Z is unitary and T is either upper-triangular, or for real + Schur decomposition (output='real'), quasi-upper triangular. 
In + the quasi-triangular form, 2x2 blocks describing complex-valued + eigenvalue pairs may extrude from the diagonal. + + Parameters + ---------- + a : (M, M) array_like + Matrix to decompose + output : {'real', 'complex'}, optional + Construct the real or complex Schur decomposition (for real matrices). + lwork : int, optional + Work array size. If None or -1, it is automatically computed. + overwrite_a : bool, optional + Whether to overwrite data in a (may improve performance). + sort : {None, callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional + Specifies whether the upper eigenvalues should be sorted. A callable + may be passed that, given a eigenvalue, returns a boolean denoting + whether the eigenvalue should be sorted to the top-left (True). + Alternatively, string parameters may be used:: + + 'lhp' Left-hand plane (x.real < 0.0) + 'rhp' Right-hand plane (x.real > 0.0) + 'iuc' Inside the unit circle (x*x.conjugate() <= 1.0) + 'ouc' Outside the unit circle (x*x.conjugate() > 1.0) + + Defaults to None (no sorting). + check_finite : bool, optional + Whether to check that the input matrix contains only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + T : (M, M) ndarray + Schur form of A. It is real-valued for the real Schur decomposition. + Z : (M, M) ndarray + An unitary Schur transformation matrix for A. + It is real-valued for the real Schur decomposition. + sdim : int + If and only if sorting was requested, a third return value will + contain the number of eigenvalues satisfying the sort condition. + + Raises + ------ + LinAlgError + Error raised under three conditions: + + 1. The algorithm failed due to a failure of the QR algorithm to + compute all eigenvalues. + 2. If eigenvalue sorting was requested, the eigenvalues could not be + reordered due to a failure to separate eigenvalues, usually because + of poor conditioning. + 3. If eigenvalue sorting was requested, roundoff errors caused the + leading eigenvalues to no longer satisfy the sorting condition. + + See Also + -------- + rsf2csf : Convert real Schur form to complex Schur form + + Examples + -------- + >>> import numpy as np + >>> from scipy.linalg import schur, eigvals + >>> A = np.array([[0, 2, 2], [0, 1, 2], [1, 0, 1]]) + >>> T, Z = schur(A) + >>> T + array([[ 2.65896708, 1.42440458, -1.92933439], + [ 0. , -0.32948354, -0.49063704], + [ 0. , 1.31178921, -0.32948354]]) + >>> Z + array([[0.72711591, -0.60156188, 0.33079564], + [0.52839428, 0.79801892, 0.28976765], + [0.43829436, 0.03590414, -0.89811411]]) + + >>> T2, Z2 = schur(A, output='complex') + >>> T2 + array([[ 2.65896708, -1.22839825+1.32378589j, 0.42590089+1.51937378j], # may vary + [ 0. , -0.32948354+0.80225456j, -0.59877807+0.56192146j], + [ 0. , 0. 
, -0.32948354-0.80225456j]]) + >>> eigvals(T2) + array([2.65896708, -0.32948354+0.80225456j, -0.32948354-0.80225456j]) + + An arbitrary custom eig-sorting condition, having positive imaginary part, + which is satisfied by only one eigenvalue + + >>> T3, Z3, sdim = schur(A, output='complex', sort=lambda x: x.imag > 0) + >>> sdim + 1 + + """ + if output not in ['real', 'complex', 'r', 'c']: + raise ValueError("argument must be 'real', or 'complex'") + if check_finite: + a1 = asarray_chkfinite(a) + else: + a1 = asarray(a) + if numpy.issubdtype(a1.dtype, numpy.integer): + a1 = asarray(a, dtype=numpy.dtype("long")) + if len(a1.shape) != 2 or (a1.shape[0] != a1.shape[1]): + raise ValueError('expected square matrix') + typ = a1.dtype.char + if output in ['complex', 'c'] and typ not in ['F', 'D']: + if typ in _double_precision: + a1 = a1.astype('D') + typ = 'D' + else: + a1 = a1.astype('F') + typ = 'F' + overwrite_a = overwrite_a or (_datacopied(a1, a)) + gees, = get_lapack_funcs(('gees',), (a1,)) + if lwork is None or lwork == -1: + # get optimal work array + result = gees(lambda x: None, a1, lwork=-1) + lwork = result[-2][0].real.astype(numpy.int_) + + if sort is None: + sort_t = 0 + def sfunction(x): + return None + else: + sort_t = 1 + if callable(sort): + sfunction = sort + elif sort == 'lhp': + def sfunction(x): + return x.real < 0.0 + elif sort == 'rhp': + def sfunction(x): + return x.real >= 0.0 + elif sort == 'iuc': + def sfunction(x): + return abs(x) <= 1.0 + elif sort == 'ouc': + def sfunction(x): + return abs(x) > 1.0 + else: + raise ValueError("'sort' parameter must either be 'None', or a " + "callable, or one of ('lhp','rhp','iuc','ouc')") + + result = gees(sfunction, a1, lwork=lwork, overwrite_a=overwrite_a, + sort_t=sort_t) + + info = result[-1] + if info < 0: + raise ValueError(f'illegal value in {-info}-th argument of internal gees') + elif info == a1.shape[0] + 1: + raise LinAlgError('Eigenvalues could not be separated for reordering.') + elif info == a1.shape[0] + 2: + raise LinAlgError('Leading eigenvalues do not satisfy sort condition.') + elif info > 0: + raise LinAlgError("Schur form not found. Possibly ill-conditioned.") + + if sort_t == 0: + return result[0], result[-3] + else: + return result[0], result[-3], result[1] + + +eps = numpy.finfo(float).eps +feps = numpy.finfo(single).eps + +_array_kind = {'b': 0, 'h': 0, 'B': 0, 'i': 0, 'l': 0, + 'f': 0, 'd': 0, 'F': 1, 'D': 1} +_array_precision = {'i': 1, 'l': 1, 'f': 0, 'd': 1, 'F': 0, 'D': 1} +_array_type = [['f', 'd'], ['F', 'D']] + + +def _commonType(*arrays): + kind = 0 + precision = 0 + for a in arrays: + t = a.dtype.char + kind = max(kind, _array_kind[t]) + precision = max(precision, _array_precision[t]) + return _array_type[kind][precision] + + +def _castCopy(type, *arrays): + cast_arrays = () + for a in arrays: + if a.dtype.char == type: + cast_arrays = cast_arrays + (a.copy(),) + else: + cast_arrays = cast_arrays + (a.astype(type),) + if len(cast_arrays) == 1: + return cast_arrays[0] + else: + return cast_arrays + + +def rsf2csf(T, Z, check_finite=True): + """ + Convert real Schur form to complex Schur form. + + Convert a quasi-diagonal real-valued Schur form to the upper-triangular + complex-valued Schur form. + + Parameters + ---------- + T : (M, M) array_like + Real Schur form of the original array + Z : (M, M) array_like + Schur transformation matrix + check_finite : bool, optional + Whether to check that the input arrays contain only finite numbers. 
+ Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + T : (M, M) ndarray + Complex Schur form of the original array + Z : (M, M) ndarray + Schur transformation matrix corresponding to the complex form + + See Also + -------- + schur : Schur decomposition of an array + + Examples + -------- + >>> import numpy as np + >>> from scipy.linalg import schur, rsf2csf + >>> A = np.array([[0, 2, 2], [0, 1, 2], [1, 0, 1]]) + >>> T, Z = schur(A) + >>> T + array([[ 2.65896708, 1.42440458, -1.92933439], + [ 0. , -0.32948354, -0.49063704], + [ 0. , 1.31178921, -0.32948354]]) + >>> Z + array([[0.72711591, -0.60156188, 0.33079564], + [0.52839428, 0.79801892, 0.28976765], + [0.43829436, 0.03590414, -0.89811411]]) + >>> T2 , Z2 = rsf2csf(T, Z) + >>> T2 + array([[2.65896708+0.j, -1.64592781+0.743164187j, -1.21516887+1.00660462j], + [0.+0.j , -0.32948354+8.02254558e-01j, -0.82115218-2.77555756e-17j], + [0.+0.j , 0.+0.j, -0.32948354-0.802254558j]]) + >>> Z2 + array([[0.72711591+0.j, 0.28220393-0.31385693j, 0.51319638-0.17258824j], + [0.52839428+0.j, 0.24720268+0.41635578j, -0.68079517-0.15118243j], + [0.43829436+0.j, -0.76618703+0.01873251j, -0.03063006+0.46857912j]]) + + """ + if check_finite: + Z, T = map(asarray_chkfinite, (Z, T)) + else: + Z, T = map(asarray, (Z, T)) + + for ind, X in enumerate([Z, T]): + if X.ndim != 2 or X.shape[0] != X.shape[1]: + raise ValueError("Input '{}' must be square.".format('ZT'[ind])) + + if T.shape[0] != Z.shape[0]: + message = f"Input array shapes must match: Z: {Z.shape} vs. T: {T.shape}" + raise ValueError(message) + N = T.shape[0] + t = _commonType(Z, T, array([3.0], 'F')) + Z, T = _castCopy(t, Z, T) + + for m in range(N-1, 0, -1): + if abs(T[m, m-1]) > eps*(abs(T[m-1, m-1]) + abs(T[m, m])): + mu = eigvals(T[m-1:m+1, m-1:m+1]) - T[m, m] + r = norm([mu[0], T[m, m-1]]) + c = mu[0] / r + s = T[m, m-1] / r + G = array([[c.conj(), s], [-s, c]], dtype=t) + + T[m-1:m+1, m-1:] = G.dot(T[m-1:m+1, m-1:]) + T[:m+1, m-1:m+1] = T[:m+1, m-1:m+1].dot(G.conj().T) + Z[:, m-1:m+1] = Z[:, m-1:m+1].dot(G.conj().T) + + T[m, m-1] = 0.0 + return T, Z diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_decomp_update.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_decomp_update.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..ea47d937fd2808a054c12b40093d9197d12519f8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_decomp_update.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_fblas.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_fblas.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..b9163e3028fd64471a97c8d1af59366245cbd8b1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_fblas.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_flapack.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_flapack.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..e64286904927cdd471a30892f0414907df349fa5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_flapack.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:4565fa0d0dc440d074193a89770c505a2a17463dcfd6346e83da0858f22f1b1e +size 2066281 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_lapack_subroutines.h b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_lapack_subroutines.h new file mode 100644 index 0000000000000000000000000000000000000000..d4a33afb9b9af82e82eec69fb02b63820053928d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_lapack_subroutines.h @@ -0,0 +1,1521 @@ +/* +This file was generated by _generate_pyx.py. +Do not edit this file directly. +*/ + +#include "fortran_defs.h" +#include "npy_cblas.h" + +typedef int (*_cselect1)(npy_complex64*); +typedef int (*_cselect2)(npy_complex64*, npy_complex64*); +typedef int (*_dselect2)(double*, double*); +typedef int (*_dselect3)(double*, double*, double*); +typedef int (*_sselect2)(float*, float*); +typedef int (*_sselect3)(float*, float*, float*); +typedef int (*_zselect1)(npy_complex128*); +typedef int (*_zselect2)(npy_complex128*, npy_complex128*); + +#ifdef __cplusplus +extern "C" { +#endif + +void BLAS_FUNC(cbbcsd)(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, int *m, int *p, int *q, float *theta, float *phi, npy_complex64 *u1, int *ldu1, npy_complex64 *u2, int *ldu2, npy_complex64 *v1t, int *ldv1t, npy_complex64 *v2t, int *ldv2t, float *b11d, float *b11e, float *b12d, float *b12e, float *b21d, float *b21e, float *b22d, float *b22e, float *rwork, int *lrwork, int *info); +void BLAS_FUNC(cbdsqr)(char *uplo, int *n, int *ncvt, int *nru, int *ncc, float *d, float *e, npy_complex64 *vt, int *ldvt, npy_complex64 *u, int *ldu, npy_complex64 *c, int *ldc, float *rwork, int *info); +void BLAS_FUNC(cgbbrd)(char *vect, int *m, int *n, int *ncc, int *kl, int *ku, npy_complex64 *ab, int *ldab, float *d, float *e, npy_complex64 *q, int *ldq, npy_complex64 *pt, int *ldpt, npy_complex64 *c, int *ldc, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(cgbcon)(char *norm, int *n, int *kl, int *ku, npy_complex64 *ab, int *ldab, int *ipiv, float *anorm, float *rcond, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(cgbequ)(int *m, int *n, int *kl, int *ku, npy_complex64 *ab, int *ldab, float *r, float *c, float *rowcnd, float *colcnd, float *amax, int *info); +void BLAS_FUNC(cgbequb)(int *m, int *n, int *kl, int *ku, npy_complex64 *ab, int *ldab, float *r, float *c, float *rowcnd, float *colcnd, float *amax, int *info); +void BLAS_FUNC(cgbrfs)(char *trans, int *n, int *kl, int *ku, int *nrhs, npy_complex64 *ab, int *ldab, npy_complex64 *afb, int *ldafb, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(cgbsv)(int *n, int *kl, int *ku, int *nrhs, npy_complex64 *ab, int *ldab, int *ipiv, npy_complex64 *b, int *ldb, int *info); +void BLAS_FUNC(cgbsvx)(char *fact, char *trans, int *n, int *kl, int *ku, int *nrhs, npy_complex64 *ab, int *ldab, npy_complex64 *afb, int *ldafb, int *ipiv, char *equed, float *r, float *c, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *rcond, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(cgbtf2)(int *m, int *n, int *kl, int *ku, npy_complex64 *ab, int *ldab, int *ipiv, int *info); +void BLAS_FUNC(cgbtrf)(int *m, int *n, int *kl, int *ku, npy_complex64 *ab, int *ldab, int *ipiv, int *info); +void BLAS_FUNC(cgbtrs)(char *trans, int *n, int *kl, int *ku, int *nrhs, npy_complex64 
*ab, int *ldab, int *ipiv, npy_complex64 *b, int *ldb, int *info); +void BLAS_FUNC(cgebak)(char *job, char *side, int *n, int *ilo, int *ihi, float *scale, int *m, npy_complex64 *v, int *ldv, int *info); +void BLAS_FUNC(cgebal)(char *job, int *n, npy_complex64 *a, int *lda, int *ilo, int *ihi, float *scale, int *info); +void BLAS_FUNC(cgebd2)(int *m, int *n, npy_complex64 *a, int *lda, float *d, float *e, npy_complex64 *tauq, npy_complex64 *taup, npy_complex64 *work, int *info); +void BLAS_FUNC(cgebrd)(int *m, int *n, npy_complex64 *a, int *lda, float *d, float *e, npy_complex64 *tauq, npy_complex64 *taup, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(cgecon)(char *norm, int *n, npy_complex64 *a, int *lda, float *anorm, float *rcond, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(cgeequ)(int *m, int *n, npy_complex64 *a, int *lda, float *r, float *c, float *rowcnd, float *colcnd, float *amax, int *info); +void BLAS_FUNC(cgeequb)(int *m, int *n, npy_complex64 *a, int *lda, float *r, float *c, float *rowcnd, float *colcnd, float *amax, int *info); +void BLAS_FUNC(cgees)(char *jobvs, char *sort, _cselect1 *select, int *n, npy_complex64 *a, int *lda, int *sdim, npy_complex64 *w, npy_complex64 *vs, int *ldvs, npy_complex64 *work, int *lwork, float *rwork, int *bwork, int *info); +void BLAS_FUNC(cgeesx)(char *jobvs, char *sort, _cselect1 *select, char *sense, int *n, npy_complex64 *a, int *lda, int *sdim, npy_complex64 *w, npy_complex64 *vs, int *ldvs, float *rconde, float *rcondv, npy_complex64 *work, int *lwork, float *rwork, int *bwork, int *info); +void BLAS_FUNC(cgeev)(char *jobvl, char *jobvr, int *n, npy_complex64 *a, int *lda, npy_complex64 *w, npy_complex64 *vl, int *ldvl, npy_complex64 *vr, int *ldvr, npy_complex64 *work, int *lwork, float *rwork, int *info); +void BLAS_FUNC(cgeevx)(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, npy_complex64 *a, int *lda, npy_complex64 *w, npy_complex64 *vl, int *ldvl, npy_complex64 *vr, int *ldvr, int *ilo, int *ihi, float *scale, float *abnrm, float *rconde, float *rcondv, npy_complex64 *work, int *lwork, float *rwork, int *info); +void BLAS_FUNC(cgehd2)(int *n, int *ilo, int *ihi, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *info); +void BLAS_FUNC(cgehrd)(int *n, int *ilo, int *ihi, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(cgelq2)(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *info); +void BLAS_FUNC(cgelqf)(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(cgels)(char *trans, int *m, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(cgelsd)(int *m, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, float *s, float *rcond, int *rank, npy_complex64 *work, int *lwork, float *rwork, int *iwork, int *info); +void BLAS_FUNC(cgelss)(int *m, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, float *s, float *rcond, int *rank, npy_complex64 *work, int *lwork, float *rwork, int *info); +void BLAS_FUNC(cgelsy)(int *m, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, int *jpvt, float *rcond, int *rank, npy_complex64 *work, int *lwork, float *rwork, int *info); +void BLAS_FUNC(cgemqrt)(char *side, char *trans, int *m, int *n, int *k, int *nb, 
npy_complex64 *v, int *ldv, npy_complex64 *t, int *ldt, npy_complex64 *c, int *ldc, npy_complex64 *work, int *info); +void BLAS_FUNC(cgeql2)(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *info); +void BLAS_FUNC(cgeqlf)(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(cgeqp3)(int *m, int *n, npy_complex64 *a, int *lda, int *jpvt, npy_complex64 *tau, npy_complex64 *work, int *lwork, float *rwork, int *info); +void BLAS_FUNC(cgeqr2)(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *info); +void BLAS_FUNC(cgeqr2p)(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *info); +void BLAS_FUNC(cgeqrf)(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(cgeqrfp)(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(cgeqrt)(int *m, int *n, int *nb, npy_complex64 *a, int *lda, npy_complex64 *t, int *ldt, npy_complex64 *work, int *info); +void BLAS_FUNC(cgeqrt2)(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *t, int *ldt, int *info); +void BLAS_FUNC(cgeqrt3)(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *t, int *ldt, int *info); +void BLAS_FUNC(cgerfs)(char *trans, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *af, int *ldaf, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(cgerq2)(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *info); +void BLAS_FUNC(cgerqf)(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(cgesc2)(int *n, npy_complex64 *a, int *lda, npy_complex64 *rhs, int *ipiv, int *jpiv, float *scale); +void BLAS_FUNC(cgesdd)(char *jobz, int *m, int *n, npy_complex64 *a, int *lda, float *s, npy_complex64 *u, int *ldu, npy_complex64 *vt, int *ldvt, npy_complex64 *work, int *lwork, float *rwork, int *iwork, int *info); +void BLAS_FUNC(cgesv)(int *n, int *nrhs, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *b, int *ldb, int *info); +void BLAS_FUNC(cgesvd)(char *jobu, char *jobvt, int *m, int *n, npy_complex64 *a, int *lda, float *s, npy_complex64 *u, int *ldu, npy_complex64 *vt, int *ldvt, npy_complex64 *work, int *lwork, float *rwork, int *info); +void BLAS_FUNC(cgesvx)(char *fact, char *trans, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *af, int *ldaf, int *ipiv, char *equed, float *r, float *c, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *rcond, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(cgetc2)(int *n, npy_complex64 *a, int *lda, int *ipiv, int *jpiv, int *info); +void BLAS_FUNC(cgetf2)(int *m, int *n, npy_complex64 *a, int *lda, int *ipiv, int *info); +void BLAS_FUNC(cgetrf)(int *m, int *n, npy_complex64 *a, int *lda, int *ipiv, int *info); +void BLAS_FUNC(cgetri)(int *n, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(cgetrs)(char *trans, int *n, int *nrhs, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *b, int *ldb, int *info); +void BLAS_FUNC(cggbak)(char *job, char *side, int *n, int *ilo, int *ihi, float *lscale, float *rscale, int *m, npy_complex64 *v, int 
*ldv, int *info); +void BLAS_FUNC(cggbal)(char *job, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, int *ilo, int *ihi, float *lscale, float *rscale, float *work, int *info); +void BLAS_FUNC(cgges)(char *jobvsl, char *jobvsr, char *sort, _cselect2 *selctg, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, int *sdim, npy_complex64 *alpha, npy_complex64 *beta, npy_complex64 *vsl, int *ldvsl, npy_complex64 *vsr, int *ldvsr, npy_complex64 *work, int *lwork, float *rwork, int *bwork, int *info); +void BLAS_FUNC(cggesx)(char *jobvsl, char *jobvsr, char *sort, _cselect2 *selctg, char *sense, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, int *sdim, npy_complex64 *alpha, npy_complex64 *beta, npy_complex64 *vsl, int *ldvsl, npy_complex64 *vsr, int *ldvsr, float *rconde, float *rcondv, npy_complex64 *work, int *lwork, float *rwork, int *iwork, int *liwork, int *bwork, int *info); +void BLAS_FUNC(cggev)(char *jobvl, char *jobvr, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *alpha, npy_complex64 *beta, npy_complex64 *vl, int *ldvl, npy_complex64 *vr, int *ldvr, npy_complex64 *work, int *lwork, float *rwork, int *info); +void BLAS_FUNC(cggevx)(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *alpha, npy_complex64 *beta, npy_complex64 *vl, int *ldvl, npy_complex64 *vr, int *ldvr, int *ilo, int *ihi, float *lscale, float *rscale, float *abnrm, float *bbnrm, float *rconde, float *rcondv, npy_complex64 *work, int *lwork, float *rwork, int *iwork, int *bwork, int *info); +void BLAS_FUNC(cggglm)(int *n, int *m, int *p, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *d, npy_complex64 *x, npy_complex64 *y, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(cgghrd)(char *compq, char *compz, int *n, int *ilo, int *ihi, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *q, int *ldq, npy_complex64 *z, int *ldz, int *info); +void BLAS_FUNC(cgglse)(int *m, int *n, int *p, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *c, npy_complex64 *d, npy_complex64 *x, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(cggqrf)(int *n, int *m, int *p, npy_complex64 *a, int *lda, npy_complex64 *taua, npy_complex64 *b, int *ldb, npy_complex64 *taub, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(cggrqf)(int *m, int *p, int *n, npy_complex64 *a, int *lda, npy_complex64 *taua, npy_complex64 *b, int *ldb, npy_complex64 *taub, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(cgtcon)(char *norm, int *n, npy_complex64 *dl, npy_complex64 *d, npy_complex64 *du, npy_complex64 *du2, int *ipiv, float *anorm, float *rcond, npy_complex64 *work, int *info); +void BLAS_FUNC(cgtrfs)(char *trans, int *n, int *nrhs, npy_complex64 *dl, npy_complex64 *d, npy_complex64 *du, npy_complex64 *dlf, npy_complex64 *df, npy_complex64 *duf, npy_complex64 *du2, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(cgtsv)(int *n, int *nrhs, npy_complex64 *dl, npy_complex64 *d, npy_complex64 *du, npy_complex64 *b, int *ldb, int *info); +void BLAS_FUNC(cgtsvx)(char *fact, char *trans, int *n, int *nrhs, npy_complex64 *dl, npy_complex64 *d, npy_complex64 *du, npy_complex64 *dlf, npy_complex64 *df, npy_complex64 *duf, npy_complex64 *du2, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, 
int *ldx, float *rcond, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(cgttrf)(int *n, npy_complex64 *dl, npy_complex64 *d, npy_complex64 *du, npy_complex64 *du2, int *ipiv, int *info); +void BLAS_FUNC(cgttrs)(char *trans, int *n, int *nrhs, npy_complex64 *dl, npy_complex64 *d, npy_complex64 *du, npy_complex64 *du2, int *ipiv, npy_complex64 *b, int *ldb, int *info); +void BLAS_FUNC(cgtts2)(int *itrans, int *n, int *nrhs, npy_complex64 *dl, npy_complex64 *d, npy_complex64 *du, npy_complex64 *du2, int *ipiv, npy_complex64 *b, int *ldb); +void BLAS_FUNC(chbev)(char *jobz, char *uplo, int *n, int *kd, npy_complex64 *ab, int *ldab, float *w, npy_complex64 *z, int *ldz, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(chbevd)(char *jobz, char *uplo, int *n, int *kd, npy_complex64 *ab, int *ldab, float *w, npy_complex64 *z, int *ldz, npy_complex64 *work, int *lwork, float *rwork, int *lrwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(chbevx)(char *jobz, char *range, char *uplo, int *n, int *kd, npy_complex64 *ab, int *ldab, npy_complex64 *q, int *ldq, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, npy_complex64 *z, int *ldz, npy_complex64 *work, float *rwork, int *iwork, int *ifail, int *info); +void BLAS_FUNC(chbgst)(char *vect, char *uplo, int *n, int *ka, int *kb, npy_complex64 *ab, int *ldab, npy_complex64 *bb, int *ldbb, npy_complex64 *x, int *ldx, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(chbgv)(char *jobz, char *uplo, int *n, int *ka, int *kb, npy_complex64 *ab, int *ldab, npy_complex64 *bb, int *ldbb, float *w, npy_complex64 *z, int *ldz, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(chbgvd)(char *jobz, char *uplo, int *n, int *ka, int *kb, npy_complex64 *ab, int *ldab, npy_complex64 *bb, int *ldbb, float *w, npy_complex64 *z, int *ldz, npy_complex64 *work, int *lwork, float *rwork, int *lrwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(chbgvx)(char *jobz, char *range, char *uplo, int *n, int *ka, int *kb, npy_complex64 *ab, int *ldab, npy_complex64 *bb, int *ldbb, npy_complex64 *q, int *ldq, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, npy_complex64 *z, int *ldz, npy_complex64 *work, float *rwork, int *iwork, int *ifail, int *info); +void BLAS_FUNC(chbtrd)(char *vect, char *uplo, int *n, int *kd, npy_complex64 *ab, int *ldab, float *d, float *e, npy_complex64 *q, int *ldq, npy_complex64 *work, int *info); +void BLAS_FUNC(checon)(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, float *anorm, float *rcond, npy_complex64 *work, int *info); +void BLAS_FUNC(cheequb)(char *uplo, int *n, npy_complex64 *a, int *lda, float *s, float *scond, float *amax, npy_complex64 *work, int *info); +void BLAS_FUNC(cheev)(char *jobz, char *uplo, int *n, npy_complex64 *a, int *lda, float *w, npy_complex64 *work, int *lwork, float *rwork, int *info); +void BLAS_FUNC(cheevd)(char *jobz, char *uplo, int *n, npy_complex64 *a, int *lda, float *w, npy_complex64 *work, int *lwork, float *rwork, int *lrwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(cheevr)(char *jobz, char *range, char *uplo, int *n, npy_complex64 *a, int *lda, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, npy_complex64 *z, int *ldz, int *isuppz, npy_complex64 *work, int *lwork, float *rwork, int *lrwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(cheevx)(char *jobz, char *range, char *uplo, int *n, npy_complex64 *a, int *lda, 
float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, npy_complex64 *z, int *ldz, npy_complex64 *work, int *lwork, float *rwork, int *iwork, int *ifail, int *info); +void BLAS_FUNC(chegs2)(int *itype, char *uplo, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, int *info); +void BLAS_FUNC(chegst)(int *itype, char *uplo, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, int *info); +void BLAS_FUNC(chegv)(int *itype, char *jobz, char *uplo, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, float *w, npy_complex64 *work, int *lwork, float *rwork, int *info); +void BLAS_FUNC(chegvd)(int *itype, char *jobz, char *uplo, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, float *w, npy_complex64 *work, int *lwork, float *rwork, int *lrwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(chegvx)(int *itype, char *jobz, char *range, char *uplo, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, npy_complex64 *z, int *ldz, npy_complex64 *work, int *lwork, float *rwork, int *iwork, int *ifail, int *info); +void BLAS_FUNC(cherfs)(char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *af, int *ldaf, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(chesv)(char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(chesvx)(char *fact, char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *af, int *ldaf, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *rcond, float *ferr, float *berr, npy_complex64 *work, int *lwork, float *rwork, int *info); +void BLAS_FUNC(cheswapr)(char *uplo, int *n, npy_complex64 *a, int *lda, int *i1, int *i2); +void BLAS_FUNC(chetd2)(char *uplo, int *n, npy_complex64 *a, int *lda, float *d, float *e, npy_complex64 *tau, int *info); +void BLAS_FUNC(chetf2)(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, int *info); +void BLAS_FUNC(chetrd)(char *uplo, int *n, npy_complex64 *a, int *lda, float *d, float *e, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(chetrf)(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(chetri)(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *work, int *info); +void BLAS_FUNC(chetri2)(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(chetri2x)(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *work, int *nb, int *info); +void BLAS_FUNC(chetrs)(char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *b, int *ldb, int *info); +void BLAS_FUNC(chetrs2)(char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *work, int *info); +void BLAS_FUNC(chfrk)(char *transr, char *uplo, char *trans, int *n, int *k, float *alpha, npy_complex64 *a, int *lda, float *beta, npy_complex64 *c); +void BLAS_FUNC(chgeqz)(char *job, char *compq, char *compz, int *n, int *ilo, int *ihi, npy_complex64 *h, int *ldh, npy_complex64 *t, int *ldt, npy_complex64 *alpha, npy_complex64 *beta, npy_complex64 *q, int *ldq, npy_complex64 *z, int *ldz, npy_complex64 
*work, int *lwork, float *rwork, int *info); +char BLAS_FUNC(chla_transtype)(int *trans); +void BLAS_FUNC(chpcon)(char *uplo, int *n, npy_complex64 *ap, int *ipiv, float *anorm, float *rcond, npy_complex64 *work, int *info); +void BLAS_FUNC(chpev)(char *jobz, char *uplo, int *n, npy_complex64 *ap, float *w, npy_complex64 *z, int *ldz, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(chpevd)(char *jobz, char *uplo, int *n, npy_complex64 *ap, float *w, npy_complex64 *z, int *ldz, npy_complex64 *work, int *lwork, float *rwork, int *lrwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(chpevx)(char *jobz, char *range, char *uplo, int *n, npy_complex64 *ap, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, npy_complex64 *z, int *ldz, npy_complex64 *work, float *rwork, int *iwork, int *ifail, int *info); +void BLAS_FUNC(chpgst)(int *itype, char *uplo, int *n, npy_complex64 *ap, npy_complex64 *bp, int *info); +void BLAS_FUNC(chpgv)(int *itype, char *jobz, char *uplo, int *n, npy_complex64 *ap, npy_complex64 *bp, float *w, npy_complex64 *z, int *ldz, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(chpgvd)(int *itype, char *jobz, char *uplo, int *n, npy_complex64 *ap, npy_complex64 *bp, float *w, npy_complex64 *z, int *ldz, npy_complex64 *work, int *lwork, float *rwork, int *lrwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(chpgvx)(int *itype, char *jobz, char *range, char *uplo, int *n, npy_complex64 *ap, npy_complex64 *bp, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, npy_complex64 *z, int *ldz, npy_complex64 *work, float *rwork, int *iwork, int *ifail, int *info); +void BLAS_FUNC(chprfs)(char *uplo, int *n, int *nrhs, npy_complex64 *ap, npy_complex64 *afp, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(chpsv)(char *uplo, int *n, int *nrhs, npy_complex64 *ap, int *ipiv, npy_complex64 *b, int *ldb, int *info); +void BLAS_FUNC(chpsvx)(char *fact, char *uplo, int *n, int *nrhs, npy_complex64 *ap, npy_complex64 *afp, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *rcond, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(chptrd)(char *uplo, int *n, npy_complex64 *ap, float *d, float *e, npy_complex64 *tau, int *info); +void BLAS_FUNC(chptrf)(char *uplo, int *n, npy_complex64 *ap, int *ipiv, int *info); +void BLAS_FUNC(chptri)(char *uplo, int *n, npy_complex64 *ap, int *ipiv, npy_complex64 *work, int *info); +void BLAS_FUNC(chptrs)(char *uplo, int *n, int *nrhs, npy_complex64 *ap, int *ipiv, npy_complex64 *b, int *ldb, int *info); +void BLAS_FUNC(chsein)(char *side, char *eigsrc, char *initv, int *select, int *n, npy_complex64 *h, int *ldh, npy_complex64 *w, npy_complex64 *vl, int *ldvl, npy_complex64 *vr, int *ldvr, int *mm, int *m, npy_complex64 *work, float *rwork, int *ifaill, int *ifailr, int *info); +void BLAS_FUNC(chseqr)(char *job, char *compz, int *n, int *ilo, int *ihi, npy_complex64 *h, int *ldh, npy_complex64 *w, npy_complex64 *z, int *ldz, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(clabrd)(int *m, int *n, int *nb, npy_complex64 *a, int *lda, float *d, float *e, npy_complex64 *tauq, npy_complex64 *taup, npy_complex64 *x, int *ldx, npy_complex64 *y, int *ldy); +void BLAS_FUNC(clacgv)(int *n, npy_complex64 *x, int *incx); +void BLAS_FUNC(clacn2)(int *n, npy_complex64 *v, npy_complex64 *x, float *est, int 
*kase, int *isave); +void BLAS_FUNC(clacon)(int *n, npy_complex64 *v, npy_complex64 *x, float *est, int *kase); +void BLAS_FUNC(clacp2)(char *uplo, int *m, int *n, float *a, int *lda, npy_complex64 *b, int *ldb); +void BLAS_FUNC(clacpy)(char *uplo, int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb); +void BLAS_FUNC(clacrm)(int *m, int *n, npy_complex64 *a, int *lda, float *b, int *ldb, npy_complex64 *c, int *ldc, float *rwork); +void BLAS_FUNC(clacrt)(int *n, npy_complex64 *cx, int *incx, npy_complex64 *cy, int *incy, npy_complex64 *c, npy_complex64 *s); +void (cladivwrp_)(npy_complex64 *out, npy_complex64 *x, npy_complex64 *y); +void BLAS_FUNC(claed0)(int *qsiz, int *n, float *d, float *e, npy_complex64 *q, int *ldq, npy_complex64 *qstore, int *ldqs, float *rwork, int *iwork, int *info); +void BLAS_FUNC(claed7)(int *n, int *cutpnt, int *qsiz, int *tlvls, int *curlvl, int *curpbm, float *d, npy_complex64 *q, int *ldq, float *rho, int *indxq, float *qstore, int *qptr, int *prmptr, int *perm, int *givptr, int *givcol, float *givnum, npy_complex64 *work, float *rwork, int *iwork, int *info); +void BLAS_FUNC(claed8)(int *k, int *n, int *qsiz, npy_complex64 *q, int *ldq, float *d, float *rho, int *cutpnt, float *z, float *dlamda, npy_complex64 *q2, int *ldq2, float *w, int *indxp, int *indx, int *indxq, int *perm, int *givptr, int *givcol, float *givnum, int *info); +void BLAS_FUNC(claein)(int *rightv, int *noinit, int *n, npy_complex64 *h, int *ldh, npy_complex64 *w, npy_complex64 *v, npy_complex64 *b, int *ldb, float *rwork, float *eps3, float *smlnum, int *info); +void BLAS_FUNC(claesy)(npy_complex64 *a, npy_complex64 *b, npy_complex64 *c, npy_complex64 *rt1, npy_complex64 *rt2, npy_complex64 *evscal, npy_complex64 *cs1, npy_complex64 *sn1); +void BLAS_FUNC(claev2)(npy_complex64 *a, npy_complex64 *b, npy_complex64 *c, float *rt1, float *rt2, float *cs1, npy_complex64 *sn1); +void BLAS_FUNC(clag2z)(int *m, int *n, npy_complex64 *sa, int *ldsa, npy_complex128 *a, int *lda, int *info); +void BLAS_FUNC(clags2)(int *upper, float *a1, npy_complex64 *a2, float *a3, float *b1, npy_complex64 *b2, float *b3, float *csu, npy_complex64 *snu, float *csv, npy_complex64 *snv, float *csq, npy_complex64 *snq); +void BLAS_FUNC(clagtm)(char *trans, int *n, int *nrhs, float *alpha, npy_complex64 *dl, npy_complex64 *d, npy_complex64 *du, npy_complex64 *x, int *ldx, float *beta, npy_complex64 *b, int *ldb); +void BLAS_FUNC(clahef)(char *uplo, int *n, int *nb, int *kb, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *w, int *ldw, int *info); +void BLAS_FUNC(clahqr)(int *wantt, int *wantz, int *n, int *ilo, int *ihi, npy_complex64 *h, int *ldh, npy_complex64 *w, int *iloz, int *ihiz, npy_complex64 *z, int *ldz, int *info); +void BLAS_FUNC(clahr2)(int *n, int *k, int *nb, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *t, int *ldt, npy_complex64 *y, int *ldy); +void BLAS_FUNC(claic1)(int *job, int *j, npy_complex64 *x, float *sest, npy_complex64 *w, npy_complex64 *gamma, float *sestpr, npy_complex64 *s, npy_complex64 *c); +void BLAS_FUNC(clals0)(int *icompq, int *nl, int *nr, int *sqre, int *nrhs, npy_complex64 *b, int *ldb, npy_complex64 *bx, int *ldbx, int *perm, int *givptr, int *givcol, int *ldgcol, float *givnum, int *ldgnum, float *poles, float *difl, float *difr, float *z, int *k, float *c, float *s, float *rwork, int *info); +void BLAS_FUNC(clalsa)(int *icompq, int *smlsiz, int *n, int *nrhs, npy_complex64 *b, int *ldb, npy_complex64 *bx, int *ldbx, float *u, int 
*ldu, float *vt, int *k, float *difl, float *difr, float *z, float *poles, int *givptr, int *givcol, int *ldgcol, int *perm, float *givnum, float *c, float *s, float *rwork, int *iwork, int *info); +void BLAS_FUNC(clalsd)(char *uplo, int *smlsiz, int *n, int *nrhs, float *d, float *e, npy_complex64 *b, int *ldb, float *rcond, int *rank, npy_complex64 *work, float *rwork, int *iwork, int *info); +float BLAS_FUNC(clangb)(char *norm, int *n, int *kl, int *ku, npy_complex64 *ab, int *ldab, float *work); +float BLAS_FUNC(clange)(char *norm, int *m, int *n, npy_complex64 *a, int *lda, float *work); +float BLAS_FUNC(clangt)(char *norm, int *n, npy_complex64 *dl, npy_complex64 *d, npy_complex64 *du); +float BLAS_FUNC(clanhb)(char *norm, char *uplo, int *n, int *k, npy_complex64 *ab, int *ldab, float *work); +float BLAS_FUNC(clanhe)(char *norm, char *uplo, int *n, npy_complex64 *a, int *lda, float *work); +float BLAS_FUNC(clanhf)(char *norm, char *transr, char *uplo, int *n, npy_complex64 *a, float *work); +float BLAS_FUNC(clanhp)(char *norm, char *uplo, int *n, npy_complex64 *ap, float *work); +float BLAS_FUNC(clanhs)(char *norm, int *n, npy_complex64 *a, int *lda, float *work); +float BLAS_FUNC(clanht)(char *norm, int *n, float *d, npy_complex64 *e); +float BLAS_FUNC(clansb)(char *norm, char *uplo, int *n, int *k, npy_complex64 *ab, int *ldab, float *work); +float BLAS_FUNC(clansp)(char *norm, char *uplo, int *n, npy_complex64 *ap, float *work); +float BLAS_FUNC(clansy)(char *norm, char *uplo, int *n, npy_complex64 *a, int *lda, float *work); +float BLAS_FUNC(clantb)(char *norm, char *uplo, char *diag, int *n, int *k, npy_complex64 *ab, int *ldab, float *work); +float BLAS_FUNC(clantp)(char *norm, char *uplo, char *diag, int *n, npy_complex64 *ap, float *work); +float BLAS_FUNC(clantr)(char *norm, char *uplo, char *diag, int *m, int *n, npy_complex64 *a, int *lda, float *work); +void BLAS_FUNC(clapll)(int *n, npy_complex64 *x, int *incx, npy_complex64 *y, int *incy, float *ssmin); +void BLAS_FUNC(clapmr)(int *forwrd, int *m, int *n, npy_complex64 *x, int *ldx, int *k); +void BLAS_FUNC(clapmt)(int *forwrd, int *m, int *n, npy_complex64 *x, int *ldx, int *k); +void BLAS_FUNC(claqgb)(int *m, int *n, int *kl, int *ku, npy_complex64 *ab, int *ldab, float *r, float *c, float *rowcnd, float *colcnd, float *amax, char *equed); +void BLAS_FUNC(claqge)(int *m, int *n, npy_complex64 *a, int *lda, float *r, float *c, float *rowcnd, float *colcnd, float *amax, char *equed); +void BLAS_FUNC(claqhb)(char *uplo, int *n, int *kd, npy_complex64 *ab, int *ldab, float *s, float *scond, float *amax, char *equed); +void BLAS_FUNC(claqhe)(char *uplo, int *n, npy_complex64 *a, int *lda, float *s, float *scond, float *amax, char *equed); +void BLAS_FUNC(claqhp)(char *uplo, int *n, npy_complex64 *ap, float *s, float *scond, float *amax, char *equed); +void BLAS_FUNC(claqp2)(int *m, int *n, int *offset, npy_complex64 *a, int *lda, int *jpvt, npy_complex64 *tau, float *vn1, float *vn2, npy_complex64 *work); +void BLAS_FUNC(claqps)(int *m, int *n, int *offset, int *nb, int *kb, npy_complex64 *a, int *lda, int *jpvt, npy_complex64 *tau, float *vn1, float *vn2, npy_complex64 *auxv, npy_complex64 *f, int *ldf); +void BLAS_FUNC(claqr0)(int *wantt, int *wantz, int *n, int *ilo, int *ihi, npy_complex64 *h, int *ldh, npy_complex64 *w, int *iloz, int *ihiz, npy_complex64 *z, int *ldz, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(claqr1)(int *n, npy_complex64 *h, int *ldh, npy_complex64 *s1, npy_complex64 *s2, 
npy_complex64 *v); +void BLAS_FUNC(claqr2)(int *wantt, int *wantz, int *n, int *ktop, int *kbot, int *nw, npy_complex64 *h, int *ldh, int *iloz, int *ihiz, npy_complex64 *z, int *ldz, int *ns, int *nd, npy_complex64 *sh, npy_complex64 *v, int *ldv, int *nh, npy_complex64 *t, int *ldt, int *nv, npy_complex64 *wv, int *ldwv, npy_complex64 *work, int *lwork); +void BLAS_FUNC(claqr3)(int *wantt, int *wantz, int *n, int *ktop, int *kbot, int *nw, npy_complex64 *h, int *ldh, int *iloz, int *ihiz, npy_complex64 *z, int *ldz, int *ns, int *nd, npy_complex64 *sh, npy_complex64 *v, int *ldv, int *nh, npy_complex64 *t, int *ldt, int *nv, npy_complex64 *wv, int *ldwv, npy_complex64 *work, int *lwork); +void BLAS_FUNC(claqr4)(int *wantt, int *wantz, int *n, int *ilo, int *ihi, npy_complex64 *h, int *ldh, npy_complex64 *w, int *iloz, int *ihiz, npy_complex64 *z, int *ldz, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(claqr5)(int *wantt, int *wantz, int *kacc22, int *n, int *ktop, int *kbot, int *nshfts, npy_complex64 *s, npy_complex64 *h, int *ldh, int *iloz, int *ihiz, npy_complex64 *z, int *ldz, npy_complex64 *v, int *ldv, npy_complex64 *u, int *ldu, int *nv, npy_complex64 *wv, int *ldwv, int *nh, npy_complex64 *wh, int *ldwh); +void BLAS_FUNC(claqsb)(char *uplo, int *n, int *kd, npy_complex64 *ab, int *ldab, float *s, float *scond, float *amax, char *equed); +void BLAS_FUNC(claqsp)(char *uplo, int *n, npy_complex64 *ap, float *s, float *scond, float *amax, char *equed); +void BLAS_FUNC(claqsy)(char *uplo, int *n, npy_complex64 *a, int *lda, float *s, float *scond, float *amax, char *equed); +void BLAS_FUNC(clar1v)(int *n, int *b1, int *bn, float *lambda_, float *d, float *l, float *ld, float *lld, float *pivmin, float *gaptol, npy_complex64 *z, int *wantnc, int *negcnt, float *ztz, float *mingma, int *r, int *isuppz, float *nrminv, float *resid, float *rqcorr, float *work); +void BLAS_FUNC(clar2v)(int *n, npy_complex64 *x, npy_complex64 *y, npy_complex64 *z, int *incx, float *c, npy_complex64 *s, int *incc); +void BLAS_FUNC(clarcm)(int *m, int *n, float *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *c, int *ldc, float *rwork); +void BLAS_FUNC(clarf)(char *side, int *m, int *n, npy_complex64 *v, int *incv, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work); +void BLAS_FUNC(clarfb)(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, npy_complex64 *v, int *ldv, npy_complex64 *t, int *ldt, npy_complex64 *c, int *ldc, npy_complex64 *work, int *ldwork); +void BLAS_FUNC(clarfg)(int *n, npy_complex64 *alpha, npy_complex64 *x, int *incx, npy_complex64 *tau); +void BLAS_FUNC(clarfgp)(int *n, npy_complex64 *alpha, npy_complex64 *x, int *incx, npy_complex64 *tau); +void BLAS_FUNC(clarft)(char *direct, char *storev, int *n, int *k, npy_complex64 *v, int *ldv, npy_complex64 *tau, npy_complex64 *t, int *ldt); +void BLAS_FUNC(clarfx)(char *side, int *m, int *n, npy_complex64 *v, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work); +void BLAS_FUNC(clargv)(int *n, npy_complex64 *x, int *incx, npy_complex64 *y, int *incy, float *c, int *incc); +void BLAS_FUNC(clarnv)(int *idist, int *iseed, int *n, npy_complex64 *x); +void BLAS_FUNC(clarrv)(int *n, float *vl, float *vu, float *d, float *l, float *pivmin, int *isplit, int *m, int *dol, int *dou, float *minrgp, float *rtol1, float *rtol2, float *w, float *werr, float *wgap, int *iblock, int *indexw, float *gers, npy_complex64 *z, int *ldz, int *isuppz, float *work, int *iwork, int 
*info); +void BLAS_FUNC(clartg)(npy_complex64 *f, npy_complex64 *g, float *cs, npy_complex64 *sn, npy_complex64 *r); +void BLAS_FUNC(clartv)(int *n, npy_complex64 *x, int *incx, npy_complex64 *y, int *incy, float *c, npy_complex64 *s, int *incc); +void BLAS_FUNC(clarz)(char *side, int *m, int *n, int *l, npy_complex64 *v, int *incv, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work); +void BLAS_FUNC(clarzb)(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, npy_complex64 *v, int *ldv, npy_complex64 *t, int *ldt, npy_complex64 *c, int *ldc, npy_complex64 *work, int *ldwork); +void BLAS_FUNC(clarzt)(char *direct, char *storev, int *n, int *k, npy_complex64 *v, int *ldv, npy_complex64 *tau, npy_complex64 *t, int *ldt); +void BLAS_FUNC(clascl)(char *type_bn, int *kl, int *ku, float *cfrom, float *cto, int *m, int *n, npy_complex64 *a, int *lda, int *info); +void BLAS_FUNC(claset)(char *uplo, int *m, int *n, npy_complex64 *alpha, npy_complex64 *beta, npy_complex64 *a, int *lda); +void BLAS_FUNC(clasr)(char *side, char *pivot, char *direct, int *m, int *n, float *c, float *s, npy_complex64 *a, int *lda); +void BLAS_FUNC(classq)(int *n, npy_complex64 *x, int *incx, float *scale, float *sumsq); +void BLAS_FUNC(claswp)(int *n, npy_complex64 *a, int *lda, int *k1, int *k2, int *ipiv, int *incx); +void BLAS_FUNC(clasyf)(char *uplo, int *n, int *nb, int *kb, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *w, int *ldw, int *info); +void BLAS_FUNC(clatbs)(char *uplo, char *trans, char *diag, char *normin, int *n, int *kd, npy_complex64 *ab, int *ldab, npy_complex64 *x, float *scale, float *cnorm, int *info); +void BLAS_FUNC(clatdf)(int *ijob, int *n, npy_complex64 *z, int *ldz, npy_complex64 *rhs, float *rdsum, float *rdscal, int *ipiv, int *jpiv); +void BLAS_FUNC(clatps)(char *uplo, char *trans, char *diag, char *normin, int *n, npy_complex64 *ap, npy_complex64 *x, float *scale, float *cnorm, int *info); +void BLAS_FUNC(clatrd)(char *uplo, int *n, int *nb, npy_complex64 *a, int *lda, float *e, npy_complex64 *tau, npy_complex64 *w, int *ldw); +void BLAS_FUNC(clatrs)(char *uplo, char *trans, char *diag, char *normin, int *n, npy_complex64 *a, int *lda, npy_complex64 *x, float *scale, float *cnorm, int *info); +void BLAS_FUNC(clatrz)(int *m, int *n, int *l, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work); +void BLAS_FUNC(clauu2)(char *uplo, int *n, npy_complex64 *a, int *lda, int *info); +void BLAS_FUNC(clauum)(char *uplo, int *n, npy_complex64 *a, int *lda, int *info); +void BLAS_FUNC(cpbcon)(char *uplo, int *n, int *kd, npy_complex64 *ab, int *ldab, float *anorm, float *rcond, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(cpbequ)(char *uplo, int *n, int *kd, npy_complex64 *ab, int *ldab, float *s, float *scond, float *amax, int *info); +void BLAS_FUNC(cpbrfs)(char *uplo, int *n, int *kd, int *nrhs, npy_complex64 *ab, int *ldab, npy_complex64 *afb, int *ldafb, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(cpbstf)(char *uplo, int *n, int *kd, npy_complex64 *ab, int *ldab, int *info); +void BLAS_FUNC(cpbsv)(char *uplo, int *n, int *kd, int *nrhs, npy_complex64 *ab, int *ldab, npy_complex64 *b, int *ldb, int *info); +void BLAS_FUNC(cpbsvx)(char *fact, char *uplo, int *n, int *kd, int *nrhs, npy_complex64 *ab, int *ldab, npy_complex64 *afb, int *ldafb, char *equed, float *s, npy_complex64 *b, int *ldb, npy_complex64 
*x, int *ldx, float *rcond, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(cpbtf2)(char *uplo, int *n, int *kd, npy_complex64 *ab, int *ldab, int *info); +void BLAS_FUNC(cpbtrf)(char *uplo, int *n, int *kd, npy_complex64 *ab, int *ldab, int *info); +void BLAS_FUNC(cpbtrs)(char *uplo, int *n, int *kd, int *nrhs, npy_complex64 *ab, int *ldab, npy_complex64 *b, int *ldb, int *info); +void BLAS_FUNC(cpftrf)(char *transr, char *uplo, int *n, npy_complex64 *a, int *info); +void BLAS_FUNC(cpftri)(char *transr, char *uplo, int *n, npy_complex64 *a, int *info); +void BLAS_FUNC(cpftrs)(char *transr, char *uplo, int *n, int *nrhs, npy_complex64 *a, npy_complex64 *b, int *ldb, int *info); +void BLAS_FUNC(cpocon)(char *uplo, int *n, npy_complex64 *a, int *lda, float *anorm, float *rcond, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(cpoequ)(int *n, npy_complex64 *a, int *lda, float *s, float *scond, float *amax, int *info); +void BLAS_FUNC(cpoequb)(int *n, npy_complex64 *a, int *lda, float *s, float *scond, float *amax, int *info); +void BLAS_FUNC(cporfs)(char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *af, int *ldaf, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(cposv)(char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, int *info); +void BLAS_FUNC(cposvx)(char *fact, char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *af, int *ldaf, char *equed, float *s, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *rcond, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(cpotf2)(char *uplo, int *n, npy_complex64 *a, int *lda, int *info); +void BLAS_FUNC(cpotrf)(char *uplo, int *n, npy_complex64 *a, int *lda, int *info); +void BLAS_FUNC(cpotri)(char *uplo, int *n, npy_complex64 *a, int *lda, int *info); +void BLAS_FUNC(cpotrs)(char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, int *info); +void BLAS_FUNC(cppcon)(char *uplo, int *n, npy_complex64 *ap, float *anorm, float *rcond, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(cppequ)(char *uplo, int *n, npy_complex64 *ap, float *s, float *scond, float *amax, int *info); +void BLAS_FUNC(cpprfs)(char *uplo, int *n, int *nrhs, npy_complex64 *ap, npy_complex64 *afp, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(cppsv)(char *uplo, int *n, int *nrhs, npy_complex64 *ap, npy_complex64 *b, int *ldb, int *info); +void BLAS_FUNC(cppsvx)(char *fact, char *uplo, int *n, int *nrhs, npy_complex64 *ap, npy_complex64 *afp, char *equed, float *s, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *rcond, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(cpptrf)(char *uplo, int *n, npy_complex64 *ap, int *info); +void BLAS_FUNC(cpptri)(char *uplo, int *n, npy_complex64 *ap, int *info); +void BLAS_FUNC(cpptrs)(char *uplo, int *n, int *nrhs, npy_complex64 *ap, npy_complex64 *b, int *ldb, int *info); +void BLAS_FUNC(cpstf2)(char *uplo, int *n, npy_complex64 *a, int *lda, int *piv, int *rank, float *tol, float *work, int *info); +void BLAS_FUNC(cpstrf)(char *uplo, int *n, npy_complex64 *a, int *lda, int *piv, int *rank, float *tol, float *work, int *info); +void BLAS_FUNC(cptcon)(int *n, float *d, 
npy_complex64 *e, float *anorm, float *rcond, float *rwork, int *info); +void BLAS_FUNC(cpteqr)(char *compz, int *n, float *d, float *e, npy_complex64 *z, int *ldz, float *work, int *info); +void BLAS_FUNC(cptrfs)(char *uplo, int *n, int *nrhs, float *d, npy_complex64 *e, float *df, npy_complex64 *ef, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(cptsv)(int *n, int *nrhs, float *d, npy_complex64 *e, npy_complex64 *b, int *ldb, int *info); +void BLAS_FUNC(cptsvx)(char *fact, int *n, int *nrhs, float *d, npy_complex64 *e, float *df, npy_complex64 *ef, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *rcond, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(cpttrf)(int *n, float *d, npy_complex64 *e, int *info); +void BLAS_FUNC(cpttrs)(char *uplo, int *n, int *nrhs, float *d, npy_complex64 *e, npy_complex64 *b, int *ldb, int *info); +void BLAS_FUNC(cptts2)(int *iuplo, int *n, int *nrhs, float *d, npy_complex64 *e, npy_complex64 *b, int *ldb); +void BLAS_FUNC(crot)(int *n, npy_complex64 *cx, int *incx, npy_complex64 *cy, int *incy, float *c, npy_complex64 *s); +void BLAS_FUNC(cspcon)(char *uplo, int *n, npy_complex64 *ap, int *ipiv, float *anorm, float *rcond, npy_complex64 *work, int *info); +void BLAS_FUNC(cspmv)(char *uplo, int *n, npy_complex64 *alpha, npy_complex64 *ap, npy_complex64 *x, int *incx, npy_complex64 *beta, npy_complex64 *y, int *incy); +void BLAS_FUNC(cspr)(char *uplo, int *n, npy_complex64 *alpha, npy_complex64 *x, int *incx, npy_complex64 *ap); +void BLAS_FUNC(csprfs)(char *uplo, int *n, int *nrhs, npy_complex64 *ap, npy_complex64 *afp, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(cspsv)(char *uplo, int *n, int *nrhs, npy_complex64 *ap, int *ipiv, npy_complex64 *b, int *ldb, int *info); +void BLAS_FUNC(cspsvx)(char *fact, char *uplo, int *n, int *nrhs, npy_complex64 *ap, npy_complex64 *afp, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *rcond, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(csptrf)(char *uplo, int *n, npy_complex64 *ap, int *ipiv, int *info); +void BLAS_FUNC(csptri)(char *uplo, int *n, npy_complex64 *ap, int *ipiv, npy_complex64 *work, int *info); +void BLAS_FUNC(csptrs)(char *uplo, int *n, int *nrhs, npy_complex64 *ap, int *ipiv, npy_complex64 *b, int *ldb, int *info); +void BLAS_FUNC(csrscl)(int *n, float *sa, npy_complex64 *sx, int *incx); +void BLAS_FUNC(cstedc)(char *compz, int *n, float *d, float *e, npy_complex64 *z, int *ldz, npy_complex64 *work, int *lwork, float *rwork, int *lrwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(cstegr)(char *jobz, char *range, int *n, float *d, float *e, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, npy_complex64 *z, int *ldz, int *isuppz, float *work, int *lwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(cstein)(int *n, float *d, float *e, int *m, float *w, int *iblock, int *isplit, npy_complex64 *z, int *ldz, float *work, int *iwork, int *ifail, int *info); +void BLAS_FUNC(cstemr)(char *jobz, char *range, int *n, float *d, float *e, float *vl, float *vu, int *il, int *iu, int *m, float *w, npy_complex64 *z, int *ldz, int *nzc, int *isuppz, int *tryrac, float *work, int *lwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(csteqr)(char 
*compz, int *n, float *d, float *e, npy_complex64 *z, int *ldz, float *work, int *info); +void BLAS_FUNC(csycon)(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, float *anorm, float *rcond, npy_complex64 *work, int *info); +void BLAS_FUNC(csyconv)(char *uplo, char *way, int *n, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *work, int *info); +void BLAS_FUNC(csyequb)(char *uplo, int *n, npy_complex64 *a, int *lda, float *s, float *scond, float *amax, npy_complex64 *work, int *info); +void BLAS_FUNC(csymv)(char *uplo, int *n, npy_complex64 *alpha, npy_complex64 *a, int *lda, npy_complex64 *x, int *incx, npy_complex64 *beta, npy_complex64 *y, int *incy); +void BLAS_FUNC(csyr)(char *uplo, int *n, npy_complex64 *alpha, npy_complex64 *x, int *incx, npy_complex64 *a, int *lda); +void BLAS_FUNC(csyrfs)(char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *af, int *ldaf, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(csysv)(char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(csysvx)(char *fact, char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *af, int *ldaf, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *rcond, float *ferr, float *berr, npy_complex64 *work, int *lwork, float *rwork, int *info); +void BLAS_FUNC(csyswapr)(char *uplo, int *n, npy_complex64 *a, int *lda, int *i1, int *i2); +void BLAS_FUNC(csytf2)(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, int *info); +void BLAS_FUNC(csytrf)(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(csytri)(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *work, int *info); +void BLAS_FUNC(csytri2)(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(csytri2x)(char *uplo, int *n, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *work, int *nb, int *info); +void BLAS_FUNC(csytrs)(char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *b, int *ldb, int *info); +void BLAS_FUNC(csytrs2)(char *uplo, int *n, int *nrhs, npy_complex64 *a, int *lda, int *ipiv, npy_complex64 *b, int *ldb, npy_complex64 *work, int *info); +void BLAS_FUNC(ctbcon)(char *norm, char *uplo, char *diag, int *n, int *kd, npy_complex64 *ab, int *ldab, float *rcond, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(ctbrfs)(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, npy_complex64 *ab, int *ldab, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(ctbtrs)(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, npy_complex64 *ab, int *ldab, npy_complex64 *b, int *ldb, int *info); +void BLAS_FUNC(ctfsm)(char *transr, char *side, char *uplo, char *trans, char *diag, int *m, int *n, npy_complex64 *alpha, npy_complex64 *a, npy_complex64 *b, int *ldb); +void BLAS_FUNC(ctftri)(char *transr, char *uplo, char *diag, int *n, npy_complex64 *a, int *info); +void BLAS_FUNC(ctfttp)(char *transr, char *uplo, int *n, npy_complex64 *arf, npy_complex64 *ap, int *info); +void BLAS_FUNC(ctfttr)(char *transr, char *uplo, int *n, npy_complex64 *arf, npy_complex64 *a, int *lda, int 
*info); +void BLAS_FUNC(ctgevc)(char *side, char *howmny, int *select, int *n, npy_complex64 *s, int *lds, npy_complex64 *p, int *ldp, npy_complex64 *vl, int *ldvl, npy_complex64 *vr, int *ldvr, int *mm, int *m, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(ctgex2)(int *wantq, int *wantz, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *q, int *ldq, npy_complex64 *z, int *ldz, int *j1, int *info); +void BLAS_FUNC(ctgexc)(int *wantq, int *wantz, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *q, int *ldq, npy_complex64 *z, int *ldz, int *ifst, int *ilst, int *info); +void BLAS_FUNC(ctgsen)(int *ijob, int *wantq, int *wantz, int *select, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *alpha, npy_complex64 *beta, npy_complex64 *q, int *ldq, npy_complex64 *z, int *ldz, int *m, float *pl, float *pr, float *dif, npy_complex64 *work, int *lwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(ctgsja)(char *jobu, char *jobv, char *jobq, int *m, int *p, int *n, int *k, int *l, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, float *tola, float *tolb, float *alpha, float *beta, npy_complex64 *u, int *ldu, npy_complex64 *v, int *ldv, npy_complex64 *q, int *ldq, npy_complex64 *work, int *ncycle, int *info); +void BLAS_FUNC(ctgsna)(char *job, char *howmny, int *select, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *vl, int *ldvl, npy_complex64 *vr, int *ldvr, float *s, float *dif, int *mm, int *m, npy_complex64 *work, int *lwork, int *iwork, int *info); +void BLAS_FUNC(ctgsy2)(char *trans, int *ijob, int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *c, int *ldc, npy_complex64 *d, int *ldd, npy_complex64 *e, int *lde, npy_complex64 *f, int *ldf, float *scale, float *rdsum, float *rdscal, int *info); +void BLAS_FUNC(ctgsyl)(char *trans, int *ijob, int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *c, int *ldc, npy_complex64 *d, int *ldd, npy_complex64 *e, int *lde, npy_complex64 *f, int *ldf, float *scale, float *dif, npy_complex64 *work, int *lwork, int *iwork, int *info); +void BLAS_FUNC(ctpcon)(char *norm, char *uplo, char *diag, int *n, npy_complex64 *ap, float *rcond, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(ctpmqrt)(char *side, char *trans, int *m, int *n, int *k, int *l, int *nb, npy_complex64 *v, int *ldv, npy_complex64 *t, int *ldt, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *work, int *info); +void BLAS_FUNC(ctpqrt)(int *m, int *n, int *l, int *nb, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *t, int *ldt, npy_complex64 *work, int *info); +void BLAS_FUNC(ctpqrt2)(int *m, int *n, int *l, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *t, int *ldt, int *info); +void BLAS_FUNC(ctprfb)(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, npy_complex64 *v, int *ldv, npy_complex64 *t, int *ldt, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *work, int *ldwork); +void BLAS_FUNC(ctprfs)(char *uplo, char *trans, char *diag, int *n, int *nrhs, npy_complex64 *ap, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(ctptri)(char *uplo, char *diag, int *n, npy_complex64 *ap, int *info); +void BLAS_FUNC(ctptrs)(char *uplo, char *trans, char *diag, 
int *n, int *nrhs, npy_complex64 *ap, npy_complex64 *b, int *ldb, int *info); +void BLAS_FUNC(ctpttf)(char *transr, char *uplo, int *n, npy_complex64 *ap, npy_complex64 *arf, int *info); +void BLAS_FUNC(ctpttr)(char *uplo, int *n, npy_complex64 *ap, npy_complex64 *a, int *lda, int *info); +void BLAS_FUNC(ctrcon)(char *norm, char *uplo, char *diag, int *n, npy_complex64 *a, int *lda, float *rcond, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(ctrevc)(char *side, char *howmny, int *select, int *n, npy_complex64 *t, int *ldt, npy_complex64 *vl, int *ldvl, npy_complex64 *vr, int *ldvr, int *mm, int *m, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(ctrexc)(char *compq, int *n, npy_complex64 *t, int *ldt, npy_complex64 *q, int *ldq, int *ifst, int *ilst, int *info); +void BLAS_FUNC(ctrrfs)(char *uplo, char *trans, char *diag, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *x, int *ldx, float *ferr, float *berr, npy_complex64 *work, float *rwork, int *info); +void BLAS_FUNC(ctrsen)(char *job, char *compq, int *select, int *n, npy_complex64 *t, int *ldt, npy_complex64 *q, int *ldq, npy_complex64 *w, int *m, float *s, float *sep, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(ctrsna)(char *job, char *howmny, int *select, int *n, npy_complex64 *t, int *ldt, npy_complex64 *vl, int *ldvl, npy_complex64 *vr, int *ldvr, float *s, float *sep, int *mm, int *m, npy_complex64 *work, int *ldwork, float *rwork, int *info); +void BLAS_FUNC(ctrsyl)(char *trana, char *tranb, int *isgn, int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, npy_complex64 *c, int *ldc, float *scale, int *info); +void BLAS_FUNC(ctrti2)(char *uplo, char *diag, int *n, npy_complex64 *a, int *lda, int *info); +void BLAS_FUNC(ctrtri)(char *uplo, char *diag, int *n, npy_complex64 *a, int *lda, int *info); +void BLAS_FUNC(ctrtrs)(char *uplo, char *trans, char *diag, int *n, int *nrhs, npy_complex64 *a, int *lda, npy_complex64 *b, int *ldb, int *info); +void BLAS_FUNC(ctrttf)(char *transr, char *uplo, int *n, npy_complex64 *a, int *lda, npy_complex64 *arf, int *info); +void BLAS_FUNC(ctrttp)(char *uplo, int *n, npy_complex64 *a, int *lda, npy_complex64 *ap, int *info); +void BLAS_FUNC(ctzrzf)(int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(cunbdb)(char *trans, char *signs, int *m, int *p, int *q, npy_complex64 *x11, int *ldx11, npy_complex64 *x12, int *ldx12, npy_complex64 *x21, int *ldx21, npy_complex64 *x22, int *ldx22, float *theta, float *phi, npy_complex64 *taup1, npy_complex64 *taup2, npy_complex64 *tauq1, npy_complex64 *tauq2, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(cuncsd)(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, char *signs, int *m, int *p, int *q, npy_complex64 *x11, int *ldx11, npy_complex64 *x12, int *ldx12, npy_complex64 *x21, int *ldx21, npy_complex64 *x22, int *ldx22, float *theta, npy_complex64 *u1, int *ldu1, npy_complex64 *u2, int *ldu2, npy_complex64 *v1t, int *ldv1t, npy_complex64 *v2t, int *ldv2t, npy_complex64 *work, int *lwork, float *rwork, int *lrwork, int *iwork, int *info); +void BLAS_FUNC(cung2l)(int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *info); +void BLAS_FUNC(cung2r)(int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *info); +void BLAS_FUNC(cungbr)(char *vect, int *m, int *n, int *k, 
npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(cunghr)(int *n, int *ilo, int *ihi, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(cungl2)(int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *info); +void BLAS_FUNC(cunglq)(int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(cungql)(int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(cungqr)(int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(cungr2)(int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *info); +void BLAS_FUNC(cungrq)(int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(cungtr)(char *uplo, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(cunm2l)(char *side, char *trans, int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *info); +void BLAS_FUNC(cunm2r)(char *side, char *trans, int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *info); +void BLAS_FUNC(cunmbr)(char *vect, char *side, char *trans, int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(cunmhr)(char *side, char *trans, int *m, int *n, int *ilo, int *ihi, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(cunml2)(char *side, char *trans, int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *info); +void BLAS_FUNC(cunmlq)(char *side, char *trans, int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(cunmql)(char *side, char *trans, int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(cunmqr)(char *side, char *trans, int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(cunmr2)(char *side, char *trans, int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *info); +void BLAS_FUNC(cunmr3)(char *side, char *trans, int *m, int *n, int *k, int *l, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *info); +void BLAS_FUNC(cunmrq)(char *side, char *trans, int *m, int *n, int *k, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(cunmrz)(char *side, char *trans, int *m, int *n, int *k, int *l, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(cunmtr)(char *side, char *uplo, 
char *trans, int *m, int *n, npy_complex64 *a, int *lda, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *lwork, int *info); +void BLAS_FUNC(cupgtr)(char *uplo, int *n, npy_complex64 *ap, npy_complex64 *tau, npy_complex64 *q, int *ldq, npy_complex64 *work, int *info); +void BLAS_FUNC(cupmtr)(char *side, char *uplo, char *trans, int *m, int *n, npy_complex64 *ap, npy_complex64 *tau, npy_complex64 *c, int *ldc, npy_complex64 *work, int *info); +void BLAS_FUNC(dbbcsd)(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, int *m, int *p, int *q, double *theta, double *phi, double *u1, int *ldu1, double *u2, int *ldu2, double *v1t, int *ldv1t, double *v2t, int *ldv2t, double *b11d, double *b11e, double *b12d, double *b12e, double *b21d, double *b21e, double *b22d, double *b22e, double *work, int *lwork, int *info); +void BLAS_FUNC(dbdsdc)(char *uplo, char *compq, int *n, double *d, double *e, double *u, int *ldu, double *vt, int *ldvt, double *q, int *iq, double *work, int *iwork, int *info); +void BLAS_FUNC(dbdsqr)(char *uplo, int *n, int *ncvt, int *nru, int *ncc, double *d, double *e, double *vt, int *ldvt, double *u, int *ldu, double *c, int *ldc, double *work, int *info); +void BLAS_FUNC(ddisna)(char *job, int *m, int *n, double *d, double *sep, int *info); +void BLAS_FUNC(dgbbrd)(char *vect, int *m, int *n, int *ncc, int *kl, int *ku, double *ab, int *ldab, double *d, double *e, double *q, int *ldq, double *pt, int *ldpt, double *c, int *ldc, double *work, int *info); +void BLAS_FUNC(dgbcon)(char *norm, int *n, int *kl, int *ku, double *ab, int *ldab, int *ipiv, double *anorm, double *rcond, double *work, int *iwork, int *info); +void BLAS_FUNC(dgbequ)(int *m, int *n, int *kl, int *ku, double *ab, int *ldab, double *r, double *c, double *rowcnd, double *colcnd, double *amax, int *info); +void BLAS_FUNC(dgbequb)(int *m, int *n, int *kl, int *ku, double *ab, int *ldab, double *r, double *c, double *rowcnd, double *colcnd, double *amax, int *info); +void BLAS_FUNC(dgbrfs)(char *trans, int *n, int *kl, int *ku, int *nrhs, double *ab, int *ldab, double *afb, int *ldafb, int *ipiv, double *b, int *ldb, double *x, int *ldx, double *ferr, double *berr, double *work, int *iwork, int *info); +void BLAS_FUNC(dgbsv)(int *n, int *kl, int *ku, int *nrhs, double *ab, int *ldab, int *ipiv, double *b, int *ldb, int *info); +void BLAS_FUNC(dgbsvx)(char *fact, char *trans, int *n, int *kl, int *ku, int *nrhs, double *ab, int *ldab, double *afb, int *ldafb, int *ipiv, char *equed, double *r, double *c, double *b, int *ldb, double *x, int *ldx, double *rcond, double *ferr, double *berr, double *work, int *iwork, int *info); +void BLAS_FUNC(dgbtf2)(int *m, int *n, int *kl, int *ku, double *ab, int *ldab, int *ipiv, int *info); +void BLAS_FUNC(dgbtrf)(int *m, int *n, int *kl, int *ku, double *ab, int *ldab, int *ipiv, int *info); +void BLAS_FUNC(dgbtrs)(char *trans, int *n, int *kl, int *ku, int *nrhs, double *ab, int *ldab, int *ipiv, double *b, int *ldb, int *info); +void BLAS_FUNC(dgebak)(char *job, char *side, int *n, int *ilo, int *ihi, double *scale, int *m, double *v, int *ldv, int *info); +void BLAS_FUNC(dgebal)(char *job, int *n, double *a, int *lda, int *ilo, int *ihi, double *scale, int *info); +void BLAS_FUNC(dgebd2)(int *m, int *n, double *a, int *lda, double *d, double *e, double *tauq, double *taup, double *work, int *info); +void BLAS_FUNC(dgebrd)(int *m, int *n, double *a, int *lda, double *d, double *e, double *tauq, double *taup, double *work, int *lwork, 
int *info); +void BLAS_FUNC(dgecon)(char *norm, int *n, double *a, int *lda, double *anorm, double *rcond, double *work, int *iwork, int *info); +void BLAS_FUNC(dgeequ)(int *m, int *n, double *a, int *lda, double *r, double *c, double *rowcnd, double *colcnd, double *amax, int *info); +void BLAS_FUNC(dgeequb)(int *m, int *n, double *a, int *lda, double *r, double *c, double *rowcnd, double *colcnd, double *amax, int *info); +void BLAS_FUNC(dgees)(char *jobvs, char *sort, _dselect2 *select, int *n, double *a, int *lda, int *sdim, double *wr, double *wi, double *vs, int *ldvs, double *work, int *lwork, int *bwork, int *info); +void BLAS_FUNC(dgeesx)(char *jobvs, char *sort, _dselect2 *select, char *sense, int *n, double *a, int *lda, int *sdim, double *wr, double *wi, double *vs, int *ldvs, double *rconde, double *rcondv, double *work, int *lwork, int *iwork, int *liwork, int *bwork, int *info); +void BLAS_FUNC(dgeev)(char *jobvl, char *jobvr, int *n, double *a, int *lda, double *wr, double *wi, double *vl, int *ldvl, double *vr, int *ldvr, double *work, int *lwork, int *info); +void BLAS_FUNC(dgeevx)(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, double *a, int *lda, double *wr, double *wi, double *vl, int *ldvl, double *vr, int *ldvr, int *ilo, int *ihi, double *scale, double *abnrm, double *rconde, double *rcondv, double *work, int *lwork, int *iwork, int *info); +void BLAS_FUNC(dgehd2)(int *n, int *ilo, int *ihi, double *a, int *lda, double *tau, double *work, int *info); +void BLAS_FUNC(dgehrd)(int *n, int *ilo, int *ihi, double *a, int *lda, double *tau, double *work, int *lwork, int *info); +void BLAS_FUNC(dgejsv)(char *joba, char *jobu, char *jobv, char *jobr, char *jobt, char *jobp, int *m, int *n, double *a, int *lda, double *sva, double *u, int *ldu, double *v, int *ldv, double *work, int *lwork, int *iwork, int *info); +void BLAS_FUNC(dgelq2)(int *m, int *n, double *a, int *lda, double *tau, double *work, int *info); +void BLAS_FUNC(dgelqf)(int *m, int *n, double *a, int *lda, double *tau, double *work, int *lwork, int *info); +void BLAS_FUNC(dgels)(char *trans, int *m, int *n, int *nrhs, double *a, int *lda, double *b, int *ldb, double *work, int *lwork, int *info); +void BLAS_FUNC(dgelsd)(int *m, int *n, int *nrhs, double *a, int *lda, double *b, int *ldb, double *s, double *rcond, int *rank, double *work, int *lwork, int *iwork, int *info); +void BLAS_FUNC(dgelss)(int *m, int *n, int *nrhs, double *a, int *lda, double *b, int *ldb, double *s, double *rcond, int *rank, double *work, int *lwork, int *info); +void BLAS_FUNC(dgelsy)(int *m, int *n, int *nrhs, double *a, int *lda, double *b, int *ldb, int *jpvt, double *rcond, int *rank, double *work, int *lwork, int *info); +void BLAS_FUNC(dgemqrt)(char *side, char *trans, int *m, int *n, int *k, int *nb, double *v, int *ldv, double *t, int *ldt, double *c, int *ldc, double *work, int *info); +void BLAS_FUNC(dgeql2)(int *m, int *n, double *a, int *lda, double *tau, double *work, int *info); +void BLAS_FUNC(dgeqlf)(int *m, int *n, double *a, int *lda, double *tau, double *work, int *lwork, int *info); +void BLAS_FUNC(dgeqp3)(int *m, int *n, double *a, int *lda, int *jpvt, double *tau, double *work, int *lwork, int *info); +void BLAS_FUNC(dgeqr2)(int *m, int *n, double *a, int *lda, double *tau, double *work, int *info); +void BLAS_FUNC(dgeqr2p)(int *m, int *n, double *a, int *lda, double *tau, double *work, int *info); +void BLAS_FUNC(dgeqrf)(int *m, int *n, double *a, int *lda, double *tau, double *work, int *lwork, 
int *info); +void BLAS_FUNC(dgeqrfp)(int *m, int *n, double *a, int *lda, double *tau, double *work, int *lwork, int *info); +void BLAS_FUNC(dgeqrt)(int *m, int *n, int *nb, double *a, int *lda, double *t, int *ldt, double *work, int *info); +void BLAS_FUNC(dgeqrt2)(int *m, int *n, double *a, int *lda, double *t, int *ldt, int *info); +void BLAS_FUNC(dgeqrt3)(int *m, int *n, double *a, int *lda, double *t, int *ldt, int *info); +void BLAS_FUNC(dgerfs)(char *trans, int *n, int *nrhs, double *a, int *lda, double *af, int *ldaf, int *ipiv, double *b, int *ldb, double *x, int *ldx, double *ferr, double *berr, double *work, int *iwork, int *info); +void BLAS_FUNC(dgerq2)(int *m, int *n, double *a, int *lda, double *tau, double *work, int *info); +void BLAS_FUNC(dgerqf)(int *m, int *n, double *a, int *lda, double *tau, double *work, int *lwork, int *info); +void BLAS_FUNC(dgesc2)(int *n, double *a, int *lda, double *rhs, int *ipiv, int *jpiv, double *scale); +void BLAS_FUNC(dgesdd)(char *jobz, int *m, int *n, double *a, int *lda, double *s, double *u, int *ldu, double *vt, int *ldvt, double *work, int *lwork, int *iwork, int *info); +void BLAS_FUNC(dgesv)(int *n, int *nrhs, double *a, int *lda, int *ipiv, double *b, int *ldb, int *info); +void BLAS_FUNC(dgesvd)(char *jobu, char *jobvt, int *m, int *n, double *a, int *lda, double *s, double *u, int *ldu, double *vt, int *ldvt, double *work, int *lwork, int *info); +void BLAS_FUNC(dgesvj)(char *joba, char *jobu, char *jobv, int *m, int *n, double *a, int *lda, double *sva, int *mv, double *v, int *ldv, double *work, int *lwork, int *info); +void BLAS_FUNC(dgesvx)(char *fact, char *trans, int *n, int *nrhs, double *a, int *lda, double *af, int *ldaf, int *ipiv, char *equed, double *r, double *c, double *b, int *ldb, double *x, int *ldx, double *rcond, double *ferr, double *berr, double *work, int *iwork, int *info); +void BLAS_FUNC(dgetc2)(int *n, double *a, int *lda, int *ipiv, int *jpiv, int *info); +void BLAS_FUNC(dgetf2)(int *m, int *n, double *a, int *lda, int *ipiv, int *info); +void BLAS_FUNC(dgetrf)(int *m, int *n, double *a, int *lda, int *ipiv, int *info); +void BLAS_FUNC(dgetri)(int *n, double *a, int *lda, int *ipiv, double *work, int *lwork, int *info); +void BLAS_FUNC(dgetrs)(char *trans, int *n, int *nrhs, double *a, int *lda, int *ipiv, double *b, int *ldb, int *info); +void BLAS_FUNC(dggbak)(char *job, char *side, int *n, int *ilo, int *ihi, double *lscale, double *rscale, int *m, double *v, int *ldv, int *info); +void BLAS_FUNC(dggbal)(char *job, int *n, double *a, int *lda, double *b, int *ldb, int *ilo, int *ihi, double *lscale, double *rscale, double *work, int *info); +void BLAS_FUNC(dgges)(char *jobvsl, char *jobvsr, char *sort, _dselect3 *selctg, int *n, double *a, int *lda, double *b, int *ldb, int *sdim, double *alphar, double *alphai, double *beta, double *vsl, int *ldvsl, double *vsr, int *ldvsr, double *work, int *lwork, int *bwork, int *info); +void BLAS_FUNC(dggesx)(char *jobvsl, char *jobvsr, char *sort, _dselect3 *selctg, char *sense, int *n, double *a, int *lda, double *b, int *ldb, int *sdim, double *alphar, double *alphai, double *beta, double *vsl, int *ldvsl, double *vsr, int *ldvsr, double *rconde, double *rcondv, double *work, int *lwork, int *iwork, int *liwork, int *bwork, int *info); +void BLAS_FUNC(dggev)(char *jobvl, char *jobvr, int *n, double *a, int *lda, double *b, int *ldb, double *alphar, double *alphai, double *beta, double *vl, int *ldvl, double *vr, int *ldvr, double *work, int *lwork, int 
*info); +void BLAS_FUNC(dggevx)(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, double *a, int *lda, double *b, int *ldb, double *alphar, double *alphai, double *beta, double *vl, int *ldvl, double *vr, int *ldvr, int *ilo, int *ihi, double *lscale, double *rscale, double *abnrm, double *bbnrm, double *rconde, double *rcondv, double *work, int *lwork, int *iwork, int *bwork, int *info); +void BLAS_FUNC(dggglm)(int *n, int *m, int *p, double *a, int *lda, double *b, int *ldb, double *d, double *x, double *y, double *work, int *lwork, int *info); +void BLAS_FUNC(dgghrd)(char *compq, char *compz, int *n, int *ilo, int *ihi, double *a, int *lda, double *b, int *ldb, double *q, int *ldq, double *z, int *ldz, int *info); +void BLAS_FUNC(dgglse)(int *m, int *n, int *p, double *a, int *lda, double *b, int *ldb, double *c, double *d, double *x, double *work, int *lwork, int *info); +void BLAS_FUNC(dggqrf)(int *n, int *m, int *p, double *a, int *lda, double *taua, double *b, int *ldb, double *taub, double *work, int *lwork, int *info); +void BLAS_FUNC(dggrqf)(int *m, int *p, int *n, double *a, int *lda, double *taua, double *b, int *ldb, double *taub, double *work, int *lwork, int *info); +void BLAS_FUNC(dgsvj0)(char *jobv, int *m, int *n, double *a, int *lda, double *d, double *sva, int *mv, double *v, int *ldv, double *eps, double *sfmin, double *tol, int *nsweep, double *work, int *lwork, int *info); +void BLAS_FUNC(dgsvj1)(char *jobv, int *m, int *n, int *n1, double *a, int *lda, double *d, double *sva, int *mv, double *v, int *ldv, double *eps, double *sfmin, double *tol, int *nsweep, double *work, int *lwork, int *info); +void BLAS_FUNC(dgtcon)(char *norm, int *n, double *dl, double *d, double *du, double *du2, int *ipiv, double *anorm, double *rcond, double *work, int *iwork, int *info); +void BLAS_FUNC(dgtrfs)(char *trans, int *n, int *nrhs, double *dl, double *d, double *du, double *dlf, double *df, double *duf, double *du2, int *ipiv, double *b, int *ldb, double *x, int *ldx, double *ferr, double *berr, double *work, int *iwork, int *info); +void BLAS_FUNC(dgtsv)(int *n, int *nrhs, double *dl, double *d, double *du, double *b, int *ldb, int *info); +void BLAS_FUNC(dgtsvx)(char *fact, char *trans, int *n, int *nrhs, double *dl, double *d, double *du, double *dlf, double *df, double *duf, double *du2, int *ipiv, double *b, int *ldb, double *x, int *ldx, double *rcond, double *ferr, double *berr, double *work, int *iwork, int *info); +void BLAS_FUNC(dgttrf)(int *n, double *dl, double *d, double *du, double *du2, int *ipiv, int *info); +void BLAS_FUNC(dgttrs)(char *trans, int *n, int *nrhs, double *dl, double *d, double *du, double *du2, int *ipiv, double *b, int *ldb, int *info); +void BLAS_FUNC(dgtts2)(int *itrans, int *n, int *nrhs, double *dl, double *d, double *du, double *du2, int *ipiv, double *b, int *ldb); +void BLAS_FUNC(dhgeqz)(char *job, char *compq, char *compz, int *n, int *ilo, int *ihi, double *h, int *ldh, double *t, int *ldt, double *alphar, double *alphai, double *beta, double *q, int *ldq, double *z, int *ldz, double *work, int *lwork, int *info); +void BLAS_FUNC(dhsein)(char *side, char *eigsrc, char *initv, int *select, int *n, double *h, int *ldh, double *wr, double *wi, double *vl, int *ldvl, double *vr, int *ldvr, int *mm, int *m, double *work, int *ifaill, int *ifailr, int *info); +void BLAS_FUNC(dhseqr)(char *job, char *compz, int *n, int *ilo, int *ihi, double *h, int *ldh, double *wr, double *wi, double *z, int *ldz, double *work, int *lwork, int 
*info); +int BLAS_FUNC(disnan)(double *din); +void BLAS_FUNC(dlabad)(double *small, double *large); +void BLAS_FUNC(dlabrd)(int *m, int *n, int *nb, double *a, int *lda, double *d, double *e, double *tauq, double *taup, double *x, int *ldx, double *y, int *ldy); +void BLAS_FUNC(dlacn2)(int *n, double *v, double *x, int *isgn, double *est, int *kase, int *isave); +void BLAS_FUNC(dlacon)(int *n, double *v, double *x, int *isgn, double *est, int *kase); +void BLAS_FUNC(dlacpy)(char *uplo, int *m, int *n, double *a, int *lda, double *b, int *ldb); +void BLAS_FUNC(dladiv)(double *a, double *b, double *c, double *d, double *p, double *q); +void BLAS_FUNC(dlae2)(double *a, double *b, double *c, double *rt1, double *rt2); +void BLAS_FUNC(dlaebz)(int *ijob, int *nitmax, int *n, int *mmax, int *minp, int *nbmin, double *abstol, double *reltol, double *pivmin, double *d, double *e, double *e2, int *nval, double *ab, double *c, int *mout, int *nab, double *work, int *iwork, int *info); +void BLAS_FUNC(dlaed0)(int *icompq, int *qsiz, int *n, double *d, double *e, double *q, int *ldq, double *qstore, int *ldqs, double *work, int *iwork, int *info); +void BLAS_FUNC(dlaed1)(int *n, double *d, double *q, int *ldq, int *indxq, double *rho, int *cutpnt, double *work, int *iwork, int *info); +void BLAS_FUNC(dlaed2)(int *k, int *n, int *n1, double *d, double *q, int *ldq, int *indxq, double *rho, double *z, double *dlamda, double *w, double *q2, int *indx, int *indxc, int *indxp, int *coltyp, int *info); +void BLAS_FUNC(dlaed3)(int *k, int *n, int *n1, double *d, double *q, int *ldq, double *rho, double *dlamda, double *q2, int *indx, int *ctot, double *w, double *s, int *info); +void BLAS_FUNC(dlaed4)(int *n, int *i, double *d, double *z, double *delta, double *rho, double *dlam, int *info); +void BLAS_FUNC(dlaed5)(int *i, double *d, double *z, double *delta, double *rho, double *dlam); +void BLAS_FUNC(dlaed6)(int *kniter, int *orgati, double *rho, double *d, double *z, double *finit, double *tau, int *info); +void BLAS_FUNC(dlaed7)(int *icompq, int *n, int *qsiz, int *tlvls, int *curlvl, int *curpbm, double *d, double *q, int *ldq, int *indxq, double *rho, int *cutpnt, double *qstore, int *qptr, int *prmptr, int *perm, int *givptr, int *givcol, double *givnum, double *work, int *iwork, int *info); +void BLAS_FUNC(dlaed8)(int *icompq, int *k, int *n, int *qsiz, double *d, double *q, int *ldq, int *indxq, double *rho, int *cutpnt, double *z, double *dlamda, double *q2, int *ldq2, double *w, int *perm, int *givptr, int *givcol, double *givnum, int *indxp, int *indx, int *info); +void BLAS_FUNC(dlaed9)(int *k, int *kstart, int *kstop, int *n, double *d, double *q, int *ldq, double *rho, double *dlamda, double *w, double *s, int *lds, int *info); +void BLAS_FUNC(dlaeda)(int *n, int *tlvls, int *curlvl, int *curpbm, int *prmptr, int *perm, int *givptr, int *givcol, double *givnum, double *q, int *qptr, double *z, double *ztemp, int *info); +void BLAS_FUNC(dlaein)(int *rightv, int *noinit, int *n, double *h, int *ldh, double *wr, double *wi, double *vr, double *vi, double *b, int *ldb, double *work, double *eps3, double *smlnum, double *bignum, int *info); +void BLAS_FUNC(dlaev2)(double *a, double *b, double *c, double *rt1, double *rt2, double *cs1, double *sn1); +void BLAS_FUNC(dlaexc)(int *wantq, int *n, double *t, int *ldt, double *q, int *ldq, int *j1, int *n1, int *n2, double *work, int *info); +void BLAS_FUNC(dlag2)(double *a, int *lda, double *b, int *ldb, double *safmin, double *scale1, double *scale2, 
double *wr1, double *wr2, double *wi); +void BLAS_FUNC(dlag2s)(int *m, int *n, double *a, int *lda, float *sa, int *ldsa, int *info); +void BLAS_FUNC(dlags2)(int *upper, double *a1, double *a2, double *a3, double *b1, double *b2, double *b3, double *csu, double *snu, double *csv, double *snv, double *csq, double *snq); +void BLAS_FUNC(dlagtf)(int *n, double *a, double *lambda_, double *b, double *c, double *tol, double *d, int *in_, int *info); +void BLAS_FUNC(dlagtm)(char *trans, int *n, int *nrhs, double *alpha, double *dl, double *d, double *du, double *x, int *ldx, double *beta, double *b, int *ldb); +void BLAS_FUNC(dlagts)(int *job, int *n, double *a, double *b, double *c, double *d, int *in_, double *y, double *tol, int *info); +void BLAS_FUNC(dlagv2)(double *a, int *lda, double *b, int *ldb, double *alphar, double *alphai, double *beta, double *csl, double *snl, double *csr, double *snr); +void BLAS_FUNC(dlahqr)(int *wantt, int *wantz, int *n, int *ilo, int *ihi, double *h, int *ldh, double *wr, double *wi, int *iloz, int *ihiz, double *z, int *ldz, int *info); +void BLAS_FUNC(dlahr2)(int *n, int *k, int *nb, double *a, int *lda, double *tau, double *t, int *ldt, double *y, int *ldy); +void BLAS_FUNC(dlaic1)(int *job, int *j, double *x, double *sest, double *w, double *gamma, double *sestpr, double *s, double *c); +void BLAS_FUNC(dlaln2)(int *ltrans, int *na, int *nw, double *smin, double *ca, double *a, int *lda, double *d1, double *d2, double *b, int *ldb, double *wr, double *wi, double *x, int *ldx, double *scale, double *xnorm, int *info); +void BLAS_FUNC(dlals0)(int *icompq, int *nl, int *nr, int *sqre, int *nrhs, double *b, int *ldb, double *bx, int *ldbx, int *perm, int *givptr, int *givcol, int *ldgcol, double *givnum, int *ldgnum, double *poles, double *difl, double *difr, double *z, int *k, double *c, double *s, double *work, int *info); +void BLAS_FUNC(dlalsa)(int *icompq, int *smlsiz, int *n, int *nrhs, double *b, int *ldb, double *bx, int *ldbx, double *u, int *ldu, double *vt, int *k, double *difl, double *difr, double *z, double *poles, int *givptr, int *givcol, int *ldgcol, int *perm, double *givnum, double *c, double *s, double *work, int *iwork, int *info); +void BLAS_FUNC(dlalsd)(char *uplo, int *smlsiz, int *n, int *nrhs, double *d, double *e, double *b, int *ldb, double *rcond, int *rank, double *work, int *iwork, int *info); +double BLAS_FUNC(dlamch)(char *cmach); +void BLAS_FUNC(dlamrg)(int *n1, int *n2, double *a, int *dtrd1, int *dtrd2, int *index_bn); +int BLAS_FUNC(dlaneg)(int *n, double *d, double *lld, double *sigma, double *pivmin, int *r); +double BLAS_FUNC(dlangb)(char *norm, int *n, int *kl, int *ku, double *ab, int *ldab, double *work); +double BLAS_FUNC(dlange)(char *norm, int *m, int *n, double *a, int *lda, double *work); +double BLAS_FUNC(dlangt)(char *norm, int *n, double *dl, double *d_, double *du); +double BLAS_FUNC(dlanhs)(char *norm, int *n, double *a, int *lda, double *work); +double BLAS_FUNC(dlansb)(char *norm, char *uplo, int *n, int *k, double *ab, int *ldab, double *work); +double BLAS_FUNC(dlansf)(char *norm, char *transr, char *uplo, int *n, double *a, double *work); +double BLAS_FUNC(dlansp)(char *norm, char *uplo, int *n, double *ap, double *work); +double BLAS_FUNC(dlanst)(char *norm, int *n, double *d_, double *e); +double BLAS_FUNC(dlansy)(char *norm, char *uplo, int *n, double *a, int *lda, double *work); +double BLAS_FUNC(dlantb)(char *norm, char *uplo, char *diag, int *n, int *k, double *ab, int *ldab, double *work); 
+double BLAS_FUNC(dlantp)(char *norm, char *uplo, char *diag, int *n, double *ap, double *work); +double BLAS_FUNC(dlantr)(char *norm, char *uplo, char *diag, int *m, int *n, double *a, int *lda, double *work); +void BLAS_FUNC(dlanv2)(double *a, double *b, double *c, double *d, double *rt1r, double *rt1i, double *rt2r, double *rt2i, double *cs, double *sn); +void BLAS_FUNC(dlapll)(int *n, double *x, int *incx, double *y, int *incy, double *ssmin); +void BLAS_FUNC(dlapmr)(int *forwrd, int *m, int *n, double *x, int *ldx, int *k); +void BLAS_FUNC(dlapmt)(int *forwrd, int *m, int *n, double *x, int *ldx, int *k); +double BLAS_FUNC(dlapy2)(double *x, double *y); +double BLAS_FUNC(dlapy3)(double *x, double *y, double *z); +void BLAS_FUNC(dlaqgb)(int *m, int *n, int *kl, int *ku, double *ab, int *ldab, double *r, double *c, double *rowcnd, double *colcnd, double *amax, char *equed); +void BLAS_FUNC(dlaqge)(int *m, int *n, double *a, int *lda, double *r, double *c, double *rowcnd, double *colcnd, double *amax, char *equed); +void BLAS_FUNC(dlaqp2)(int *m, int *n, int *offset, double *a, int *lda, int *jpvt, double *tau, double *vn1, double *vn2, double *work); +void BLAS_FUNC(dlaqps)(int *m, int *n, int *offset, int *nb, int *kb, double *a, int *lda, int *jpvt, double *tau, double *vn1, double *vn2, double *auxv, double *f, int *ldf); +void BLAS_FUNC(dlaqr0)(int *wantt, int *wantz, int *n, int *ilo, int *ihi, double *h, int *ldh, double *wr, double *wi, int *iloz, int *ihiz, double *z, int *ldz, double *work, int *lwork, int *info); +void BLAS_FUNC(dlaqr1)(int *n, double *h, int *ldh, double *sr1, double *si1, double *sr2, double *si2, double *v); +void BLAS_FUNC(dlaqr2)(int *wantt, int *wantz, int *n, int *ktop, int *kbot, int *nw, double *h, int *ldh, int *iloz, int *ihiz, double *z, int *ldz, int *ns, int *nd, double *sr, double *si, double *v, int *ldv, int *nh, double *t, int *ldt, int *nv, double *wv, int *ldwv, double *work, int *lwork); +void BLAS_FUNC(dlaqr3)(int *wantt, int *wantz, int *n, int *ktop, int *kbot, int *nw, double *h, int *ldh, int *iloz, int *ihiz, double *z, int *ldz, int *ns, int *nd, double *sr, double *si, double *v, int *ldv, int *nh, double *t, int *ldt, int *nv, double *wv, int *ldwv, double *work, int *lwork); +void BLAS_FUNC(dlaqr4)(int *wantt, int *wantz, int *n, int *ilo, int *ihi, double *h, int *ldh, double *wr, double *wi, int *iloz, int *ihiz, double *z, int *ldz, double *work, int *lwork, int *info); +void BLAS_FUNC(dlaqr5)(int *wantt, int *wantz, int *kacc22, int *n, int *ktop, int *kbot, int *nshfts, double *sr, double *si, double *h, int *ldh, int *iloz, int *ihiz, double *z, int *ldz, double *v, int *ldv, double *u, int *ldu, int *nv, double *wv, int *ldwv, int *nh, double *wh, int *ldwh); +void BLAS_FUNC(dlaqsb)(char *uplo, int *n, int *kd, double *ab, int *ldab, double *s, double *scond, double *amax, char *equed); +void BLAS_FUNC(dlaqsp)(char *uplo, int *n, double *ap, double *s, double *scond, double *amax, char *equed); +void BLAS_FUNC(dlaqsy)(char *uplo, int *n, double *a, int *lda, double *s, double *scond, double *amax, char *equed); +void BLAS_FUNC(dlaqtr)(int *ltran, int *lreal, int *n, double *t, int *ldt, double *b, double *w, double *scale, double *x, double *work, int *info); +void BLAS_FUNC(dlar1v)(int *n, int *b1, int *bn, double *lambda_, double *d, double *l, double *ld, double *lld, double *pivmin, double *gaptol, double *z, int *wantnc, int *negcnt, double *ztz, double *mingma, int *r, int *isuppz, double *nrminv, double *resid, 
double *rqcorr, double *work); +void BLAS_FUNC(dlar2v)(int *n, double *x, double *y, double *z, int *incx, double *c, double *s, int *incc); +void BLAS_FUNC(dlarf)(char *side, int *m, int *n, double *v, int *incv, double *tau, double *c, int *ldc, double *work); +void BLAS_FUNC(dlarfb)(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, double *v, int *ldv, double *t, int *ldt, double *c, int *ldc, double *work, int *ldwork); +void BLAS_FUNC(dlarfg)(int *n, double *alpha, double *x, int *incx, double *tau); +void BLAS_FUNC(dlarfgp)(int *n, double *alpha, double *x, int *incx, double *tau); +void BLAS_FUNC(dlarft)(char *direct, char *storev, int *n, int *k, double *v, int *ldv, double *tau, double *t, int *ldt); +void BLAS_FUNC(dlarfx)(char *side, int *m, int *n, double *v, double *tau, double *c, int *ldc, double *work); +void BLAS_FUNC(dlargv)(int *n, double *x, int *incx, double *y, int *incy, double *c, int *incc); +void BLAS_FUNC(dlarnv)(int *idist, int *iseed, int *n, double *x); +void BLAS_FUNC(dlarra)(int *n, double *d, double *e, double *e2, double *spltol, double *tnrm, int *nsplit, int *isplit, int *info); +void BLAS_FUNC(dlarrb)(int *n, double *d, double *lld, int *ifirst, int *ilast, double *rtol1, double *rtol2, int *offset, double *w, double *wgap, double *werr, double *work, int *iwork, double *pivmin, double *spdiam, int *twist, int *info); +void BLAS_FUNC(dlarrc)(char *jobt, int *n, double *vl, double *vu, double *d, double *e, double *pivmin, int *eigcnt, int *lcnt, int *rcnt, int *info); +void BLAS_FUNC(dlarrd)(char *range, char *order, int *n, double *vl, double *vu, int *il, int *iu, double *gers, double *reltol, double *d, double *e, double *e2, double *pivmin, int *nsplit, int *isplit, int *m, double *w, double *werr, double *wl, double *wu, int *iblock, int *indexw, double *work, int *iwork, int *info); +void BLAS_FUNC(dlarre)(char *range, int *n, double *vl, double *vu, int *il, int *iu, double *d, double *e, double *e2, double *rtol1, double *rtol2, double *spltol, int *nsplit, int *isplit, int *m, double *w, double *werr, double *wgap, int *iblock, int *indexw, double *gers, double *pivmin, double *work, int *iwork, int *info); +void BLAS_FUNC(dlarrf)(int *n, double *d, double *l, double *ld, int *clstrt, int *clend, double *w, double *wgap, double *werr, double *spdiam, double *clgapl, double *clgapr, double *pivmin, double *sigma, double *dplus, double *lplus, double *work, int *info); +void BLAS_FUNC(dlarrj)(int *n, double *d, double *e2, int *ifirst, int *ilast, double *rtol, int *offset, double *w, double *werr, double *work, int *iwork, double *pivmin, double *spdiam, int *info); +void BLAS_FUNC(dlarrk)(int *n, int *iw, double *gl, double *gu, double *d, double *e2, double *pivmin, double *reltol, double *w, double *werr, int *info); +void BLAS_FUNC(dlarrr)(int *n, double *d, double *e, int *info); +void BLAS_FUNC(dlarrv)(int *n, double *vl, double *vu, double *d, double *l, double *pivmin, int *isplit, int *m, int *dol, int *dou, double *minrgp, double *rtol1, double *rtol2, double *w, double *werr, double *wgap, int *iblock, int *indexw, double *gers, double *z, int *ldz, int *isuppz, double *work, int *iwork, int *info); +void BLAS_FUNC(dlartg)(double *f, double *g, double *cs, double *sn, double *r); +void BLAS_FUNC(dlartgp)(double *f, double *g, double *cs, double *sn, double *r); +void BLAS_FUNC(dlartgs)(double *x, double *y, double *sigma, double *cs, double *sn); +void BLAS_FUNC(dlartv)(int *n, double *x, int *incx, double 
*y, int *incy, double *c, double *s, int *incc); +void BLAS_FUNC(dlaruv)(int *iseed, int *n, double *x); +void BLAS_FUNC(dlarz)(char *side, int *m, int *n, int *l, double *v, int *incv, double *tau, double *c, int *ldc, double *work); +void BLAS_FUNC(dlarzb)(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, double *v, int *ldv, double *t, int *ldt, double *c, int *ldc, double *work, int *ldwork); +void BLAS_FUNC(dlarzt)(char *direct, char *storev, int *n, int *k, double *v, int *ldv, double *tau, double *t, int *ldt); +void BLAS_FUNC(dlas2)(double *f, double *g, double *h, double *ssmin, double *ssmax); +void BLAS_FUNC(dlascl)(char *type_bn, int *kl, int *ku, double *cfrom, double *cto, int *m, int *n, double *a, int *lda, int *info); +void BLAS_FUNC(dlasd0)(int *n, int *sqre, double *d, double *e, double *u, int *ldu, double *vt, int *ldvt, int *smlsiz, int *iwork, double *work, int *info); +void BLAS_FUNC(dlasd1)(int *nl, int *nr, int *sqre, double *d, double *alpha, double *beta, double *u, int *ldu, double *vt, int *ldvt, int *idxq, int *iwork, double *work, int *info); +void BLAS_FUNC(dlasd2)(int *nl, int *nr, int *sqre, int *k, double *d, double *z, double *alpha, double *beta, double *u, int *ldu, double *vt, int *ldvt, double *dsigma, double *u2, int *ldu2, double *vt2, int *ldvt2, int *idxp, int *idx, int *idxc, int *idxq, int *coltyp, int *info); +void BLAS_FUNC(dlasd3)(int *nl, int *nr, int *sqre, int *k, double *d, double *q, int *ldq, double *dsigma, double *u, int *ldu, double *u2, int *ldu2, double *vt, int *ldvt, double *vt2, int *ldvt2, int *idxc, int *ctot, double *z, int *info); +void BLAS_FUNC(dlasd4)(int *n, int *i, double *d, double *z, double *delta, double *rho, double *sigma, double *work, int *info); +void BLAS_FUNC(dlasd5)(int *i, double *d, double *z, double *delta, double *rho, double *dsigma, double *work); +void BLAS_FUNC(dlasd6)(int *icompq, int *nl, int *nr, int *sqre, double *d, double *vf, double *vl, double *alpha, double *beta, int *idxq, int *perm, int *givptr, int *givcol, int *ldgcol, double *givnum, int *ldgnum, double *poles, double *difl, double *difr, double *z, int *k, double *c, double *s, double *work, int *iwork, int *info); +void BLAS_FUNC(dlasd7)(int *icompq, int *nl, int *nr, int *sqre, int *k, double *d, double *z, double *zw, double *vf, double *vfw, double *vl, double *vlw, double *alpha, double *beta, double *dsigma, int *idx, int *idxp, int *idxq, int *perm, int *givptr, int *givcol, int *ldgcol, double *givnum, int *ldgnum, double *c, double *s, int *info); +void BLAS_FUNC(dlasd8)(int *icompq, int *k, double *d, double *z, double *vf, double *vl, double *difl, double *difr, int *lddifr, double *dsigma, double *work, int *info); +void BLAS_FUNC(dlasda)(int *icompq, int *smlsiz, int *n, int *sqre, double *d, double *e, double *u, int *ldu, double *vt, int *k, double *difl, double *difr, double *z, double *poles, int *givptr, int *givcol, int *ldgcol, int *perm, double *givnum, double *c, double *s, double *work, int *iwork, int *info); +void BLAS_FUNC(dlasdq)(char *uplo, int *sqre, int *n, int *ncvt, int *nru, int *ncc, double *d, double *e, double *vt, int *ldvt, double *u, int *ldu, double *c, int *ldc, double *work, int *info); +void BLAS_FUNC(dlasdt)(int *n, int *lvl, int *nd, int *inode, int *ndiml, int *ndimr, int *msub); +void BLAS_FUNC(dlaset)(char *uplo, int *m, int *n, double *alpha, double *beta, double *a, int *lda); +void BLAS_FUNC(dlasq1)(int *n, double *d, double *e, double *work, int 
*info); +void BLAS_FUNC(dlasq2)(int *n, double *z, int *info); +void BLAS_FUNC(dlasq3)(int *i0, int *n0, double *z, int *pp, double *dmin, double *sigma, double *desig, double *qmax, int *nfail, int *iter, int *ndiv, int *ieee, int *ttype, double *dmin1, double *dmin2, double *dn, double *dn1, double *dn2, double *g, double *tau); +void BLAS_FUNC(dlasq4)(int *i0, int *n0, double *z, int *pp, int *n0in, double *dmin, double *dmin1, double *dmin2, double *dn, double *dn1, double *dn2, double *tau, int *ttype, double *g); +void BLAS_FUNC(dlasq6)(int *i0, int *n0, double *z, int *pp, double *dmin, double *dmin1, double *dmin2, double *dn, double *dnm1, double *dnm2); +void BLAS_FUNC(dlasr)(char *side, char *pivot, char *direct, int *m, int *n, double *c, double *s, double *a, int *lda); +void BLAS_FUNC(dlasrt)(char *id, int *n, double *d, int *info); +void BLAS_FUNC(dlassq)(int *n, double *x, int *incx, double *scale, double *sumsq); +void BLAS_FUNC(dlasv2)(double *f, double *g, double *h, double *ssmin, double *ssmax, double *snr, double *csr, double *snl, double *csl); +void BLAS_FUNC(dlaswp)(int *n, double *a, int *lda, int *k1, int *k2, int *ipiv, int *incx); +void BLAS_FUNC(dlasy2)(int *ltranl, int *ltranr, int *isgn, int *n1, int *n2, double *tl, int *ldtl, double *tr, int *ldtr, double *b, int *ldb, double *scale, double *x, int *ldx, double *xnorm, int *info); +void BLAS_FUNC(dlasyf)(char *uplo, int *n, int *nb, int *kb, double *a, int *lda, int *ipiv, double *w, int *ldw, int *info); +void BLAS_FUNC(dlat2s)(char *uplo, int *n, double *a, int *lda, float *sa, int *ldsa, int *info); +void BLAS_FUNC(dlatbs)(char *uplo, char *trans, char *diag, char *normin, int *n, int *kd, double *ab, int *ldab, double *x, double *scale, double *cnorm, int *info); +void BLAS_FUNC(dlatdf)(int *ijob, int *n, double *z, int *ldz, double *rhs, double *rdsum, double *rdscal, int *ipiv, int *jpiv); +void BLAS_FUNC(dlatps)(char *uplo, char *trans, char *diag, char *normin, int *n, double *ap, double *x, double *scale, double *cnorm, int *info); +void BLAS_FUNC(dlatrd)(char *uplo, int *n, int *nb, double *a, int *lda, double *e, double *tau, double *w, int *ldw); +void BLAS_FUNC(dlatrs)(char *uplo, char *trans, char *diag, char *normin, int *n, double *a, int *lda, double *x, double *scale, double *cnorm, int *info); +void BLAS_FUNC(dlatrz)(int *m, int *n, int *l, double *a, int *lda, double *tau, double *work); +void BLAS_FUNC(dlauu2)(char *uplo, int *n, double *a, int *lda, int *info); +void BLAS_FUNC(dlauum)(char *uplo, int *n, double *a, int *lda, int *info); +void BLAS_FUNC(dopgtr)(char *uplo, int *n, double *ap, double *tau, double *q, int *ldq, double *work, int *info); +void BLAS_FUNC(dopmtr)(char *side, char *uplo, char *trans, int *m, int *n, double *ap, double *tau, double *c, int *ldc, double *work, int *info); +void BLAS_FUNC(dorbdb)(char *trans, char *signs, int *m, int *p, int *q, double *x11, int *ldx11, double *x12, int *ldx12, double *x21, int *ldx21, double *x22, int *ldx22, double *theta, double *phi, double *taup1, double *taup2, double *tauq1, double *tauq2, double *work, int *lwork, int *info); +void BLAS_FUNC(dorcsd)(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, char *signs, int *m, int *p, int *q, double *x11, int *ldx11, double *x12, int *ldx12, double *x21, int *ldx21, double *x22, int *ldx22, double *theta, double *u1, int *ldu1, double *u2, int *ldu2, double *v1t, int *ldv1t, double *v2t, int *ldv2t, double *work, int *lwork, int *iwork, int *info); +void 
BLAS_FUNC(dorg2l)(int *m, int *n, int *k, double *a, int *lda, double *tau, double *work, int *info); +void BLAS_FUNC(dorg2r)(int *m, int *n, int *k, double *a, int *lda, double *tau, double *work, int *info); +void BLAS_FUNC(dorgbr)(char *vect, int *m, int *n, int *k, double *a, int *lda, double *tau, double *work, int *lwork, int *info); +void BLAS_FUNC(dorghr)(int *n, int *ilo, int *ihi, double *a, int *lda, double *tau, double *work, int *lwork, int *info); +void BLAS_FUNC(dorgl2)(int *m, int *n, int *k, double *a, int *lda, double *tau, double *work, int *info); +void BLAS_FUNC(dorglq)(int *m, int *n, int *k, double *a, int *lda, double *tau, double *work, int *lwork, int *info); +void BLAS_FUNC(dorgql)(int *m, int *n, int *k, double *a, int *lda, double *tau, double *work, int *lwork, int *info); +void BLAS_FUNC(dorgqr)(int *m, int *n, int *k, double *a, int *lda, double *tau, double *work, int *lwork, int *info); +void BLAS_FUNC(dorgr2)(int *m, int *n, int *k, double *a, int *lda, double *tau, double *work, int *info); +void BLAS_FUNC(dorgrq)(int *m, int *n, int *k, double *a, int *lda, double *tau, double *work, int *lwork, int *info); +void BLAS_FUNC(dorgtr)(char *uplo, int *n, double *a, int *lda, double *tau, double *work, int *lwork, int *info); +void BLAS_FUNC(dorm2l)(char *side, char *trans, int *m, int *n, int *k, double *a, int *lda, double *tau, double *c, int *ldc, double *work, int *info); +void BLAS_FUNC(dorm2r)(char *side, char *trans, int *m, int *n, int *k, double *a, int *lda, double *tau, double *c, int *ldc, double *work, int *info); +void BLAS_FUNC(dormbr)(char *vect, char *side, char *trans, int *m, int *n, int *k, double *a, int *lda, double *tau, double *c, int *ldc, double *work, int *lwork, int *info); +void BLAS_FUNC(dormhr)(char *side, char *trans, int *m, int *n, int *ilo, int *ihi, double *a, int *lda, double *tau, double *c, int *ldc, double *work, int *lwork, int *info); +void BLAS_FUNC(dorml2)(char *side, char *trans, int *m, int *n, int *k, double *a, int *lda, double *tau, double *c, int *ldc, double *work, int *info); +void BLAS_FUNC(dormlq)(char *side, char *trans, int *m, int *n, int *k, double *a, int *lda, double *tau, double *c, int *ldc, double *work, int *lwork, int *info); +void BLAS_FUNC(dormql)(char *side, char *trans, int *m, int *n, int *k, double *a, int *lda, double *tau, double *c, int *ldc, double *work, int *lwork, int *info); +void BLAS_FUNC(dormqr)(char *side, char *trans, int *m, int *n, int *k, double *a, int *lda, double *tau, double *c, int *ldc, double *work, int *lwork, int *info); +void BLAS_FUNC(dormr2)(char *side, char *trans, int *m, int *n, int *k, double *a, int *lda, double *tau, double *c, int *ldc, double *work, int *info); +void BLAS_FUNC(dormr3)(char *side, char *trans, int *m, int *n, int *k, int *l, double *a, int *lda, double *tau, double *c, int *ldc, double *work, int *info); +void BLAS_FUNC(dormrq)(char *side, char *trans, int *m, int *n, int *k, double *a, int *lda, double *tau, double *c, int *ldc, double *work, int *lwork, int *info); +void BLAS_FUNC(dormrz)(char *side, char *trans, int *m, int *n, int *k, int *l, double *a, int *lda, double *tau, double *c, int *ldc, double *work, int *lwork, int *info); +void BLAS_FUNC(dormtr)(char *side, char *uplo, char *trans, int *m, int *n, double *a, int *lda, double *tau, double *c, int *ldc, double *work, int *lwork, int *info); +void BLAS_FUNC(dpbcon)(char *uplo, int *n, int *kd, double *ab, int *ldab, double *anorm, double *rcond, double *work, int *iwork, 
int *info); +void BLAS_FUNC(dpbequ)(char *uplo, int *n, int *kd, double *ab, int *ldab, double *s, double *scond, double *amax, int *info); +void BLAS_FUNC(dpbrfs)(char *uplo, int *n, int *kd, int *nrhs, double *ab, int *ldab, double *afb, int *ldafb, double *b, int *ldb, double *x, int *ldx, double *ferr, double *berr, double *work, int *iwork, int *info); +void BLAS_FUNC(dpbstf)(char *uplo, int *n, int *kd, double *ab, int *ldab, int *info); +void BLAS_FUNC(dpbsv)(char *uplo, int *n, int *kd, int *nrhs, double *ab, int *ldab, double *b, int *ldb, int *info); +void BLAS_FUNC(dpbsvx)(char *fact, char *uplo, int *n, int *kd, int *nrhs, double *ab, int *ldab, double *afb, int *ldafb, char *equed, double *s, double *b, int *ldb, double *x, int *ldx, double *rcond, double *ferr, double *berr, double *work, int *iwork, int *info); +void BLAS_FUNC(dpbtf2)(char *uplo, int *n, int *kd, double *ab, int *ldab, int *info); +void BLAS_FUNC(dpbtrf)(char *uplo, int *n, int *kd, double *ab, int *ldab, int *info); +void BLAS_FUNC(dpbtrs)(char *uplo, int *n, int *kd, int *nrhs, double *ab, int *ldab, double *b, int *ldb, int *info); +void BLAS_FUNC(dpftrf)(char *transr, char *uplo, int *n, double *a, int *info); +void BLAS_FUNC(dpftri)(char *transr, char *uplo, int *n, double *a, int *info); +void BLAS_FUNC(dpftrs)(char *transr, char *uplo, int *n, int *nrhs, double *a, double *b, int *ldb, int *info); +void BLAS_FUNC(dpocon)(char *uplo, int *n, double *a, int *lda, double *anorm, double *rcond, double *work, int *iwork, int *info); +void BLAS_FUNC(dpoequ)(int *n, double *a, int *lda, double *s, double *scond, double *amax, int *info); +void BLAS_FUNC(dpoequb)(int *n, double *a, int *lda, double *s, double *scond, double *amax, int *info); +void BLAS_FUNC(dporfs)(char *uplo, int *n, int *nrhs, double *a, int *lda, double *af, int *ldaf, double *b, int *ldb, double *x, int *ldx, double *ferr, double *berr, double *work, int *iwork, int *info); +void BLAS_FUNC(dposv)(char *uplo, int *n, int *nrhs, double *a, int *lda, double *b, int *ldb, int *info); +void BLAS_FUNC(dposvx)(char *fact, char *uplo, int *n, int *nrhs, double *a, int *lda, double *af, int *ldaf, char *equed, double *s, double *b, int *ldb, double *x, int *ldx, double *rcond, double *ferr, double *berr, double *work, int *iwork, int *info); +void BLAS_FUNC(dpotf2)(char *uplo, int *n, double *a, int *lda, int *info); +void BLAS_FUNC(dpotrf)(char *uplo, int *n, double *a, int *lda, int *info); +void BLAS_FUNC(dpotri)(char *uplo, int *n, double *a, int *lda, int *info); +void BLAS_FUNC(dpotrs)(char *uplo, int *n, int *nrhs, double *a, int *lda, double *b, int *ldb, int *info); +void BLAS_FUNC(dppcon)(char *uplo, int *n, double *ap, double *anorm, double *rcond, double *work, int *iwork, int *info); +void BLAS_FUNC(dppequ)(char *uplo, int *n, double *ap, double *s, double *scond, double *amax, int *info); +void BLAS_FUNC(dpprfs)(char *uplo, int *n, int *nrhs, double *ap, double *afp, double *b, int *ldb, double *x, int *ldx, double *ferr, double *berr, double *work, int *iwork, int *info); +void BLAS_FUNC(dppsv)(char *uplo, int *n, int *nrhs, double *ap, double *b, int *ldb, int *info); +void BLAS_FUNC(dppsvx)(char *fact, char *uplo, int *n, int *nrhs, double *ap, double *afp, char *equed, double *s, double *b, int *ldb, double *x, int *ldx, double *rcond, double *ferr, double *berr, double *work, int *iwork, int *info); +void BLAS_FUNC(dpptrf)(char *uplo, int *n, double *ap, int *info); +void BLAS_FUNC(dpptri)(char *uplo, int *n, double *ap, int 
*info); +void BLAS_FUNC(dpptrs)(char *uplo, int *n, int *nrhs, double *ap, double *b, int *ldb, int *info); +void BLAS_FUNC(dpstf2)(char *uplo, int *n, double *a, int *lda, int *piv, int *rank, double *tol, double *work, int *info); +void BLAS_FUNC(dpstrf)(char *uplo, int *n, double *a, int *lda, int *piv, int *rank, double *tol, double *work, int *info); +void BLAS_FUNC(dptcon)(int *n, double *d, double *e, double *anorm, double *rcond, double *work, int *info); +void BLAS_FUNC(dpteqr)(char *compz, int *n, double *d, double *e, double *z, int *ldz, double *work, int *info); +void BLAS_FUNC(dptrfs)(int *n, int *nrhs, double *d, double *e, double *df, double *ef, double *b, int *ldb, double *x, int *ldx, double *ferr, double *berr, double *work, int *info); +void BLAS_FUNC(dptsv)(int *n, int *nrhs, double *d, double *e, double *b, int *ldb, int *info); +void BLAS_FUNC(dptsvx)(char *fact, int *n, int *nrhs, double *d, double *e, double *df, double *ef, double *b, int *ldb, double *x, int *ldx, double *rcond, double *ferr, double *berr, double *work, int *info); +void BLAS_FUNC(dpttrf)(int *n, double *d, double *e, int *info); +void BLAS_FUNC(dpttrs)(int *n, int *nrhs, double *d, double *e, double *b, int *ldb, int *info); +void BLAS_FUNC(dptts2)(int *n, int *nrhs, double *d, double *e, double *b, int *ldb); +void BLAS_FUNC(drscl)(int *n, double *sa, double *sx, int *incx); +void BLAS_FUNC(dsbev)(char *jobz, char *uplo, int *n, int *kd, double *ab, int *ldab, double *w, double *z, int *ldz, double *work, int *info); +void BLAS_FUNC(dsbevd)(char *jobz, char *uplo, int *n, int *kd, double *ab, int *ldab, double *w, double *z, int *ldz, double *work, int *lwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(dsbevx)(char *jobz, char *range, char *uplo, int *n, int *kd, double *ab, int *ldab, double *q, int *ldq, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, double *z, int *ldz, double *work, int *iwork, int *ifail, int *info); +void BLAS_FUNC(dsbgst)(char *vect, char *uplo, int *n, int *ka, int *kb, double *ab, int *ldab, double *bb, int *ldbb, double *x, int *ldx, double *work, int *info); +void BLAS_FUNC(dsbgv)(char *jobz, char *uplo, int *n, int *ka, int *kb, double *ab, int *ldab, double *bb, int *ldbb, double *w, double *z, int *ldz, double *work, int *info); +void BLAS_FUNC(dsbgvd)(char *jobz, char *uplo, int *n, int *ka, int *kb, double *ab, int *ldab, double *bb, int *ldbb, double *w, double *z, int *ldz, double *work, int *lwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(dsbgvx)(char *jobz, char *range, char *uplo, int *n, int *ka, int *kb, double *ab, int *ldab, double *bb, int *ldbb, double *q, int *ldq, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, double *z, int *ldz, double *work, int *iwork, int *ifail, int *info); +void BLAS_FUNC(dsbtrd)(char *vect, char *uplo, int *n, int *kd, double *ab, int *ldab, double *d, double *e, double *q, int *ldq, double *work, int *info); +void BLAS_FUNC(dsfrk)(char *transr, char *uplo, char *trans, int *n, int *k, double *alpha, double *a, int *lda, double *beta, double *c); +void BLAS_FUNC(dsgesv)(int *n, int *nrhs, double *a, int *lda, int *ipiv, double *b, int *ldb, double *x, int *ldx, double *work, float *swork, int *iter, int *info); +void BLAS_FUNC(dspcon)(char *uplo, int *n, double *ap, int *ipiv, double *anorm, double *rcond, double *work, int *iwork, int *info); +void BLAS_FUNC(dspev)(char *jobz, char *uplo, int *n, double *ap, double *w, double *z, int *ldz, 
double *work, int *info); +void BLAS_FUNC(dspevd)(char *jobz, char *uplo, int *n, double *ap, double *w, double *z, int *ldz, double *work, int *lwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(dspevx)(char *jobz, char *range, char *uplo, int *n, double *ap, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, double *z, int *ldz, double *work, int *iwork, int *ifail, int *info); +void BLAS_FUNC(dspgst)(int *itype, char *uplo, int *n, double *ap, double *bp, int *info); +void BLAS_FUNC(dspgv)(int *itype, char *jobz, char *uplo, int *n, double *ap, double *bp, double *w, double *z, int *ldz, double *work, int *info); +void BLAS_FUNC(dspgvd)(int *itype, char *jobz, char *uplo, int *n, double *ap, double *bp, double *w, double *z, int *ldz, double *work, int *lwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(dspgvx)(int *itype, char *jobz, char *range, char *uplo, int *n, double *ap, double *bp, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, double *z, int *ldz, double *work, int *iwork, int *ifail, int *info); +void BLAS_FUNC(dsposv)(char *uplo, int *n, int *nrhs, double *a, int *lda, double *b, int *ldb, double *x, int *ldx, double *work, float *swork, int *iter, int *info); +void BLAS_FUNC(dsprfs)(char *uplo, int *n, int *nrhs, double *ap, double *afp, int *ipiv, double *b, int *ldb, double *x, int *ldx, double *ferr, double *berr, double *work, int *iwork, int *info); +void BLAS_FUNC(dspsv)(char *uplo, int *n, int *nrhs, double *ap, int *ipiv, double *b, int *ldb, int *info); +void BLAS_FUNC(dspsvx)(char *fact, char *uplo, int *n, int *nrhs, double *ap, double *afp, int *ipiv, double *b, int *ldb, double *x, int *ldx, double *rcond, double *ferr, double *berr, double *work, int *iwork, int *info); +void BLAS_FUNC(dsptrd)(char *uplo, int *n, double *ap, double *d, double *e, double *tau, int *info); +void BLAS_FUNC(dsptrf)(char *uplo, int *n, double *ap, int *ipiv, int *info); +void BLAS_FUNC(dsptri)(char *uplo, int *n, double *ap, int *ipiv, double *work, int *info); +void BLAS_FUNC(dsptrs)(char *uplo, int *n, int *nrhs, double *ap, int *ipiv, double *b, int *ldb, int *info); +void BLAS_FUNC(dstebz)(char *range, char *order, int *n, double *vl, double *vu, int *il, int *iu, double *abstol, double *d, double *e, int *m, int *nsplit, double *w, int *iblock, int *isplit, double *work, int *iwork, int *info); +void BLAS_FUNC(dstedc)(char *compz, int *n, double *d, double *e, double *z, int *ldz, double *work, int *lwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(dstegr)(char *jobz, char *range, int *n, double *d, double *e, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, double *z, int *ldz, int *isuppz, double *work, int *lwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(dstein)(int *n, double *d, double *e, int *m, double *w, int *iblock, int *isplit, double *z, int *ldz, double *work, int *iwork, int *ifail, int *info); +void BLAS_FUNC(dstemr)(char *jobz, char *range, int *n, double *d, double *e, double *vl, double *vu, int *il, int *iu, int *m, double *w, double *z, int *ldz, int *nzc, int *isuppz, int *tryrac, double *work, int *lwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(dsteqr)(char *compz, int *n, double *d, double *e, double *z, int *ldz, double *work, int *info); +void BLAS_FUNC(dsterf)(int *n, double *d, double *e, int *info); +void BLAS_FUNC(dstev)(char *jobz, int *n, double *d, double *e, double *z, int *ldz, double *work, int *info); +void 
BLAS_FUNC(dstevd)(char *jobz, int *n, double *d, double *e, double *z, int *ldz, double *work, int *lwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(dstevr)(char *jobz, char *range, int *n, double *d, double *e, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, double *z, int *ldz, int *isuppz, double *work, int *lwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(dstevx)(char *jobz, char *range, int *n, double *d, double *e, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, double *z, int *ldz, double *work, int *iwork, int *ifail, int *info); +void BLAS_FUNC(dsycon)(char *uplo, int *n, double *a, int *lda, int *ipiv, double *anorm, double *rcond, double *work, int *iwork, int *info); +void BLAS_FUNC(dsyconv)(char *uplo, char *way, int *n, double *a, int *lda, int *ipiv, double *work, int *info); +void BLAS_FUNC(dsyequb)(char *uplo, int *n, double *a, int *lda, double *s, double *scond, double *amax, double *work, int *info); +void BLAS_FUNC(dsyev)(char *jobz, char *uplo, int *n, double *a, int *lda, double *w, double *work, int *lwork, int *info); +void BLAS_FUNC(dsyevd)(char *jobz, char *uplo, int *n, double *a, int *lda, double *w, double *work, int *lwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(dsyevr)(char *jobz, char *range, char *uplo, int *n, double *a, int *lda, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, double *z, int *ldz, int *isuppz, double *work, int *lwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(dsyevx)(char *jobz, char *range, char *uplo, int *n, double *a, int *lda, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, double *z, int *ldz, double *work, int *lwork, int *iwork, int *ifail, int *info); +void BLAS_FUNC(dsygs2)(int *itype, char *uplo, int *n, double *a, int *lda, double *b, int *ldb, int *info); +void BLAS_FUNC(dsygst)(int *itype, char *uplo, int *n, double *a, int *lda, double *b, int *ldb, int *info); +void BLAS_FUNC(dsygv)(int *itype, char *jobz, char *uplo, int *n, double *a, int *lda, double *b, int *ldb, double *w, double *work, int *lwork, int *info); +void BLAS_FUNC(dsygvd)(int *itype, char *jobz, char *uplo, int *n, double *a, int *lda, double *b, int *ldb, double *w, double *work, int *lwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(dsygvx)(int *itype, char *jobz, char *range, char *uplo, int *n, double *a, int *lda, double *b, int *ldb, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, double *z, int *ldz, double *work, int *lwork, int *iwork, int *ifail, int *info); +void BLAS_FUNC(dsyrfs)(char *uplo, int *n, int *nrhs, double *a, int *lda, double *af, int *ldaf, int *ipiv, double *b, int *ldb, double *x, int *ldx, double *ferr, double *berr, double *work, int *iwork, int *info); +void BLAS_FUNC(dsysv)(char *uplo, int *n, int *nrhs, double *a, int *lda, int *ipiv, double *b, int *ldb, double *work, int *lwork, int *info); +void BLAS_FUNC(dsysvx)(char *fact, char *uplo, int *n, int *nrhs, double *a, int *lda, double *af, int *ldaf, int *ipiv, double *b, int *ldb, double *x, int *ldx, double *rcond, double *ferr, double *berr, double *work, int *lwork, int *iwork, int *info); +void BLAS_FUNC(dsyswapr)(char *uplo, int *n, double *a, int *lda, int *i1, int *i2); +void BLAS_FUNC(dsytd2)(char *uplo, int *n, double *a, int *lda, double *d, double *e, double *tau, int *info); +void BLAS_FUNC(dsytf2)(char *uplo, int *n, double *a, int *lda, int *ipiv, int *info); 
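As a minimal usage sketch for the double-precision prototypes listed above, assuming BLAS_FUNC(name) resolves to the conventional trailing-underscore Fortran symbol (name_) and that a LAPACK library is linked in (e.g. -llapack) — both of which are assumptions, since the actual macro definition lives elsewhere in the headers — the dpotrf entry could be driven from plain C like this:

    /* Sketch under the assumptions stated above; dpotrf_ is the presumed
       mangled name for BLAS_FUNC(dpotrf), matching the prototype listed
       in this header. */
    #include <stdio.h>

    void dpotrf_(char *uplo, int *n, double *a, int *lda, int *info);

    int main(void) {
        /* 2x2 symmetric positive-definite matrix, stored column-major. */
        double a[4] = {4.0, 2.0, 2.0, 3.0};
        int n = 2, lda = 2, info = 0;
        char uplo = 'L';

        dpotrf_(&uplo, &n, a, &lda, &info);   /* Cholesky: A = L * L^T */

        if (info == 0)
            printf("L = [%g 0; %g %g]\n", a[0], a[1], a[3]);  /* 2, 1, sqrt(2) */
        else
            printf("dpotrf failed, info = %d\n", info);
        return 0;
    }

The routines in this listing follow the Fortran calling convention: every argument is passed by pointer, matrices are column-major, and (where present) the trailing info argument is 0 on success, negative for an illegal argument, or positive for a numerical failure.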
+void BLAS_FUNC(dsytrd)(char *uplo, int *n, double *a, int *lda, double *d, double *e, double *tau, double *work, int *lwork, int *info); +void BLAS_FUNC(dsytrf)(char *uplo, int *n, double *a, int *lda, int *ipiv, double *work, int *lwork, int *info); +void BLAS_FUNC(dsytri)(char *uplo, int *n, double *a, int *lda, int *ipiv, double *work, int *info); +void BLAS_FUNC(dsytri2)(char *uplo, int *n, double *a, int *lda, int *ipiv, double *work, int *lwork, int *info); +void BLAS_FUNC(dsytri2x)(char *uplo, int *n, double *a, int *lda, int *ipiv, double *work, int *nb, int *info); +void BLAS_FUNC(dsytrs)(char *uplo, int *n, int *nrhs, double *a, int *lda, int *ipiv, double *b, int *ldb, int *info); +void BLAS_FUNC(dsytrs2)(char *uplo, int *n, int *nrhs, double *a, int *lda, int *ipiv, double *b, int *ldb, double *work, int *info); +void BLAS_FUNC(dtbcon)(char *norm, char *uplo, char *diag, int *n, int *kd, double *ab, int *ldab, double *rcond, double *work, int *iwork, int *info); +void BLAS_FUNC(dtbrfs)(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, double *ab, int *ldab, double *b, int *ldb, double *x, int *ldx, double *ferr, double *berr, double *work, int *iwork, int *info); +void BLAS_FUNC(dtbtrs)(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, double *ab, int *ldab, double *b, int *ldb, int *info); +void BLAS_FUNC(dtfsm)(char *transr, char *side, char *uplo, char *trans, char *diag, int *m, int *n, double *alpha, double *a, double *b, int *ldb); +void BLAS_FUNC(dtftri)(char *transr, char *uplo, char *diag, int *n, double *a, int *info); +void BLAS_FUNC(dtfttp)(char *transr, char *uplo, int *n, double *arf, double *ap, int *info); +void BLAS_FUNC(dtfttr)(char *transr, char *uplo, int *n, double *arf, double *a, int *lda, int *info); +void BLAS_FUNC(dtgevc)(char *side, char *howmny, int *select, int *n, double *s, int *lds, double *p, int *ldp, double *vl, int *ldvl, double *vr, int *ldvr, int *mm, int *m, double *work, int *info); +void BLAS_FUNC(dtgex2)(int *wantq, int *wantz, int *n, double *a, int *lda, double *b, int *ldb, double *q, int *ldq, double *z, int *ldz, int *j1, int *n1, int *n2, double *work, int *lwork, int *info); +void BLAS_FUNC(dtgexc)(int *wantq, int *wantz, int *n, double *a, int *lda, double *b, int *ldb, double *q, int *ldq, double *z, int *ldz, int *ifst, int *ilst, double *work, int *lwork, int *info); +void BLAS_FUNC(dtgsen)(int *ijob, int *wantq, int *wantz, int *select, int *n, double *a, int *lda, double *b, int *ldb, double *alphar, double *alphai, double *beta, double *q, int *ldq, double *z, int *ldz, int *m, double *pl, double *pr, double *dif, double *work, int *lwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(dtgsja)(char *jobu, char *jobv, char *jobq, int *m, int *p, int *n, int *k, int *l, double *a, int *lda, double *b, int *ldb, double *tola, double *tolb, double *alpha, double *beta, double *u, int *ldu, double *v, int *ldv, double *q, int *ldq, double *work, int *ncycle, int *info); +void BLAS_FUNC(dtgsna)(char *job, char *howmny, int *select, int *n, double *a, int *lda, double *b, int *ldb, double *vl, int *ldvl, double *vr, int *ldvr, double *s, double *dif, int *mm, int *m, double *work, int *lwork, int *iwork, int *info); +void BLAS_FUNC(dtgsy2)(char *trans, int *ijob, int *m, int *n, double *a, int *lda, double *b, int *ldb, double *c, int *ldc, double *d, int *ldd, double *e, int *lde, double *f, int *ldf, double *scale, double *rdsum, double *rdscal, int *iwork, int *pq, int *info); +void 
BLAS_FUNC(dtgsyl)(char *trans, int *ijob, int *m, int *n, double *a, int *lda, double *b, int *ldb, double *c, int *ldc, double *d, int *ldd, double *e, int *lde, double *f, int *ldf, double *scale, double *dif, double *work, int *lwork, int *iwork, int *info); +void BLAS_FUNC(dtpcon)(char *norm, char *uplo, char *diag, int *n, double *ap, double *rcond, double *work, int *iwork, int *info); +void BLAS_FUNC(dtpmqrt)(char *side, char *trans, int *m, int *n, int *k, int *l, int *nb, double *v, int *ldv, double *t, int *ldt, double *a, int *lda, double *b, int *ldb, double *work, int *info); +void BLAS_FUNC(dtpqrt)(int *m, int *n, int *l, int *nb, double *a, int *lda, double *b, int *ldb, double *t, int *ldt, double *work, int *info); +void BLAS_FUNC(dtpqrt2)(int *m, int *n, int *l, double *a, int *lda, double *b, int *ldb, double *t, int *ldt, int *info); +void BLAS_FUNC(dtprfb)(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, double *v, int *ldv, double *t, int *ldt, double *a, int *lda, double *b, int *ldb, double *work, int *ldwork); +void BLAS_FUNC(dtprfs)(char *uplo, char *trans, char *diag, int *n, int *nrhs, double *ap, double *b, int *ldb, double *x, int *ldx, double *ferr, double *berr, double *work, int *iwork, int *info); +void BLAS_FUNC(dtptri)(char *uplo, char *diag, int *n, double *ap, int *info); +void BLAS_FUNC(dtptrs)(char *uplo, char *trans, char *diag, int *n, int *nrhs, double *ap, double *b, int *ldb, int *info); +void BLAS_FUNC(dtpttf)(char *transr, char *uplo, int *n, double *ap, double *arf, int *info); +void BLAS_FUNC(dtpttr)(char *uplo, int *n, double *ap, double *a, int *lda, int *info); +void BLAS_FUNC(dtrcon)(char *norm, char *uplo, char *diag, int *n, double *a, int *lda, double *rcond, double *work, int *iwork, int *info); +void BLAS_FUNC(dtrevc)(char *side, char *howmny, int *select, int *n, double *t, int *ldt, double *vl, int *ldvl, double *vr, int *ldvr, int *mm, int *m, double *work, int *info); +void BLAS_FUNC(dtrexc)(char *compq, int *n, double *t, int *ldt, double *q, int *ldq, int *ifst, int *ilst, double *work, int *info); +void BLAS_FUNC(dtrrfs)(char *uplo, char *trans, char *diag, int *n, int *nrhs, double *a, int *lda, double *b, int *ldb, double *x, int *ldx, double *ferr, double *berr, double *work, int *iwork, int *info); +void BLAS_FUNC(dtrsen)(char *job, char *compq, int *select, int *n, double *t, int *ldt, double *q, int *ldq, double *wr, double *wi, int *m, double *s, double *sep, double *work, int *lwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(dtrsna)(char *job, char *howmny, int *select, int *n, double *t, int *ldt, double *vl, int *ldvl, double *vr, int *ldvr, double *s, double *sep, int *mm, int *m, double *work, int *ldwork, int *iwork, int *info); +void BLAS_FUNC(dtrsyl)(char *trana, char *tranb, int *isgn, int *m, int *n, double *a, int *lda, double *b, int *ldb, double *c, int *ldc, double *scale, int *info); +void BLAS_FUNC(dtrti2)(char *uplo, char *diag, int *n, double *a, int *lda, int *info); +void BLAS_FUNC(dtrtri)(char *uplo, char *diag, int *n, double *a, int *lda, int *info); +void BLAS_FUNC(dtrtrs)(char *uplo, char *trans, char *diag, int *n, int *nrhs, double *a, int *lda, double *b, int *ldb, int *info); +void BLAS_FUNC(dtrttf)(char *transr, char *uplo, int *n, double *a, int *lda, double *arf, int *info); +void BLAS_FUNC(dtrttp)(char *uplo, int *n, double *a, int *lda, double *ap, int *info); +void BLAS_FUNC(dtzrzf)(int *m, int *n, double *a, int *lda, double *tau, double 
*work, int *lwork, int *info); +double BLAS_FUNC(dzsum1)(int *n, npy_complex128 *cx, int *incx); +int BLAS_FUNC(icmax1)(int *n, npy_complex64 *cx, int *incx); +int BLAS_FUNC(ieeeck)(int *ispec, float *zero, float *one); +int BLAS_FUNC(ilaclc)(int *m, int *n, npy_complex64 *a, int *lda); +int BLAS_FUNC(ilaclr)(int *m, int *n, npy_complex64 *a, int *lda); +int BLAS_FUNC(iladiag)(char *diag); +int BLAS_FUNC(iladlc)(int *m, int *n, double *a, int *lda); +int BLAS_FUNC(iladlr)(int *m, int *n, double *a, int *lda); +int BLAS_FUNC(ilaprec)(char *prec); +int BLAS_FUNC(ilaslc)(int *m, int *n, float *a, int *lda); +int BLAS_FUNC(ilaslr)(int *m, int *n, float *a, int *lda); +int BLAS_FUNC(ilatrans)(char *trans); +int BLAS_FUNC(ilauplo)(char *uplo); +void BLAS_FUNC(ilaver)(int *vers_major, int *vers_minor, int *vers_patch); +int BLAS_FUNC(ilazlc)(int *m, int *n, npy_complex128 *a, int *lda); +int BLAS_FUNC(ilazlr)(int *m, int *n, npy_complex128 *a, int *lda); +int BLAS_FUNC(izmax1)(int *n, npy_complex128 *cx, int *incx); +void BLAS_FUNC(sbbcsd)(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, int *m, int *p, int *q, float *theta, float *phi, float *u1, int *ldu1, float *u2, int *ldu2, float *v1t, int *ldv1t, float *v2t, int *ldv2t, float *b11d, float *b11e, float *b12d, float *b12e, float *b21d, float *b21e, float *b22d, float *b22e, float *work, int *lwork, int *info); +void BLAS_FUNC(sbdsdc)(char *uplo, char *compq, int *n, float *d, float *e, float *u, int *ldu, float *vt, int *ldvt, float *q, int *iq, float *work, int *iwork, int *info); +void BLAS_FUNC(sbdsqr)(char *uplo, int *n, int *ncvt, int *nru, int *ncc, float *d, float *e, float *vt, int *ldvt, float *u, int *ldu, float *c, int *ldc, float *work, int *info); +float BLAS_FUNC(scsum1)(int *n, npy_complex64 *cx, int *incx); +void BLAS_FUNC(sdisna)(char *job, int *m, int *n, float *d, float *sep, int *info); +void BLAS_FUNC(sgbbrd)(char *vect, int *m, int *n, int *ncc, int *kl, int *ku, float *ab, int *ldab, float *d, float *e, float *q, int *ldq, float *pt, int *ldpt, float *c, int *ldc, float *work, int *info); +void BLAS_FUNC(sgbcon)(char *norm, int *n, int *kl, int *ku, float *ab, int *ldab, int *ipiv, float *anorm, float *rcond, float *work, int *iwork, int *info); +void BLAS_FUNC(sgbequ)(int *m, int *n, int *kl, int *ku, float *ab, int *ldab, float *r, float *c, float *rowcnd, float *colcnd, float *amax, int *info); +void BLAS_FUNC(sgbequb)(int *m, int *n, int *kl, int *ku, float *ab, int *ldab, float *r, float *c, float *rowcnd, float *colcnd, float *amax, int *info); +void BLAS_FUNC(sgbrfs)(char *trans, int *n, int *kl, int *ku, int *nrhs, float *ab, int *ldab, float *afb, int *ldafb, int *ipiv, float *b, int *ldb, float *x, int *ldx, float *ferr, float *berr, float *work, int *iwork, int *info); +void BLAS_FUNC(sgbsv)(int *n, int *kl, int *ku, int *nrhs, float *ab, int *ldab, int *ipiv, float *b, int *ldb, int *info); +void BLAS_FUNC(sgbsvx)(char *fact, char *trans, int *n, int *kl, int *ku, int *nrhs, float *ab, int *ldab, float *afb, int *ldafb, int *ipiv, char *equed, float *r, float *c, float *b, int *ldb, float *x, int *ldx, float *rcond, float *ferr, float *berr, float *work, int *iwork, int *info); +void BLAS_FUNC(sgbtf2)(int *m, int *n, int *kl, int *ku, float *ab, int *ldab, int *ipiv, int *info); +void BLAS_FUNC(sgbtrf)(int *m, int *n, int *kl, int *ku, float *ab, int *ldab, int *ipiv, int *info); +void BLAS_FUNC(sgbtrs)(char *trans, int *n, int *kl, int *ku, int *nrhs, float *ab, int *ldab, int *ipiv, 
float *b, int *ldb, int *info); +void BLAS_FUNC(sgebak)(char *job, char *side, int *n, int *ilo, int *ihi, float *scale, int *m, float *v, int *ldv, int *info); +void BLAS_FUNC(sgebal)(char *job, int *n, float *a, int *lda, int *ilo, int *ihi, float *scale, int *info); +void BLAS_FUNC(sgebd2)(int *m, int *n, float *a, int *lda, float *d, float *e, float *tauq, float *taup, float *work, int *info); +void BLAS_FUNC(sgebrd)(int *m, int *n, float *a, int *lda, float *d, float *e, float *tauq, float *taup, float *work, int *lwork, int *info); +void BLAS_FUNC(sgecon)(char *norm, int *n, float *a, int *lda, float *anorm, float *rcond, float *work, int *iwork, int *info); +void BLAS_FUNC(sgeequ)(int *m, int *n, float *a, int *lda, float *r, float *c, float *rowcnd, float *colcnd, float *amax, int *info); +void BLAS_FUNC(sgeequb)(int *m, int *n, float *a, int *lda, float *r, float *c, float *rowcnd, float *colcnd, float *amax, int *info); +void BLAS_FUNC(sgees)(char *jobvs, char *sort, _sselect2 *select, int *n, float *a, int *lda, int *sdim, float *wr, float *wi, float *vs, int *ldvs, float *work, int *lwork, int *bwork, int *info); +void BLAS_FUNC(sgeesx)(char *jobvs, char *sort, _sselect2 *select, char *sense, int *n, float *a, int *lda, int *sdim, float *wr, float *wi, float *vs, int *ldvs, float *rconde, float *rcondv, float *work, int *lwork, int *iwork, int *liwork, int *bwork, int *info); +void BLAS_FUNC(sgeev)(char *jobvl, char *jobvr, int *n, float *a, int *lda, float *wr, float *wi, float *vl, int *ldvl, float *vr, int *ldvr, float *work, int *lwork, int *info); +void BLAS_FUNC(sgeevx)(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, float *a, int *lda, float *wr, float *wi, float *vl, int *ldvl, float *vr, int *ldvr, int *ilo, int *ihi, float *scale, float *abnrm, float *rconde, float *rcondv, float *work, int *lwork, int *iwork, int *info); +void BLAS_FUNC(sgehd2)(int *n, int *ilo, int *ihi, float *a, int *lda, float *tau, float *work, int *info); +void BLAS_FUNC(sgehrd)(int *n, int *ilo, int *ihi, float *a, int *lda, float *tau, float *work, int *lwork, int *info); +void BLAS_FUNC(sgejsv)(char *joba, char *jobu, char *jobv, char *jobr, char *jobt, char *jobp, int *m, int *n, float *a, int *lda, float *sva, float *u, int *ldu, float *v, int *ldv, float *work, int *lwork, int *iwork, int *info); +void BLAS_FUNC(sgelq2)(int *m, int *n, float *a, int *lda, float *tau, float *work, int *info); +void BLAS_FUNC(sgelqf)(int *m, int *n, float *a, int *lda, float *tau, float *work, int *lwork, int *info); +void BLAS_FUNC(sgels)(char *trans, int *m, int *n, int *nrhs, float *a, int *lda, float *b, int *ldb, float *work, int *lwork, int *info); +void BLAS_FUNC(sgelsd)(int *m, int *n, int *nrhs, float *a, int *lda, float *b, int *ldb, float *s, float *rcond, int *rank, float *work, int *lwork, int *iwork, int *info); +void BLAS_FUNC(sgelss)(int *m, int *n, int *nrhs, float *a, int *lda, float *b, int *ldb, float *s, float *rcond, int *rank, float *work, int *lwork, int *info); +void BLAS_FUNC(sgelsy)(int *m, int *n, int *nrhs, float *a, int *lda, float *b, int *ldb, int *jpvt, float *rcond, int *rank, float *work, int *lwork, int *info); +void BLAS_FUNC(sgemqrt)(char *side, char *trans, int *m, int *n, int *k, int *nb, float *v, int *ldv, float *t, int *ldt, float *c, int *ldc, float *work, int *info); +void BLAS_FUNC(sgeql2)(int *m, int *n, float *a, int *lda, float *tau, float *work, int *info); +void BLAS_FUNC(sgeqlf)(int *m, int *n, float *a, int *lda, float *tau, float *work, int 
*lwork, int *info); +void BLAS_FUNC(sgeqp3)(int *m, int *n, float *a, int *lda, int *jpvt, float *tau, float *work, int *lwork, int *info); +void BLAS_FUNC(sgeqr2)(int *m, int *n, float *a, int *lda, float *tau, float *work, int *info); +void BLAS_FUNC(sgeqr2p)(int *m, int *n, float *a, int *lda, float *tau, float *work, int *info); +void BLAS_FUNC(sgeqrf)(int *m, int *n, float *a, int *lda, float *tau, float *work, int *lwork, int *info); +void BLAS_FUNC(sgeqrfp)(int *m, int *n, float *a, int *lda, float *tau, float *work, int *lwork, int *info); +void BLAS_FUNC(sgeqrt)(int *m, int *n, int *nb, float *a, int *lda, float *t, int *ldt, float *work, int *info); +void BLAS_FUNC(sgeqrt2)(int *m, int *n, float *a, int *lda, float *t, int *ldt, int *info); +void BLAS_FUNC(sgeqrt3)(int *m, int *n, float *a, int *lda, float *t, int *ldt, int *info); +void BLAS_FUNC(sgerfs)(char *trans, int *n, int *nrhs, float *a, int *lda, float *af, int *ldaf, int *ipiv, float *b, int *ldb, float *x, int *ldx, float *ferr, float *berr, float *work, int *iwork, int *info); +void BLAS_FUNC(sgerq2)(int *m, int *n, float *a, int *lda, float *tau, float *work, int *info); +void BLAS_FUNC(sgerqf)(int *m, int *n, float *a, int *lda, float *tau, float *work, int *lwork, int *info); +void BLAS_FUNC(sgesc2)(int *n, float *a, int *lda, float *rhs, int *ipiv, int *jpiv, float *scale); +void BLAS_FUNC(sgesdd)(char *jobz, int *m, int *n, float *a, int *lda, float *s, float *u, int *ldu, float *vt, int *ldvt, float *work, int *lwork, int *iwork, int *info); +void BLAS_FUNC(sgesv)(int *n, int *nrhs, float *a, int *lda, int *ipiv, float *b, int *ldb, int *info); +void BLAS_FUNC(sgesvd)(char *jobu, char *jobvt, int *m, int *n, float *a, int *lda, float *s, float *u, int *ldu, float *vt, int *ldvt, float *work, int *lwork, int *info); +void BLAS_FUNC(sgesvj)(char *joba, char *jobu, char *jobv, int *m, int *n, float *a, int *lda, float *sva, int *mv, float *v, int *ldv, float *work, int *lwork, int *info); +void BLAS_FUNC(sgesvx)(char *fact, char *trans, int *n, int *nrhs, float *a, int *lda, float *af, int *ldaf, int *ipiv, char *equed, float *r, float *c, float *b, int *ldb, float *x, int *ldx, float *rcond, float *ferr, float *berr, float *work, int *iwork, int *info); +void BLAS_FUNC(sgetc2)(int *n, float *a, int *lda, int *ipiv, int *jpiv, int *info); +void BLAS_FUNC(sgetf2)(int *m, int *n, float *a, int *lda, int *ipiv, int *info); +void BLAS_FUNC(sgetrf)(int *m, int *n, float *a, int *lda, int *ipiv, int *info); +void BLAS_FUNC(sgetri)(int *n, float *a, int *lda, int *ipiv, float *work, int *lwork, int *info); +void BLAS_FUNC(sgetrs)(char *trans, int *n, int *nrhs, float *a, int *lda, int *ipiv, float *b, int *ldb, int *info); +void BLAS_FUNC(sggbak)(char *job, char *side, int *n, int *ilo, int *ihi, float *lscale, float *rscale, int *m, float *v, int *ldv, int *info); +void BLAS_FUNC(sggbal)(char *job, int *n, float *a, int *lda, float *b, int *ldb, int *ilo, int *ihi, float *lscale, float *rscale, float *work, int *info); +void BLAS_FUNC(sgges)(char *jobvsl, char *jobvsr, char *sort, _sselect3 *selctg, int *n, float *a, int *lda, float *b, int *ldb, int *sdim, float *alphar, float *alphai, float *beta, float *vsl, int *ldvsl, float *vsr, int *ldvsr, float *work, int *lwork, int *bwork, int *info); +void BLAS_FUNC(sggesx)(char *jobvsl, char *jobvsr, char *sort, _sselect3 *selctg, char *sense, int *n, float *a, int *lda, float *b, int *ldb, int *sdim, float *alphar, float *alphai, float *beta, float *vsl, int *ldvsl, float 
*vsr, int *ldvsr, float *rconde, float *rcondv, float *work, int *lwork, int *iwork, int *liwork, int *bwork, int *info); +void BLAS_FUNC(sggev)(char *jobvl, char *jobvr, int *n, float *a, int *lda, float *b, int *ldb, float *alphar, float *alphai, float *beta, float *vl, int *ldvl, float *vr, int *ldvr, float *work, int *lwork, int *info); +void BLAS_FUNC(sggevx)(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, float *a, int *lda, float *b, int *ldb, float *alphar, float *alphai, float *beta, float *vl, int *ldvl, float *vr, int *ldvr, int *ilo, int *ihi, float *lscale, float *rscale, float *abnrm, float *bbnrm, float *rconde, float *rcondv, float *work, int *lwork, int *iwork, int *bwork, int *info); +void BLAS_FUNC(sggglm)(int *n, int *m, int *p, float *a, int *lda, float *b, int *ldb, float *d, float *x, float *y, float *work, int *lwork, int *info); +void BLAS_FUNC(sgghrd)(char *compq, char *compz, int *n, int *ilo, int *ihi, float *a, int *lda, float *b, int *ldb, float *q, int *ldq, float *z, int *ldz, int *info); +void BLAS_FUNC(sgglse)(int *m, int *n, int *p, float *a, int *lda, float *b, int *ldb, float *c, float *d, float *x, float *work, int *lwork, int *info); +void BLAS_FUNC(sggqrf)(int *n, int *m, int *p, float *a, int *lda, float *taua, float *b, int *ldb, float *taub, float *work, int *lwork, int *info); +void BLAS_FUNC(sggrqf)(int *m, int *p, int *n, float *a, int *lda, float *taua, float *b, int *ldb, float *taub, float *work, int *lwork, int *info); +void BLAS_FUNC(sgsvj0)(char *jobv, int *m, int *n, float *a, int *lda, float *d, float *sva, int *mv, float *v, int *ldv, float *eps, float *sfmin, float *tol, int *nsweep, float *work, int *lwork, int *info); +void BLAS_FUNC(sgsvj1)(char *jobv, int *m, int *n, int *n1, float *a, int *lda, float *d, float *sva, int *mv, float *v, int *ldv, float *eps, float *sfmin, float *tol, int *nsweep, float *work, int *lwork, int *info); +void BLAS_FUNC(sgtcon)(char *norm, int *n, float *dl, float *d, float *du, float *du2, int *ipiv, float *anorm, float *rcond, float *work, int *iwork, int *info); +void BLAS_FUNC(sgtrfs)(char *trans, int *n, int *nrhs, float *dl, float *d, float *du, float *dlf, float *df, float *duf, float *du2, int *ipiv, float *b, int *ldb, float *x, int *ldx, float *ferr, float *berr, float *work, int *iwork, int *info); +void BLAS_FUNC(sgtsv)(int *n, int *nrhs, float *dl, float *d, float *du, float *b, int *ldb, int *info); +void BLAS_FUNC(sgtsvx)(char *fact, char *trans, int *n, int *nrhs, float *dl, float *d, float *du, float *dlf, float *df, float *duf, float *du2, int *ipiv, float *b, int *ldb, float *x, int *ldx, float *rcond, float *ferr, float *berr, float *work, int *iwork, int *info); +void BLAS_FUNC(sgttrf)(int *n, float *dl, float *d, float *du, float *du2, int *ipiv, int *info); +void BLAS_FUNC(sgttrs)(char *trans, int *n, int *nrhs, float *dl, float *d, float *du, float *du2, int *ipiv, float *b, int *ldb, int *info); +void BLAS_FUNC(sgtts2)(int *itrans, int *n, int *nrhs, float *dl, float *d, float *du, float *du2, int *ipiv, float *b, int *ldb); +void BLAS_FUNC(shgeqz)(char *job, char *compq, char *compz, int *n, int *ilo, int *ihi, float *h, int *ldh, float *t, int *ldt, float *alphar, float *alphai, float *beta, float *q, int *ldq, float *z, int *ldz, float *work, int *lwork, int *info); +void BLAS_FUNC(shsein)(char *side, char *eigsrc, char *initv, int *select, int *n, float *h, int *ldh, float *wr, float *wi, float *vl, int *ldvl, float *vr, int *ldvr, int *mm, int *m, float *work, 
int *ifaill, int *ifailr, int *info); +void BLAS_FUNC(shseqr)(char *job, char *compz, int *n, int *ilo, int *ihi, float *h, int *ldh, float *wr, float *wi, float *z, int *ldz, float *work, int *lwork, int *info); +void BLAS_FUNC(slabad)(float *small, float *large); +void BLAS_FUNC(slabrd)(int *m, int *n, int *nb, float *a, int *lda, float *d, float *e, float *tauq, float *taup, float *x, int *ldx, float *y, int *ldy); +void BLAS_FUNC(slacn2)(int *n, float *v, float *x, int *isgn, float *est, int *kase, int *isave); +void BLAS_FUNC(slacon)(int *n, float *v, float *x, int *isgn, float *est, int *kase); +void BLAS_FUNC(slacpy)(char *uplo, int *m, int *n, float *a, int *lda, float *b, int *ldb); +void BLAS_FUNC(sladiv)(float *a, float *b, float *c, float *d, float *p, float *q); +void BLAS_FUNC(slae2)(float *a, float *b, float *c, float *rt1, float *rt2); +void BLAS_FUNC(slaebz)(int *ijob, int *nitmax, int *n, int *mmax, int *minp, int *nbmin, float *abstol, float *reltol, float *pivmin, float *d, float *e, float *e2, int *nval, float *ab, float *c, int *mout, int *nab, float *work, int *iwork, int *info); +void BLAS_FUNC(slaed0)(int *icompq, int *qsiz, int *n, float *d, float *e, float *q, int *ldq, float *qstore, int *ldqs, float *work, int *iwork, int *info); +void BLAS_FUNC(slaed1)(int *n, float *d, float *q, int *ldq, int *indxq, float *rho, int *cutpnt, float *work, int *iwork, int *info); +void BLAS_FUNC(slaed2)(int *k, int *n, int *n1, float *d, float *q, int *ldq, int *indxq, float *rho, float *z, float *dlamda, float *w, float *q2, int *indx, int *indxc, int *indxp, int *coltyp, int *info); +void BLAS_FUNC(slaed3)(int *k, int *n, int *n1, float *d, float *q, int *ldq, float *rho, float *dlamda, float *q2, int *indx, int *ctot, float *w, float *s, int *info); +void BLAS_FUNC(slaed4)(int *n, int *i, float *d, float *z, float *delta, float *rho, float *dlam, int *info); +void BLAS_FUNC(slaed5)(int *i, float *d, float *z, float *delta, float *rho, float *dlam); +void BLAS_FUNC(slaed6)(int *kniter, int *orgati, float *rho, float *d, float *z, float *finit, float *tau, int *info); +void BLAS_FUNC(slaed7)(int *icompq, int *n, int *qsiz, int *tlvls, int *curlvl, int *curpbm, float *d, float *q, int *ldq, int *indxq, float *rho, int *cutpnt, float *qstore, int *qptr, int *prmptr, int *perm, int *givptr, int *givcol, float *givnum, float *work, int *iwork, int *info); +void BLAS_FUNC(slaed8)(int *icompq, int *k, int *n, int *qsiz, float *d, float *q, int *ldq, int *indxq, float *rho, int *cutpnt, float *z, float *dlamda, float *q2, int *ldq2, float *w, int *perm, int *givptr, int *givcol, float *givnum, int *indxp, int *indx, int *info); +void BLAS_FUNC(slaed9)(int *k, int *kstart, int *kstop, int *n, float *d, float *q, int *ldq, float *rho, float *dlamda, float *w, float *s, int *lds, int *info); +void BLAS_FUNC(slaeda)(int *n, int *tlvls, int *curlvl, int *curpbm, int *prmptr, int *perm, int *givptr, int *givcol, float *givnum, float *q, int *qptr, float *z, float *ztemp, int *info); +void BLAS_FUNC(slaein)(int *rightv, int *noinit, int *n, float *h, int *ldh, float *wr, float *wi, float *vr, float *vi, float *b, int *ldb, float *work, float *eps3, float *smlnum, float *bignum, int *info); +void BLAS_FUNC(slaev2)(float *a, float *b, float *c, float *rt1, float *rt2, float *cs1, float *sn1); +void BLAS_FUNC(slaexc)(int *wantq, int *n, float *t, int *ldt, float *q, int *ldq, int *j1, int *n1, int *n2, float *work, int *info); +void BLAS_FUNC(slag2)(float *a, int *lda, float *b, int *ldb, 
float *safmin, float *scale1, float *scale2, float *wr1, float *wr2, float *wi); +void BLAS_FUNC(slag2d)(int *m, int *n, float *sa, int *ldsa, double *a, int *lda, int *info); +void BLAS_FUNC(slags2)(int *upper, float *a1, float *a2, float *a3, float *b1, float *b2, float *b3, float *csu, float *snu, float *csv, float *snv, float *csq, float *snq); +void BLAS_FUNC(slagtf)(int *n, float *a, float *lambda_, float *b, float *c, float *tol, float *d, int *in_, int *info); +void BLAS_FUNC(slagtm)(char *trans, int *n, int *nrhs, float *alpha, float *dl, float *d, float *du, float *x, int *ldx, float *beta, float *b, int *ldb); +void BLAS_FUNC(slagts)(int *job, int *n, float *a, float *b, float *c, float *d, int *in_, float *y, float *tol, int *info); +void BLAS_FUNC(slagv2)(float *a, int *lda, float *b, int *ldb, float *alphar, float *alphai, float *beta, float *csl, float *snl, float *csr, float *snr); +void BLAS_FUNC(slahqr)(int *wantt, int *wantz, int *n, int *ilo, int *ihi, float *h, int *ldh, float *wr, float *wi, int *iloz, int *ihiz, float *z, int *ldz, int *info); +void BLAS_FUNC(slahr2)(int *n, int *k, int *nb, float *a, int *lda, float *tau, float *t, int *ldt, float *y, int *ldy); +void BLAS_FUNC(slaic1)(int *job, int *j, float *x, float *sest, float *w, float *gamma, float *sestpr, float *s, float *c); +void BLAS_FUNC(slaln2)(int *ltrans, int *na, int *nw, float *smin, float *ca, float *a, int *lda, float *d1, float *d2, float *b, int *ldb, float *wr, float *wi, float *x, int *ldx, float *scale, float *xnorm, int *info); +void BLAS_FUNC(slals0)(int *icompq, int *nl, int *nr, int *sqre, int *nrhs, float *b, int *ldb, float *bx, int *ldbx, int *perm, int *givptr, int *givcol, int *ldgcol, float *givnum, int *ldgnum, float *poles, float *difl, float *difr, float *z, int *k, float *c, float *s, float *work, int *info); +void BLAS_FUNC(slalsa)(int *icompq, int *smlsiz, int *n, int *nrhs, float *b, int *ldb, float *bx, int *ldbx, float *u, int *ldu, float *vt, int *k, float *difl, float *difr, float *z, float *poles, int *givptr, int *givcol, int *ldgcol, int *perm, float *givnum, float *c, float *s, float *work, int *iwork, int *info); +void BLAS_FUNC(slalsd)(char *uplo, int *smlsiz, int *n, int *nrhs, float *d, float *e, float *b, int *ldb, float *rcond, int *rank, float *work, int *iwork, int *info); +float BLAS_FUNC(slamch)(char *cmach); +void BLAS_FUNC(slamrg)(int *n1, int *n2, float *a, int *strd1, int *strd2, int *index_bn); +float BLAS_FUNC(slangb)(char *norm, int *n, int *kl, int *ku, float *ab, int *ldab, float *work); +float BLAS_FUNC(slange)(char *norm, int *m, int *n, float *a, int *lda, float *work); +float BLAS_FUNC(slangt)(char *norm, int *n, float *dl, float *d, float *du); +float BLAS_FUNC(slanhs)(char *norm, int *n, float *a, int *lda, float *work); +float BLAS_FUNC(slansb)(char *norm, char *uplo, int *n, int *k, float *ab, int *ldab, float *work); +float BLAS_FUNC(slansf)(char *norm, char *transr, char *uplo, int *n, float *a, float *work); +float BLAS_FUNC(slansp)(char *norm, char *uplo, int *n, float *ap, float *work); +float BLAS_FUNC(slanst)(char *norm, int *n, float *d, float *e); +float BLAS_FUNC(slansy)(char *norm, char *uplo, int *n, float *a, int *lda, float *work); +float BLAS_FUNC(slantb)(char *norm, char *uplo, char *diag, int *n, int *k, float *ab, int *ldab, float *work); +float BLAS_FUNC(slantp)(char *norm, char *uplo, char *diag, int *n, float *ap, float *work); +float BLAS_FUNC(slantr)(char *norm, char *uplo, char *diag, int *m, int *n, float *a, int 
*lda, float *work); +void BLAS_FUNC(slanv2)(float *a, float *b, float *c, float *d, float *rt1r, float *rt1i, float *rt2r, float *rt2i, float *cs, float *sn); +void BLAS_FUNC(slapll)(int *n, float *x, int *incx, float *y, int *incy, float *ssmin); +void BLAS_FUNC(slapmr)(int *forwrd, int *m, int *n, float *x, int *ldx, int *k); +void BLAS_FUNC(slapmt)(int *forwrd, int *m, int *n, float *x, int *ldx, int *k); +float BLAS_FUNC(slapy2)(float *x, float *y); +float BLAS_FUNC(slapy3)(float *x, float *y, float *z); +void BLAS_FUNC(slaqgb)(int *m, int *n, int *kl, int *ku, float *ab, int *ldab, float *r, float *c, float *rowcnd, float *colcnd, float *amax, char *equed); +void BLAS_FUNC(slaqge)(int *m, int *n, float *a, int *lda, float *r, float *c, float *rowcnd, float *colcnd, float *amax, char *equed); +void BLAS_FUNC(slaqp2)(int *m, int *n, int *offset, float *a, int *lda, int *jpvt, float *tau, float *vn1, float *vn2, float *work); +void BLAS_FUNC(slaqps)(int *m, int *n, int *offset, int *nb, int *kb, float *a, int *lda, int *jpvt, float *tau, float *vn1, float *vn2, float *auxv, float *f, int *ldf); +void BLAS_FUNC(slaqr0)(int *wantt, int *wantz, int *n, int *ilo, int *ihi, float *h, int *ldh, float *wr, float *wi, int *iloz, int *ihiz, float *z, int *ldz, float *work, int *lwork, int *info); +void BLAS_FUNC(slaqr1)(int *n, float *h, int *ldh, float *sr1, float *si1, float *sr2, float *si2, float *v); +void BLAS_FUNC(slaqr2)(int *wantt, int *wantz, int *n, int *ktop, int *kbot, int *nw, float *h, int *ldh, int *iloz, int *ihiz, float *z, int *ldz, int *ns, int *nd, float *sr, float *si, float *v, int *ldv, int *nh, float *t, int *ldt, int *nv, float *wv, int *ldwv, float *work, int *lwork); +void BLAS_FUNC(slaqr3)(int *wantt, int *wantz, int *n, int *ktop, int *kbot, int *nw, float *h, int *ldh, int *iloz, int *ihiz, float *z, int *ldz, int *ns, int *nd, float *sr, float *si, float *v, int *ldv, int *nh, float *t, int *ldt, int *nv, float *wv, int *ldwv, float *work, int *lwork); +void BLAS_FUNC(slaqr4)(int *wantt, int *wantz, int *n, int *ilo, int *ihi, float *h, int *ldh, float *wr, float *wi, int *iloz, int *ihiz, float *z, int *ldz, float *work, int *lwork, int *info); +void BLAS_FUNC(slaqr5)(int *wantt, int *wantz, int *kacc22, int *n, int *ktop, int *kbot, int *nshfts, float *sr, float *si, float *h, int *ldh, int *iloz, int *ihiz, float *z, int *ldz, float *v, int *ldv, float *u, int *ldu, int *nv, float *wv, int *ldwv, int *nh, float *wh, int *ldwh); +void BLAS_FUNC(slaqsb)(char *uplo, int *n, int *kd, float *ab, int *ldab, float *s, float *scond, float *amax, char *equed); +void BLAS_FUNC(slaqsp)(char *uplo, int *n, float *ap, float *s, float *scond, float *amax, char *equed); +void BLAS_FUNC(slaqsy)(char *uplo, int *n, float *a, int *lda, float *s, float *scond, float *amax, char *equed); +void BLAS_FUNC(slaqtr)(int *ltran, int *lreal, int *n, float *t, int *ldt, float *b, float *w, float *scale, float *x, float *work, int *info); +void BLAS_FUNC(slar1v)(int *n, int *b1, int *bn, float *lambda_, float *d, float *l, float *ld, float *lld, float *pivmin, float *gaptol, float *z, int *wantnc, int *negcnt, float *ztz, float *mingma, int *r, int *isuppz, float *nrminv, float *resid, float *rqcorr, float *work); +void BLAS_FUNC(slar2v)(int *n, float *x, float *y, float *z, int *incx, float *c, float *s, int *incc); +void BLAS_FUNC(slarf)(char *side, int *m, int *n, float *v, int *incv, float *tau, float *c, int *ldc, float *work); +void BLAS_FUNC(slarfb)(char *side, char *trans, char 
*direct, char *storev, int *m, int *n, int *k, float *v, int *ldv, float *t, int *ldt, float *c, int *ldc, float *work, int *ldwork); +void BLAS_FUNC(slarfg)(int *n, float *alpha, float *x, int *incx, float *tau); +void BLAS_FUNC(slarfgp)(int *n, float *alpha, float *x, int *incx, float *tau); +void BLAS_FUNC(slarft)(char *direct, char *storev, int *n, int *k, float *v, int *ldv, float *tau, float *t, int *ldt); +void BLAS_FUNC(slarfx)(char *side, int *m, int *n, float *v, float *tau, float *c, int *ldc, float *work); +void BLAS_FUNC(slargv)(int *n, float *x, int *incx, float *y, int *incy, float *c, int *incc); +void BLAS_FUNC(slarnv)(int *idist, int *iseed, int *n, float *x); +void BLAS_FUNC(slarra)(int *n, float *d, float *e, float *e2, float *spltol, float *tnrm, int *nsplit, int *isplit, int *info); +void BLAS_FUNC(slarrb)(int *n, float *d, float *lld, int *ifirst, int *ilast, float *rtol1, float *rtol2, int *offset, float *w, float *wgap, float *werr, float *work, int *iwork, float *pivmin, float *spdiam, int *twist, int *info); +void BLAS_FUNC(slarrc)(char *jobt, int *n, float *vl, float *vu, float *d, float *e, float *pivmin, int *eigcnt, int *lcnt, int *rcnt, int *info); +void BLAS_FUNC(slarrd)(char *range, char *order, int *n, float *vl, float *vu, int *il, int *iu, float *gers, float *reltol, float *d, float *e, float *e2, float *pivmin, int *nsplit, int *isplit, int *m, float *w, float *werr, float *wl, float *wu, int *iblock, int *indexw, float *work, int *iwork, int *info); +void BLAS_FUNC(slarre)(char *range, int *n, float *vl, float *vu, int *il, int *iu, float *d, float *e, float *e2, float *rtol1, float *rtol2, float *spltol, int *nsplit, int *isplit, int *m, float *w, float *werr, float *wgap, int *iblock, int *indexw, float *gers, float *pivmin, float *work, int *iwork, int *info); +void BLAS_FUNC(slarrf)(int *n, float *d, float *l, float *ld, int *clstrt, int *clend, float *w, float *wgap, float *werr, float *spdiam, float *clgapl, float *clgapr, float *pivmin, float *sigma, float *dplus, float *lplus, float *work, int *info); +void BLAS_FUNC(slarrj)(int *n, float *d, float *e2, int *ifirst, int *ilast, float *rtol, int *offset, float *w, float *werr, float *work, int *iwork, float *pivmin, float *spdiam, int *info); +void BLAS_FUNC(slarrk)(int *n, int *iw, float *gl, float *gu, float *d, float *e2, float *pivmin, float *reltol, float *w, float *werr, int *info); +void BLAS_FUNC(slarrr)(int *n, float *d, float *e, int *info); +void BLAS_FUNC(slarrv)(int *n, float *vl, float *vu, float *d, float *l, float *pivmin, int *isplit, int *m, int *dol, int *dou, float *minrgp, float *rtol1, float *rtol2, float *w, float *werr, float *wgap, int *iblock, int *indexw, float *gers, float *z, int *ldz, int *isuppz, float *work, int *iwork, int *info); +void BLAS_FUNC(slartg)(float *f, float *g, float *cs, float *sn, float *r); +void BLAS_FUNC(slartgp)(float *f, float *g, float *cs, float *sn, float *r); +void BLAS_FUNC(slartgs)(float *x, float *y, float *sigma, float *cs, float *sn); +void BLAS_FUNC(slartv)(int *n, float *x, int *incx, float *y, int *incy, float *c, float *s, int *incc); +void BLAS_FUNC(slaruv)(int *iseed, int *n, float *x); +void BLAS_FUNC(slarz)(char *side, int *m, int *n, int *l, float *v, int *incv, float *tau, float *c, int *ldc, float *work); +void BLAS_FUNC(slarzb)(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, float *v, int *ldv, float *t, int *ldt, float *c, int *ldc, float *work, int *ldwork); +void 
BLAS_FUNC(slarzt)(char *direct, char *storev, int *n, int *k, float *v, int *ldv, float *tau, float *t, int *ldt); +void BLAS_FUNC(slas2)(float *f, float *g, float *h, float *ssmin, float *ssmax); +void BLAS_FUNC(slascl)(char *type_bn, int *kl, int *ku, float *cfrom, float *cto, int *m, int *n, float *a, int *lda, int *info); +void BLAS_FUNC(slasd0)(int *n, int *sqre, float *d, float *e, float *u, int *ldu, float *vt, int *ldvt, int *smlsiz, int *iwork, float *work, int *info); +void BLAS_FUNC(slasd1)(int *nl, int *nr, int *sqre, float *d, float *alpha, float *beta, float *u, int *ldu, float *vt, int *ldvt, int *idxq, int *iwork, float *work, int *info); +void BLAS_FUNC(slasd2)(int *nl, int *nr, int *sqre, int *k, float *d, float *z, float *alpha, float *beta, float *u, int *ldu, float *vt, int *ldvt, float *dsigma, float *u2, int *ldu2, float *vt2, int *ldvt2, int *idxp, int *idx, int *idxc, int *idxq, int *coltyp, int *info); +void BLAS_FUNC(slasd3)(int *nl, int *nr, int *sqre, int *k, float *d, float *q, int *ldq, float *dsigma, float *u, int *ldu, float *u2, int *ldu2, float *vt, int *ldvt, float *vt2, int *ldvt2, int *idxc, int *ctot, float *z, int *info); +void BLAS_FUNC(slasd4)(int *n, int *i, float *d, float *z, float *delta, float *rho, float *sigma, float *work, int *info); +void BLAS_FUNC(slasd5)(int *i, float *d, float *z, float *delta, float *rho, float *dsigma, float *work); +void BLAS_FUNC(slasd6)(int *icompq, int *nl, int *nr, int *sqre, float *d, float *vf, float *vl, float *alpha, float *beta, int *idxq, int *perm, int *givptr, int *givcol, int *ldgcol, float *givnum, int *ldgnum, float *poles, float *difl, float *difr, float *z, int *k, float *c, float *s, float *work, int *iwork, int *info); +void BLAS_FUNC(slasd7)(int *icompq, int *nl, int *nr, int *sqre, int *k, float *d, float *z, float *zw, float *vf, float *vfw, float *vl, float *vlw, float *alpha, float *beta, float *dsigma, int *idx, int *idxp, int *idxq, int *perm, int *givptr, int *givcol, int *ldgcol, float *givnum, int *ldgnum, float *c, float *s, int *info); +void BLAS_FUNC(slasd8)(int *icompq, int *k, float *d, float *z, float *vf, float *vl, float *difl, float *difr, int *lddifr, float *dsigma, float *work, int *info); +void BLAS_FUNC(slasda)(int *icompq, int *smlsiz, int *n, int *sqre, float *d, float *e, float *u, int *ldu, float *vt, int *k, float *difl, float *difr, float *z, float *poles, int *givptr, int *givcol, int *ldgcol, int *perm, float *givnum, float *c, float *s, float *work, int *iwork, int *info); +void BLAS_FUNC(slasdq)(char *uplo, int *sqre, int *n, int *ncvt, int *nru, int *ncc, float *d, float *e, float *vt, int *ldvt, float *u, int *ldu, float *c, int *ldc, float *work, int *info); +void BLAS_FUNC(slasdt)(int *n, int *lvl, int *nd, int *inode, int *ndiml, int *ndimr, int *msub); +void BLAS_FUNC(slaset)(char *uplo, int *m, int *n, float *alpha, float *beta, float *a, int *lda); +void BLAS_FUNC(slasq1)(int *n, float *d, float *e, float *work, int *info); +void BLAS_FUNC(slasq2)(int *n, float *z, int *info); +void BLAS_FUNC(slasq3)(int *i0, int *n0, float *z, int *pp, float *dmin, float *sigma, float *desig, float *qmax, int *nfail, int *iter, int *ndiv, int *ieee, int *ttype, float *dmin1, float *dmin2, float *dn, float *dn1, float *dn2, float *g, float *tau); +void BLAS_FUNC(slasq4)(int *i0, int *n0, float *z, int *pp, int *n0in, float *dmin, float *dmin1, float *dmin2, float *dn, float *dn1, float *dn2, float *tau, int *ttype, float *g); +void BLAS_FUNC(slasq6)(int *i0, int *n0, float 
*z, int *pp, float *dmin, float *dmin1, float *dmin2, float *dn, float *dnm1, float *dnm2); +void BLAS_FUNC(slasr)(char *side, char *pivot, char *direct, int *m, int *n, float *c, float *s, float *a, int *lda); +void BLAS_FUNC(slasrt)(char *id, int *n, float *d, int *info); +void BLAS_FUNC(slassq)(int *n, float *x, int *incx, float *scale, float *sumsq); +void BLAS_FUNC(slasv2)(float *f, float *g, float *h, float *ssmin, float *ssmax, float *snr, float *csr, float *snl, float *csl); +void BLAS_FUNC(slaswp)(int *n, float *a, int *lda, int *k1, int *k2, int *ipiv, int *incx); +void BLAS_FUNC(slasy2)(int *ltranl, int *ltranr, int *isgn, int *n1, int *n2, float *tl, int *ldtl, float *tr, int *ldtr, float *b, int *ldb, float *scale, float *x, int *ldx, float *xnorm, int *info); +void BLAS_FUNC(slasyf)(char *uplo, int *n, int *nb, int *kb, float *a, int *lda, int *ipiv, float *w, int *ldw, int *info); +void BLAS_FUNC(slatbs)(char *uplo, char *trans, char *diag, char *normin, int *n, int *kd, float *ab, int *ldab, float *x, float *scale, float *cnorm, int *info); +void BLAS_FUNC(slatdf)(int *ijob, int *n, float *z, int *ldz, float *rhs, float *rdsum, float *rdscal, int *ipiv, int *jpiv); +void BLAS_FUNC(slatps)(char *uplo, char *trans, char *diag, char *normin, int *n, float *ap, float *x, float *scale, float *cnorm, int *info); +void BLAS_FUNC(slatrd)(char *uplo, int *n, int *nb, float *a, int *lda, float *e, float *tau, float *w, int *ldw); +void BLAS_FUNC(slatrs)(char *uplo, char *trans, char *diag, char *normin, int *n, float *a, int *lda, float *x, float *scale, float *cnorm, int *info); +void BLAS_FUNC(slatrz)(int *m, int *n, int *l, float *a, int *lda, float *tau, float *work); +void BLAS_FUNC(slauu2)(char *uplo, int *n, float *a, int *lda, int *info); +void BLAS_FUNC(slauum)(char *uplo, int *n, float *a, int *lda, int *info); +void BLAS_FUNC(sopgtr)(char *uplo, int *n, float *ap, float *tau, float *q, int *ldq, float *work, int *info); +void BLAS_FUNC(sopmtr)(char *side, char *uplo, char *trans, int *m, int *n, float *ap, float *tau, float *c, int *ldc, float *work, int *info); +void BLAS_FUNC(sorbdb)(char *trans, char *signs, int *m, int *p, int *q, float *x11, int *ldx11, float *x12, int *ldx12, float *x21, int *ldx21, float *x22, int *ldx22, float *theta, float *phi, float *taup1, float *taup2, float *tauq1, float *tauq2, float *work, int *lwork, int *info); +void BLAS_FUNC(sorcsd)(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, char *signs, int *m, int *p, int *q, float *x11, int *ldx11, float *x12, int *ldx12, float *x21, int *ldx21, float *x22, int *ldx22, float *theta, float *u1, int *ldu1, float *u2, int *ldu2, float *v1t, int *ldv1t, float *v2t, int *ldv2t, float *work, int *lwork, int *iwork, int *info); +void BLAS_FUNC(sorg2l)(int *m, int *n, int *k, float *a, int *lda, float *tau, float *work, int *info); +void BLAS_FUNC(sorg2r)(int *m, int *n, int *k, float *a, int *lda, float *tau, float *work, int *info); +void BLAS_FUNC(sorgbr)(char *vect, int *m, int *n, int *k, float *a, int *lda, float *tau, float *work, int *lwork, int *info); +void BLAS_FUNC(sorghr)(int *n, int *ilo, int *ihi, float *a, int *lda, float *tau, float *work, int *lwork, int *info); +void BLAS_FUNC(sorgl2)(int *m, int *n, int *k, float *a, int *lda, float *tau, float *work, int *info); +void BLAS_FUNC(sorglq)(int *m, int *n, int *k, float *a, int *lda, float *tau, float *work, int *lwork, int *info); +void BLAS_FUNC(sorgql)(int *m, int *n, int *k, float *a, int *lda, float *tau, float 
*work, int *lwork, int *info); +void BLAS_FUNC(sorgqr)(int *m, int *n, int *k, float *a, int *lda, float *tau, float *work, int *lwork, int *info); +void BLAS_FUNC(sorgr2)(int *m, int *n, int *k, float *a, int *lda, float *tau, float *work, int *info); +void BLAS_FUNC(sorgrq)(int *m, int *n, int *k, float *a, int *lda, float *tau, float *work, int *lwork, int *info); +void BLAS_FUNC(sorgtr)(char *uplo, int *n, float *a, int *lda, float *tau, float *work, int *lwork, int *info); +void BLAS_FUNC(sorm2l)(char *side, char *trans, int *m, int *n, int *k, float *a, int *lda, float *tau, float *c, int *ldc, float *work, int *info); +void BLAS_FUNC(sorm2r)(char *side, char *trans, int *m, int *n, int *k, float *a, int *lda, float *tau, float *c, int *ldc, float *work, int *info); +void BLAS_FUNC(sormbr)(char *vect, char *side, char *trans, int *m, int *n, int *k, float *a, int *lda, float *tau, float *c, int *ldc, float *work, int *lwork, int *info); +void BLAS_FUNC(sormhr)(char *side, char *trans, int *m, int *n, int *ilo, int *ihi, float *a, int *lda, float *tau, float *c, int *ldc, float *work, int *lwork, int *info); +void BLAS_FUNC(sorml2)(char *side, char *trans, int *m, int *n, int *k, float *a, int *lda, float *tau, float *c, int *ldc, float *work, int *info); +void BLAS_FUNC(sormlq)(char *side, char *trans, int *m, int *n, int *k, float *a, int *lda, float *tau, float *c, int *ldc, float *work, int *lwork, int *info); +void BLAS_FUNC(sormql)(char *side, char *trans, int *m, int *n, int *k, float *a, int *lda, float *tau, float *c, int *ldc, float *work, int *lwork, int *info); +void BLAS_FUNC(sormqr)(char *side, char *trans, int *m, int *n, int *k, float *a, int *lda, float *tau, float *c, int *ldc, float *work, int *lwork, int *info); +void BLAS_FUNC(sormr2)(char *side, char *trans, int *m, int *n, int *k, float *a, int *lda, float *tau, float *c, int *ldc, float *work, int *info); +void BLAS_FUNC(sormr3)(char *side, char *trans, int *m, int *n, int *k, int *l, float *a, int *lda, float *tau, float *c, int *ldc, float *work, int *info); +void BLAS_FUNC(sormrq)(char *side, char *trans, int *m, int *n, int *k, float *a, int *lda, float *tau, float *c, int *ldc, float *work, int *lwork, int *info); +void BLAS_FUNC(sormrz)(char *side, char *trans, int *m, int *n, int *k, int *l, float *a, int *lda, float *tau, float *c, int *ldc, float *work, int *lwork, int *info); +void BLAS_FUNC(sormtr)(char *side, char *uplo, char *trans, int *m, int *n, float *a, int *lda, float *tau, float *c, int *ldc, float *work, int *lwork, int *info); +void BLAS_FUNC(spbcon)(char *uplo, int *n, int *kd, float *ab, int *ldab, float *anorm, float *rcond, float *work, int *iwork, int *info); +void BLAS_FUNC(spbequ)(char *uplo, int *n, int *kd, float *ab, int *ldab, float *s, float *scond, float *amax, int *info); +void BLAS_FUNC(spbrfs)(char *uplo, int *n, int *kd, int *nrhs, float *ab, int *ldab, float *afb, int *ldafb, float *b, int *ldb, float *x, int *ldx, float *ferr, float *berr, float *work, int *iwork, int *info); +void BLAS_FUNC(spbstf)(char *uplo, int *n, int *kd, float *ab, int *ldab, int *info); +void BLAS_FUNC(spbsv)(char *uplo, int *n, int *kd, int *nrhs, float *ab, int *ldab, float *b, int *ldb, int *info); +void BLAS_FUNC(spbsvx)(char *fact, char *uplo, int *n, int *kd, int *nrhs, float *ab, int *ldab, float *afb, int *ldafb, char *equed, float *s, float *b, int *ldb, float *x, int *ldx, float *rcond, float *ferr, float *berr, float *work, int *iwork, int *info); +void BLAS_FUNC(spbtf2)(char *uplo, 
int *n, int *kd, float *ab, int *ldab, int *info); +void BLAS_FUNC(spbtrf)(char *uplo, int *n, int *kd, float *ab, int *ldab, int *info); +void BLAS_FUNC(spbtrs)(char *uplo, int *n, int *kd, int *nrhs, float *ab, int *ldab, float *b, int *ldb, int *info); +void BLAS_FUNC(spftrf)(char *transr, char *uplo, int *n, float *a, int *info); +void BLAS_FUNC(spftri)(char *transr, char *uplo, int *n, float *a, int *info); +void BLAS_FUNC(spftrs)(char *transr, char *uplo, int *n, int *nrhs, float *a, float *b, int *ldb, int *info); +void BLAS_FUNC(spocon)(char *uplo, int *n, float *a, int *lda, float *anorm, float *rcond, float *work, int *iwork, int *info); +void BLAS_FUNC(spoequ)(int *n, float *a, int *lda, float *s, float *scond, float *amax, int *info); +void BLAS_FUNC(spoequb)(int *n, float *a, int *lda, float *s, float *scond, float *amax, int *info); +void BLAS_FUNC(sporfs)(char *uplo, int *n, int *nrhs, float *a, int *lda, float *af, int *ldaf, float *b, int *ldb, float *x, int *ldx, float *ferr, float *berr, float *work, int *iwork, int *info); +void BLAS_FUNC(sposv)(char *uplo, int *n, int *nrhs, float *a, int *lda, float *b, int *ldb, int *info); +void BLAS_FUNC(sposvx)(char *fact, char *uplo, int *n, int *nrhs, float *a, int *lda, float *af, int *ldaf, char *equed, float *s, float *b, int *ldb, float *x, int *ldx, float *rcond, float *ferr, float *berr, float *work, int *iwork, int *info); +void BLAS_FUNC(spotf2)(char *uplo, int *n, float *a, int *lda, int *info); +void BLAS_FUNC(spotrf)(char *uplo, int *n, float *a, int *lda, int *info); +void BLAS_FUNC(spotri)(char *uplo, int *n, float *a, int *lda, int *info); +void BLAS_FUNC(spotrs)(char *uplo, int *n, int *nrhs, float *a, int *lda, float *b, int *ldb, int *info); +void BLAS_FUNC(sppcon)(char *uplo, int *n, float *ap, float *anorm, float *rcond, float *work, int *iwork, int *info); +void BLAS_FUNC(sppequ)(char *uplo, int *n, float *ap, float *s, float *scond, float *amax, int *info); +void BLAS_FUNC(spprfs)(char *uplo, int *n, int *nrhs, float *ap, float *afp, float *b, int *ldb, float *x, int *ldx, float *ferr, float *berr, float *work, int *iwork, int *info); +void BLAS_FUNC(sppsv)(char *uplo, int *n, int *nrhs, float *ap, float *b, int *ldb, int *info); +void BLAS_FUNC(sppsvx)(char *fact, char *uplo, int *n, int *nrhs, float *ap, float *afp, char *equed, float *s, float *b, int *ldb, float *x, int *ldx, float *rcond, float *ferr, float *berr, float *work, int *iwork, int *info); +void BLAS_FUNC(spptrf)(char *uplo, int *n, float *ap, int *info); +void BLAS_FUNC(spptri)(char *uplo, int *n, float *ap, int *info); +void BLAS_FUNC(spptrs)(char *uplo, int *n, int *nrhs, float *ap, float *b, int *ldb, int *info); +void BLAS_FUNC(spstf2)(char *uplo, int *n, float *a, int *lda, int *piv, int *rank, float *tol, float *work, int *info); +void BLAS_FUNC(spstrf)(char *uplo, int *n, float *a, int *lda, int *piv, int *rank, float *tol, float *work, int *info); +void BLAS_FUNC(sptcon)(int *n, float *d, float *e, float *anorm, float *rcond, float *work, int *info); +void BLAS_FUNC(spteqr)(char *compz, int *n, float *d, float *e, float *z, int *ldz, float *work, int *info); +void BLAS_FUNC(sptrfs)(int *n, int *nrhs, float *d, float *e, float *df, float *ef, float *b, int *ldb, float *x, int *ldx, float *ferr, float *berr, float *work, int *info); +void BLAS_FUNC(sptsv)(int *n, int *nrhs, float *d, float *e, float *b, int *ldb, int *info); +void BLAS_FUNC(sptsvx)(char *fact, int *n, int *nrhs, float *d, float *e, float *df, float *ef, float *b, int 
*ldb, float *x, int *ldx, float *rcond, float *ferr, float *berr, float *work, int *info); +void BLAS_FUNC(spttrf)(int *n, float *d, float *e, int *info); +void BLAS_FUNC(spttrs)(int *n, int *nrhs, float *d, float *e, float *b, int *ldb, int *info); +void BLAS_FUNC(sptts2)(int *n, int *nrhs, float *d, float *e, float *b, int *ldb); +void BLAS_FUNC(srscl)(int *n, float *sa, float *sx, int *incx); +void BLAS_FUNC(ssbev)(char *jobz, char *uplo, int *n, int *kd, float *ab, int *ldab, float *w, float *z, int *ldz, float *work, int *info); +void BLAS_FUNC(ssbevd)(char *jobz, char *uplo, int *n, int *kd, float *ab, int *ldab, float *w, float *z, int *ldz, float *work, int *lwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(ssbevx)(char *jobz, char *range, char *uplo, int *n, int *kd, float *ab, int *ldab, float *q, int *ldq, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, float *z, int *ldz, float *work, int *iwork, int *ifail, int *info); +void BLAS_FUNC(ssbgst)(char *vect, char *uplo, int *n, int *ka, int *kb, float *ab, int *ldab, float *bb, int *ldbb, float *x, int *ldx, float *work, int *info); +void BLAS_FUNC(ssbgv)(char *jobz, char *uplo, int *n, int *ka, int *kb, float *ab, int *ldab, float *bb, int *ldbb, float *w, float *z, int *ldz, float *work, int *info); +void BLAS_FUNC(ssbgvd)(char *jobz, char *uplo, int *n, int *ka, int *kb, float *ab, int *ldab, float *bb, int *ldbb, float *w, float *z, int *ldz, float *work, int *lwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(ssbgvx)(char *jobz, char *range, char *uplo, int *n, int *ka, int *kb, float *ab, int *ldab, float *bb, int *ldbb, float *q, int *ldq, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, float *z, int *ldz, float *work, int *iwork, int *ifail, int *info); +void BLAS_FUNC(ssbtrd)(char *vect, char *uplo, int *n, int *kd, float *ab, int *ldab, float *d, float *e, float *q, int *ldq, float *work, int *info); +void BLAS_FUNC(ssfrk)(char *transr, char *uplo, char *trans, int *n, int *k, float *alpha, float *a, int *lda, float *beta, float *c); +void BLAS_FUNC(sspcon)(char *uplo, int *n, float *ap, int *ipiv, float *anorm, float *rcond, float *work, int *iwork, int *info); +void BLAS_FUNC(sspev)(char *jobz, char *uplo, int *n, float *ap, float *w, float *z, int *ldz, float *work, int *info); +void BLAS_FUNC(sspevd)(char *jobz, char *uplo, int *n, float *ap, float *w, float *z, int *ldz, float *work, int *lwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(sspevx)(char *jobz, char *range, char *uplo, int *n, float *ap, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, float *z, int *ldz, float *work, int *iwork, int *ifail, int *info); +void BLAS_FUNC(sspgst)(int *itype, char *uplo, int *n, float *ap, float *bp, int *info); +void BLAS_FUNC(sspgv)(int *itype, char *jobz, char *uplo, int *n, float *ap, float *bp, float *w, float *z, int *ldz, float *work, int *info); +void BLAS_FUNC(sspgvd)(int *itype, char *jobz, char *uplo, int *n, float *ap, float *bp, float *w, float *z, int *ldz, float *work, int *lwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(sspgvx)(int *itype, char *jobz, char *range, char *uplo, int *n, float *ap, float *bp, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, float *z, int *ldz, float *work, int *iwork, int *ifail, int *info); +void BLAS_FUNC(ssprfs)(char *uplo, int *n, int *nrhs, float *ap, float *afp, int *ipiv, float *b, int *ldb, float *x, int *ldx, float *ferr, float 
*berr, float *work, int *iwork, int *info); +void BLAS_FUNC(sspsv)(char *uplo, int *n, int *nrhs, float *ap, int *ipiv, float *b, int *ldb, int *info); +void BLAS_FUNC(sspsvx)(char *fact, char *uplo, int *n, int *nrhs, float *ap, float *afp, int *ipiv, float *b, int *ldb, float *x, int *ldx, float *rcond, float *ferr, float *berr, float *work, int *iwork, int *info); +void BLAS_FUNC(ssptrd)(char *uplo, int *n, float *ap, float *d, float *e, float *tau, int *info); +void BLAS_FUNC(ssptrf)(char *uplo, int *n, float *ap, int *ipiv, int *info); +void BLAS_FUNC(ssptri)(char *uplo, int *n, float *ap, int *ipiv, float *work, int *info); +void BLAS_FUNC(ssptrs)(char *uplo, int *n, int *nrhs, float *ap, int *ipiv, float *b, int *ldb, int *info); +void BLAS_FUNC(sstebz)(char *range, char *order, int *n, float *vl, float *vu, int *il, int *iu, float *abstol, float *d, float *e, int *m, int *nsplit, float *w, int *iblock, int *isplit, float *work, int *iwork, int *info); +void BLAS_FUNC(sstedc)(char *compz, int *n, float *d, float *e, float *z, int *ldz, float *work, int *lwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(sstegr)(char *jobz, char *range, int *n, float *d, float *e, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, float *z, int *ldz, int *isuppz, float *work, int *lwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(sstein)(int *n, float *d, float *e, int *m, float *w, int *iblock, int *isplit, float *z, int *ldz, float *work, int *iwork, int *ifail, int *info); +void BLAS_FUNC(sstemr)(char *jobz, char *range, int *n, float *d, float *e, float *vl, float *vu, int *il, int *iu, int *m, float *w, float *z, int *ldz, int *nzc, int *isuppz, int *tryrac, float *work, int *lwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(ssteqr)(char *compz, int *n, float *d, float *e, float *z, int *ldz, float *work, int *info); +void BLAS_FUNC(ssterf)(int *n, float *d, float *e, int *info); +void BLAS_FUNC(sstev)(char *jobz, int *n, float *d, float *e, float *z, int *ldz, float *work, int *info); +void BLAS_FUNC(sstevd)(char *jobz, int *n, float *d, float *e, float *z, int *ldz, float *work, int *lwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(sstevr)(char *jobz, char *range, int *n, float *d, float *e, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, float *z, int *ldz, int *isuppz, float *work, int *lwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(sstevx)(char *jobz, char *range, int *n, float *d, float *e, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, float *z, int *ldz, float *work, int *iwork, int *ifail, int *info); +void BLAS_FUNC(ssycon)(char *uplo, int *n, float *a, int *lda, int *ipiv, float *anorm, float *rcond, float *work, int *iwork, int *info); +void BLAS_FUNC(ssyconv)(char *uplo, char *way, int *n, float *a, int *lda, int *ipiv, float *work, int *info); +void BLAS_FUNC(ssyequb)(char *uplo, int *n, float *a, int *lda, float *s, float *scond, float *amax, float *work, int *info); +void BLAS_FUNC(ssyev)(char *jobz, char *uplo, int *n, float *a, int *lda, float *w, float *work, int *lwork, int *info); +void BLAS_FUNC(ssyevd)(char *jobz, char *uplo, int *n, float *a, int *lda, float *w, float *work, int *lwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(ssyevr)(char *jobz, char *range, char *uplo, int *n, float *a, int *lda, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, float *z, int *ldz, int *isuppz, float *work, int *lwork, int 
*iwork, int *liwork, int *info); +void BLAS_FUNC(ssyevx)(char *jobz, char *range, char *uplo, int *n, float *a, int *lda, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, float *z, int *ldz, float *work, int *lwork, int *iwork, int *ifail, int *info); +void BLAS_FUNC(ssygs2)(int *itype, char *uplo, int *n, float *a, int *lda, float *b, int *ldb, int *info); +void BLAS_FUNC(ssygst)(int *itype, char *uplo, int *n, float *a, int *lda, float *b, int *ldb, int *info); +void BLAS_FUNC(ssygv)(int *itype, char *jobz, char *uplo, int *n, float *a, int *lda, float *b, int *ldb, float *w, float *work, int *lwork, int *info); +void BLAS_FUNC(ssygvd)(int *itype, char *jobz, char *uplo, int *n, float *a, int *lda, float *b, int *ldb, float *w, float *work, int *lwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(ssygvx)(int *itype, char *jobz, char *range, char *uplo, int *n, float *a, int *lda, float *b, int *ldb, float *vl, float *vu, int *il, int *iu, float *abstol, int *m, float *w, float *z, int *ldz, float *work, int *lwork, int *iwork, int *ifail, int *info); +void BLAS_FUNC(ssyrfs)(char *uplo, int *n, int *nrhs, float *a, int *lda, float *af, int *ldaf, int *ipiv, float *b, int *ldb, float *x, int *ldx, float *ferr, float *berr, float *work, int *iwork, int *info); +void BLAS_FUNC(ssysv)(char *uplo, int *n, int *nrhs, float *a, int *lda, int *ipiv, float *b, int *ldb, float *work, int *lwork, int *info); +void BLAS_FUNC(ssysvx)(char *fact, char *uplo, int *n, int *nrhs, float *a, int *lda, float *af, int *ldaf, int *ipiv, float *b, int *ldb, float *x, int *ldx, float *rcond, float *ferr, float *berr, float *work, int *lwork, int *iwork, int *info); +void BLAS_FUNC(ssyswapr)(char *uplo, int *n, float *a, int *lda, int *i1, int *i2); +void BLAS_FUNC(ssytd2)(char *uplo, int *n, float *a, int *lda, float *d, float *e, float *tau, int *info); +void BLAS_FUNC(ssytf2)(char *uplo, int *n, float *a, int *lda, int *ipiv, int *info); +void BLAS_FUNC(ssytrd)(char *uplo, int *n, float *a, int *lda, float *d, float *e, float *tau, float *work, int *lwork, int *info); +void BLAS_FUNC(ssytrf)(char *uplo, int *n, float *a, int *lda, int *ipiv, float *work, int *lwork, int *info); +void BLAS_FUNC(ssytri)(char *uplo, int *n, float *a, int *lda, int *ipiv, float *work, int *info); +void BLAS_FUNC(ssytri2)(char *uplo, int *n, float *a, int *lda, int *ipiv, float *work, int *lwork, int *info); +void BLAS_FUNC(ssytri2x)(char *uplo, int *n, float *a, int *lda, int *ipiv, float *work, int *nb, int *info); +void BLAS_FUNC(ssytrs)(char *uplo, int *n, int *nrhs, float *a, int *lda, int *ipiv, float *b, int *ldb, int *info); +void BLAS_FUNC(ssytrs2)(char *uplo, int *n, int *nrhs, float *a, int *lda, int *ipiv, float *b, int *ldb, float *work, int *info); +void BLAS_FUNC(stbcon)(char *norm, char *uplo, char *diag, int *n, int *kd, float *ab, int *ldab, float *rcond, float *work, int *iwork, int *info); +void BLAS_FUNC(stbrfs)(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, float *ab, int *ldab, float *b, int *ldb, float *x, int *ldx, float *ferr, float *berr, float *work, int *iwork, int *info); +void BLAS_FUNC(stbtrs)(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, float *ab, int *ldab, float *b, int *ldb, int *info); +void BLAS_FUNC(stfsm)(char *transr, char *side, char *uplo, char *trans, char *diag, int *m, int *n, float *alpha, float *a, float *b, int *ldb); +void BLAS_FUNC(stftri)(char *transr, char *uplo, char *diag, int *n, float *a, int *info); +void 
BLAS_FUNC(stfttp)(char *transr, char *uplo, int *n, float *arf, float *ap, int *info); +void BLAS_FUNC(stfttr)(char *transr, char *uplo, int *n, float *arf, float *a, int *lda, int *info); +void BLAS_FUNC(stgevc)(char *side, char *howmny, int *select, int *n, float *s, int *lds, float *p, int *ldp, float *vl, int *ldvl, float *vr, int *ldvr, int *mm, int *m, float *work, int *info); +void BLAS_FUNC(stgex2)(int *wantq, int *wantz, int *n, float *a, int *lda, float *b, int *ldb, float *q, int *ldq, float *z, int *ldz, int *j1, int *n1, int *n2, float *work, int *lwork, int *info); +void BLAS_FUNC(stgexc)(int *wantq, int *wantz, int *n, float *a, int *lda, float *b, int *ldb, float *q, int *ldq, float *z, int *ldz, int *ifst, int *ilst, float *work, int *lwork, int *info); +void BLAS_FUNC(stgsen)(int *ijob, int *wantq, int *wantz, int *select, int *n, float *a, int *lda, float *b, int *ldb, float *alphar, float *alphai, float *beta, float *q, int *ldq, float *z, int *ldz, int *m, float *pl, float *pr, float *dif, float *work, int *lwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(stgsja)(char *jobu, char *jobv, char *jobq, int *m, int *p, int *n, int *k, int *l, float *a, int *lda, float *b, int *ldb, float *tola, float *tolb, float *alpha, float *beta, float *u, int *ldu, float *v, int *ldv, float *q, int *ldq, float *work, int *ncycle, int *info); +void BLAS_FUNC(stgsna)(char *job, char *howmny, int *select, int *n, float *a, int *lda, float *b, int *ldb, float *vl, int *ldvl, float *vr, int *ldvr, float *s, float *dif, int *mm, int *m, float *work, int *lwork, int *iwork, int *info); +void BLAS_FUNC(stgsy2)(char *trans, int *ijob, int *m, int *n, float *a, int *lda, float *b, int *ldb, float *c, int *ldc, float *d, int *ldd, float *e, int *lde, float *f, int *ldf, float *scale, float *rdsum, float *rdscal, int *iwork, int *pq, int *info); +void BLAS_FUNC(stgsyl)(char *trans, int *ijob, int *m, int *n, float *a, int *lda, float *b, int *ldb, float *c, int *ldc, float *d, int *ldd, float *e, int *lde, float *f, int *ldf, float *scale, float *dif, float *work, int *lwork, int *iwork, int *info); +void BLAS_FUNC(stpcon)(char *norm, char *uplo, char *diag, int *n, float *ap, float *rcond, float *work, int *iwork, int *info); +void BLAS_FUNC(stpmqrt)(char *side, char *trans, int *m, int *n, int *k, int *l, int *nb, float *v, int *ldv, float *t, int *ldt, float *a, int *lda, float *b, int *ldb, float *work, int *info); +void BLAS_FUNC(stpqrt)(int *m, int *n, int *l, int *nb, float *a, int *lda, float *b, int *ldb, float *t, int *ldt, float *work, int *info); +void BLAS_FUNC(stpqrt2)(int *m, int *n, int *l, float *a, int *lda, float *b, int *ldb, float *t, int *ldt, int *info); +void BLAS_FUNC(stprfb)(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, float *v, int *ldv, float *t, int *ldt, float *a, int *lda, float *b, int *ldb, float *work, int *ldwork); +void BLAS_FUNC(stprfs)(char *uplo, char *trans, char *diag, int *n, int *nrhs, float *ap, float *b, int *ldb, float *x, int *ldx, float *ferr, float *berr, float *work, int *iwork, int *info); +void BLAS_FUNC(stptri)(char *uplo, char *diag, int *n, float *ap, int *info); +void BLAS_FUNC(stptrs)(char *uplo, char *trans, char *diag, int *n, int *nrhs, float *ap, float *b, int *ldb, int *info); +void BLAS_FUNC(stpttf)(char *transr, char *uplo, int *n, float *ap, float *arf, int *info); +void BLAS_FUNC(stpttr)(char *uplo, int *n, float *ap, float *a, int *lda, int *info); +void BLAS_FUNC(strcon)(char 
*norm, char *uplo, char *diag, int *n, float *a, int *lda, float *rcond, float *work, int *iwork, int *info); +void BLAS_FUNC(strevc)(char *side, char *howmny, int *select, int *n, float *t, int *ldt, float *vl, int *ldvl, float *vr, int *ldvr, int *mm, int *m, float *work, int *info); +void BLAS_FUNC(strexc)(char *compq, int *n, float *t, int *ldt, float *q, int *ldq, int *ifst, int *ilst, float *work, int *info); +void BLAS_FUNC(strrfs)(char *uplo, char *trans, char *diag, int *n, int *nrhs, float *a, int *lda, float *b, int *ldb, float *x, int *ldx, float *ferr, float *berr, float *work, int *iwork, int *info); +void BLAS_FUNC(strsen)(char *job, char *compq, int *select, int *n, float *t, int *ldt, float *q, int *ldq, float *wr, float *wi, int *m, float *s, float *sep, float *work, int *lwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(strsna)(char *job, char *howmny, int *select, int *n, float *t, int *ldt, float *vl, int *ldvl, float *vr, int *ldvr, float *s, float *sep, int *mm, int *m, float *work, int *ldwork, int *iwork, int *info); +void BLAS_FUNC(strsyl)(char *trana, char *tranb, int *isgn, int *m, int *n, float *a, int *lda, float *b, int *ldb, float *c, int *ldc, float *scale, int *info); +void BLAS_FUNC(strti2)(char *uplo, char *diag, int *n, float *a, int *lda, int *info); +void BLAS_FUNC(strtri)(char *uplo, char *diag, int *n, float *a, int *lda, int *info); +void BLAS_FUNC(strtrs)(char *uplo, char *trans, char *diag, int *n, int *nrhs, float *a, int *lda, float *b, int *ldb, int *info); +void BLAS_FUNC(strttf)(char *transr, char *uplo, int *n, float *a, int *lda, float *arf, int *info); +void BLAS_FUNC(strttp)(char *uplo, int *n, float *a, int *lda, float *ap, int *info); +void BLAS_FUNC(stzrzf)(int *m, int *n, float *a, int *lda, float *tau, float *work, int *lwork, int *info); +void BLAS_FUNC(xerbla_array)(char *srname_array, int *srname_len, int *info); +void BLAS_FUNC(zbbcsd)(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, int *m, int *p, int *q, double *theta, double *phi, npy_complex128 *u1, int *ldu1, npy_complex128 *u2, int *ldu2, npy_complex128 *v1t, int *ldv1t, npy_complex128 *v2t, int *ldv2t, double *b11d, double *b11e, double *b12d, double *b12e, double *b21d, double *b21e, double *b22d, double *b22e, double *rwork, int *lrwork, int *info); +void BLAS_FUNC(zbdsqr)(char *uplo, int *n, int *ncvt, int *nru, int *ncc, double *d, double *e, npy_complex128 *vt, int *ldvt, npy_complex128 *u, int *ldu, npy_complex128 *c, int *ldc, double *rwork, int *info); +void BLAS_FUNC(zcgesv)(int *n, int *nrhs, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, npy_complex128 *work, npy_complex64 *swork, double *rwork, int *iter, int *info); +void BLAS_FUNC(zcposv)(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, npy_complex128 *work, npy_complex64 *swork, double *rwork, int *iter, int *info); +void BLAS_FUNC(zdrscl)(int *n, double *sa, npy_complex128 *sx, int *incx); +void BLAS_FUNC(zgbbrd)(char *vect, int *m, int *n, int *ncc, int *kl, int *ku, npy_complex128 *ab, int *ldab, double *d, double *e, npy_complex128 *q, int *ldq, npy_complex128 *pt, int *ldpt, npy_complex128 *c, int *ldc, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(zgbcon)(char *norm, int *n, int *kl, int *ku, npy_complex128 *ab, int *ldab, int *ipiv, double *anorm, double *rcond, npy_complex128 *work, double *rwork, int *info); +void 
BLAS_FUNC(zgbequ)(int *m, int *n, int *kl, int *ku, npy_complex128 *ab, int *ldab, double *r, double *c, double *rowcnd, double *colcnd, double *amax, int *info); +void BLAS_FUNC(zgbequb)(int *m, int *n, int *kl, int *ku, npy_complex128 *ab, int *ldab, double *r, double *c, double *rowcnd, double *colcnd, double *amax, int *info); +void BLAS_FUNC(zgbrfs)(char *trans, int *n, int *kl, int *ku, int *nrhs, npy_complex128 *ab, int *ldab, npy_complex128 *afb, int *ldafb, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(zgbsv)(int *n, int *kl, int *ku, int *nrhs, npy_complex128 *ab, int *ldab, int *ipiv, npy_complex128 *b, int *ldb, int *info); +void BLAS_FUNC(zgbsvx)(char *fact, char *trans, int *n, int *kl, int *ku, int *nrhs, npy_complex128 *ab, int *ldab, npy_complex128 *afb, int *ldafb, int *ipiv, char *equed, double *r, double *c, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *rcond, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(zgbtf2)(int *m, int *n, int *kl, int *ku, npy_complex128 *ab, int *ldab, int *ipiv, int *info); +void BLAS_FUNC(zgbtrf)(int *m, int *n, int *kl, int *ku, npy_complex128 *ab, int *ldab, int *ipiv, int *info); +void BLAS_FUNC(zgbtrs)(char *trans, int *n, int *kl, int *ku, int *nrhs, npy_complex128 *ab, int *ldab, int *ipiv, npy_complex128 *b, int *ldb, int *info); +void BLAS_FUNC(zgebak)(char *job, char *side, int *n, int *ilo, int *ihi, double *scale, int *m, npy_complex128 *v, int *ldv, int *info); +void BLAS_FUNC(zgebal)(char *job, int *n, npy_complex128 *a, int *lda, int *ilo, int *ihi, double *scale, int *info); +void BLAS_FUNC(zgebd2)(int *m, int *n, npy_complex128 *a, int *lda, double *d, double *e, npy_complex128 *tauq, npy_complex128 *taup, npy_complex128 *work, int *info); +void BLAS_FUNC(zgebrd)(int *m, int *n, npy_complex128 *a, int *lda, double *d, double *e, npy_complex128 *tauq, npy_complex128 *taup, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zgecon)(char *norm, int *n, npy_complex128 *a, int *lda, double *anorm, double *rcond, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(zgeequ)(int *m, int *n, npy_complex128 *a, int *lda, double *r, double *c, double *rowcnd, double *colcnd, double *amax, int *info); +void BLAS_FUNC(zgeequb)(int *m, int *n, npy_complex128 *a, int *lda, double *r, double *c, double *rowcnd, double *colcnd, double *amax, int *info); +void BLAS_FUNC(zgees)(char *jobvs, char *sort, _zselect1 *select, int *n, npy_complex128 *a, int *lda, int *sdim, npy_complex128 *w, npy_complex128 *vs, int *ldvs, npy_complex128 *work, int *lwork, double *rwork, int *bwork, int *info); +void BLAS_FUNC(zgeesx)(char *jobvs, char *sort, _zselect1 *select, char *sense, int *n, npy_complex128 *a, int *lda, int *sdim, npy_complex128 *w, npy_complex128 *vs, int *ldvs, double *rconde, double *rcondv, npy_complex128 *work, int *lwork, double *rwork, int *bwork, int *info); +void BLAS_FUNC(zgeev)(char *jobvl, char *jobvr, int *n, npy_complex128 *a, int *lda, npy_complex128 *w, npy_complex128 *vl, int *ldvl, npy_complex128 *vr, int *ldvr, npy_complex128 *work, int *lwork, double *rwork, int *info); +void BLAS_FUNC(zgeevx)(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, npy_complex128 *a, int *lda, npy_complex128 *w, npy_complex128 *vl, int *ldvl, npy_complex128 *vr, int *ldvr, int *ilo, int *ihi, double *scale, double *abnrm, double 
*rconde, double *rcondv, npy_complex128 *work, int *lwork, double *rwork, int *info); +void BLAS_FUNC(zgehd2)(int *n, int *ilo, int *ihi, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *info); +void BLAS_FUNC(zgehrd)(int *n, int *ilo, int *ihi, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zgelq2)(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *info); +void BLAS_FUNC(zgelqf)(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zgels)(char *trans, int *m, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zgelsd)(int *m, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, double *s, double *rcond, int *rank, npy_complex128 *work, int *lwork, double *rwork, int *iwork, int *info); +void BLAS_FUNC(zgelss)(int *m, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, double *s, double *rcond, int *rank, npy_complex128 *work, int *lwork, double *rwork, int *info); +void BLAS_FUNC(zgelsy)(int *m, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, int *jpvt, double *rcond, int *rank, npy_complex128 *work, int *lwork, double *rwork, int *info); +void BLAS_FUNC(zgemqrt)(char *side, char *trans, int *m, int *n, int *k, int *nb, npy_complex128 *v, int *ldv, npy_complex128 *t, int *ldt, npy_complex128 *c, int *ldc, npy_complex128 *work, int *info); +void BLAS_FUNC(zgeql2)(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *info); +void BLAS_FUNC(zgeqlf)(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zgeqp3)(int *m, int *n, npy_complex128 *a, int *lda, int *jpvt, npy_complex128 *tau, npy_complex128 *work, int *lwork, double *rwork, int *info); +void BLAS_FUNC(zgeqr2)(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *info); +void BLAS_FUNC(zgeqr2p)(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *info); +void BLAS_FUNC(zgeqrf)(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zgeqrfp)(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zgeqrt)(int *m, int *n, int *nb, npy_complex128 *a, int *lda, npy_complex128 *t, int *ldt, npy_complex128 *work, int *info); +void BLAS_FUNC(zgeqrt2)(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *t, int *ldt, int *info); +void BLAS_FUNC(zgeqrt3)(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *t, int *ldt, int *info); +void BLAS_FUNC(zgerfs)(char *trans, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *af, int *ldaf, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(zgerq2)(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *info); +void BLAS_FUNC(zgerqf)(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zgesc2)(int *n, npy_complex128 *a, int *lda, npy_complex128 *rhs, int 
*ipiv, int *jpiv, double *scale); +void BLAS_FUNC(zgesdd)(char *jobz, int *m, int *n, npy_complex128 *a, int *lda, double *s, npy_complex128 *u, int *ldu, npy_complex128 *vt, int *ldvt, npy_complex128 *work, int *lwork, double *rwork, int *iwork, int *info); +void BLAS_FUNC(zgesv)(int *n, int *nrhs, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *b, int *ldb, int *info); +void BLAS_FUNC(zgesvd)(char *jobu, char *jobvt, int *m, int *n, npy_complex128 *a, int *lda, double *s, npy_complex128 *u, int *ldu, npy_complex128 *vt, int *ldvt, npy_complex128 *work, int *lwork, double *rwork, int *info); +void BLAS_FUNC(zgesvx)(char *fact, char *trans, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *af, int *ldaf, int *ipiv, char *equed, double *r, double *c, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *rcond, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(zgetc2)(int *n, npy_complex128 *a, int *lda, int *ipiv, int *jpiv, int *info); +void BLAS_FUNC(zgetf2)(int *m, int *n, npy_complex128 *a, int *lda, int *ipiv, int *info); +void BLAS_FUNC(zgetrf)(int *m, int *n, npy_complex128 *a, int *lda, int *ipiv, int *info); +void BLAS_FUNC(zgetri)(int *n, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zgetrs)(char *trans, int *n, int *nrhs, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *b, int *ldb, int *info); +void BLAS_FUNC(zggbak)(char *job, char *side, int *n, int *ilo, int *ihi, double *lscale, double *rscale, int *m, npy_complex128 *v, int *ldv, int *info); +void BLAS_FUNC(zggbal)(char *job, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, int *ilo, int *ihi, double *lscale, double *rscale, double *work, int *info); +void BLAS_FUNC(zgges)(char *jobvsl, char *jobvsr, char *sort, _zselect2 *selctg, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, int *sdim, npy_complex128 *alpha, npy_complex128 *beta, npy_complex128 *vsl, int *ldvsl, npy_complex128 *vsr, int *ldvsr, npy_complex128 *work, int *lwork, double *rwork, int *bwork, int *info); +void BLAS_FUNC(zggesx)(char *jobvsl, char *jobvsr, char *sort, _zselect2 *selctg, char *sense, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, int *sdim, npy_complex128 *alpha, npy_complex128 *beta, npy_complex128 *vsl, int *ldvsl, npy_complex128 *vsr, int *ldvsr, double *rconde, double *rcondv, npy_complex128 *work, int *lwork, double *rwork, int *iwork, int *liwork, int *bwork, int *info); +void BLAS_FUNC(zggev)(char *jobvl, char *jobvr, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *alpha, npy_complex128 *beta, npy_complex128 *vl, int *ldvl, npy_complex128 *vr, int *ldvr, npy_complex128 *work, int *lwork, double *rwork, int *info); +void BLAS_FUNC(zggevx)(char *balanc, char *jobvl, char *jobvr, char *sense, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *alpha, npy_complex128 *beta, npy_complex128 *vl, int *ldvl, npy_complex128 *vr, int *ldvr, int *ilo, int *ihi, double *lscale, double *rscale, double *abnrm, double *bbnrm, double *rconde, double *rcondv, npy_complex128 *work, int *lwork, double *rwork, int *iwork, int *bwork, int *info); +void BLAS_FUNC(zggglm)(int *n, int *m, int *p, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *d, npy_complex128 *x, npy_complex128 *y, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zgghrd)(char *compq, char *compz, int 
*n, int *ilo, int *ihi, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *q, int *ldq, npy_complex128 *z, int *ldz, int *info); +void BLAS_FUNC(zgglse)(int *m, int *n, int *p, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *c, npy_complex128 *d, npy_complex128 *x, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zggqrf)(int *n, int *m, int *p, npy_complex128 *a, int *lda, npy_complex128 *taua, npy_complex128 *b, int *ldb, npy_complex128 *taub, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zggrqf)(int *m, int *p, int *n, npy_complex128 *a, int *lda, npy_complex128 *taua, npy_complex128 *b, int *ldb, npy_complex128 *taub, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zgtcon)(char *norm, int *n, npy_complex128 *dl, npy_complex128 *d, npy_complex128 *du, npy_complex128 *du2, int *ipiv, double *anorm, double *rcond, npy_complex128 *work, int *info); +void BLAS_FUNC(zgtrfs)(char *trans, int *n, int *nrhs, npy_complex128 *dl, npy_complex128 *d, npy_complex128 *du, npy_complex128 *dlf, npy_complex128 *df, npy_complex128 *duf, npy_complex128 *du2, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(zgtsv)(int *n, int *nrhs, npy_complex128 *dl, npy_complex128 *d, npy_complex128 *du, npy_complex128 *b, int *ldb, int *info); +void BLAS_FUNC(zgtsvx)(char *fact, char *trans, int *n, int *nrhs, npy_complex128 *dl, npy_complex128 *d, npy_complex128 *du, npy_complex128 *dlf, npy_complex128 *df, npy_complex128 *duf, npy_complex128 *du2, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *rcond, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(zgttrf)(int *n, npy_complex128 *dl, npy_complex128 *d, npy_complex128 *du, npy_complex128 *du2, int *ipiv, int *info); +void BLAS_FUNC(zgttrs)(char *trans, int *n, int *nrhs, npy_complex128 *dl, npy_complex128 *d, npy_complex128 *du, npy_complex128 *du2, int *ipiv, npy_complex128 *b, int *ldb, int *info); +void BLAS_FUNC(zgtts2)(int *itrans, int *n, int *nrhs, npy_complex128 *dl, npy_complex128 *d, npy_complex128 *du, npy_complex128 *du2, int *ipiv, npy_complex128 *b, int *ldb); +void BLAS_FUNC(zhbev)(char *jobz, char *uplo, int *n, int *kd, npy_complex128 *ab, int *ldab, double *w, npy_complex128 *z, int *ldz, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(zhbevd)(char *jobz, char *uplo, int *n, int *kd, npy_complex128 *ab, int *ldab, double *w, npy_complex128 *z, int *ldz, npy_complex128 *work, int *lwork, double *rwork, int *lrwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(zhbevx)(char *jobz, char *range, char *uplo, int *n, int *kd, npy_complex128 *ab, int *ldab, npy_complex128 *q, int *ldq, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, npy_complex128 *z, int *ldz, npy_complex128 *work, double *rwork, int *iwork, int *ifail, int *info); +void BLAS_FUNC(zhbgst)(char *vect, char *uplo, int *n, int *ka, int *kb, npy_complex128 *ab, int *ldab, npy_complex128 *bb, int *ldbb, npy_complex128 *x, int *ldx, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(zhbgv)(char *jobz, char *uplo, int *n, int *ka, int *kb, npy_complex128 *ab, int *ldab, npy_complex128 *bb, int *ldbb, double *w, npy_complex128 *z, int *ldz, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(zhbgvd)(char *jobz, char *uplo, int *n, int *ka, int 
*kb, npy_complex128 *ab, int *ldab, npy_complex128 *bb, int *ldbb, double *w, npy_complex128 *z, int *ldz, npy_complex128 *work, int *lwork, double *rwork, int *lrwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(zhbgvx)(char *jobz, char *range, char *uplo, int *n, int *ka, int *kb, npy_complex128 *ab, int *ldab, npy_complex128 *bb, int *ldbb, npy_complex128 *q, int *ldq, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, npy_complex128 *z, int *ldz, npy_complex128 *work, double *rwork, int *iwork, int *ifail, int *info); +void BLAS_FUNC(zhbtrd)(char *vect, char *uplo, int *n, int *kd, npy_complex128 *ab, int *ldab, double *d, double *e, npy_complex128 *q, int *ldq, npy_complex128 *work, int *info); +void BLAS_FUNC(zhecon)(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, double *anorm, double *rcond, npy_complex128 *work, int *info); +void BLAS_FUNC(zheequb)(char *uplo, int *n, npy_complex128 *a, int *lda, double *s, double *scond, double *amax, npy_complex128 *work, int *info); +void BLAS_FUNC(zheev)(char *jobz, char *uplo, int *n, npy_complex128 *a, int *lda, double *w, npy_complex128 *work, int *lwork, double *rwork, int *info); +void BLAS_FUNC(zheevd)(char *jobz, char *uplo, int *n, npy_complex128 *a, int *lda, double *w, npy_complex128 *work, int *lwork, double *rwork, int *lrwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(zheevr)(char *jobz, char *range, char *uplo, int *n, npy_complex128 *a, int *lda, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, npy_complex128 *z, int *ldz, int *isuppz, npy_complex128 *work, int *lwork, double *rwork, int *lrwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(zheevx)(char *jobz, char *range, char *uplo, int *n, npy_complex128 *a, int *lda, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, npy_complex128 *z, int *ldz, npy_complex128 *work, int *lwork, double *rwork, int *iwork, int *ifail, int *info); +void BLAS_FUNC(zhegs2)(int *itype, char *uplo, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, int *info); +void BLAS_FUNC(zhegst)(int *itype, char *uplo, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, int *info); +void BLAS_FUNC(zhegv)(int *itype, char *jobz, char *uplo, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, double *w, npy_complex128 *work, int *lwork, double *rwork, int *info); +void BLAS_FUNC(zhegvd)(int *itype, char *jobz, char *uplo, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, double *w, npy_complex128 *work, int *lwork, double *rwork, int *lrwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(zhegvx)(int *itype, char *jobz, char *range, char *uplo, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, npy_complex128 *z, int *ldz, npy_complex128 *work, int *lwork, double *rwork, int *iwork, int *ifail, int *info); +void BLAS_FUNC(zherfs)(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *af, int *ldaf, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(zhesv)(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zhesvx)(char *fact, char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 
*af, int *ldaf, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *rcond, double *ferr, double *berr, npy_complex128 *work, int *lwork, double *rwork, int *info); +void BLAS_FUNC(zheswapr)(char *uplo, int *n, npy_complex128 *a, int *lda, int *i1, int *i2); +void BLAS_FUNC(zhetd2)(char *uplo, int *n, npy_complex128 *a, int *lda, double *d, double *e, npy_complex128 *tau, int *info); +void BLAS_FUNC(zhetf2)(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, int *info); +void BLAS_FUNC(zhetrd)(char *uplo, int *n, npy_complex128 *a, int *lda, double *d, double *e, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zhetrf)(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zhetri)(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *work, int *info); +void BLAS_FUNC(zhetri2)(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zhetri2x)(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *work, int *nb, int *info); +void BLAS_FUNC(zhetrs)(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *b, int *ldb, int *info); +void BLAS_FUNC(zhetrs2)(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *work, int *info); +void BLAS_FUNC(zhfrk)(char *transr, char *uplo, char *trans, int *n, int *k, double *alpha, npy_complex128 *a, int *lda, double *beta, npy_complex128 *c); +void BLAS_FUNC(zhgeqz)(char *job, char *compq, char *compz, int *n, int *ilo, int *ihi, npy_complex128 *h, int *ldh, npy_complex128 *t, int *ldt, npy_complex128 *alpha, npy_complex128 *beta, npy_complex128 *q, int *ldq, npy_complex128 *z, int *ldz, npy_complex128 *work, int *lwork, double *rwork, int *info); +void BLAS_FUNC(zhpcon)(char *uplo, int *n, npy_complex128 *ap, int *ipiv, double *anorm, double *rcond, npy_complex128 *work, int *info); +void BLAS_FUNC(zhpev)(char *jobz, char *uplo, int *n, npy_complex128 *ap, double *w, npy_complex128 *z, int *ldz, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(zhpevd)(char *jobz, char *uplo, int *n, npy_complex128 *ap, double *w, npy_complex128 *z, int *ldz, npy_complex128 *work, int *lwork, double *rwork, int *lrwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(zhpevx)(char *jobz, char *range, char *uplo, int *n, npy_complex128 *ap, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, npy_complex128 *z, int *ldz, npy_complex128 *work, double *rwork, int *iwork, int *ifail, int *info); +void BLAS_FUNC(zhpgst)(int *itype, char *uplo, int *n, npy_complex128 *ap, npy_complex128 *bp, int *info); +void BLAS_FUNC(zhpgv)(int *itype, char *jobz, char *uplo, int *n, npy_complex128 *ap, npy_complex128 *bp, double *w, npy_complex128 *z, int *ldz, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(zhpgvd)(int *itype, char *jobz, char *uplo, int *n, npy_complex128 *ap, npy_complex128 *bp, double *w, npy_complex128 *z, int *ldz, npy_complex128 *work, int *lwork, double *rwork, int *lrwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(zhpgvx)(int *itype, char *jobz, char *range, char *uplo, int *n, npy_complex128 *ap, npy_complex128 *bp, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, npy_complex128 *z, int *ldz, npy_complex128 *work, double *rwork, int *iwork, 
int *ifail, int *info); +void BLAS_FUNC(zhprfs)(char *uplo, int *n, int *nrhs, npy_complex128 *ap, npy_complex128 *afp, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(zhpsv)(char *uplo, int *n, int *nrhs, npy_complex128 *ap, int *ipiv, npy_complex128 *b, int *ldb, int *info); +void BLAS_FUNC(zhpsvx)(char *fact, char *uplo, int *n, int *nrhs, npy_complex128 *ap, npy_complex128 *afp, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *rcond, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(zhptrd)(char *uplo, int *n, npy_complex128 *ap, double *d, double *e, npy_complex128 *tau, int *info); +void BLAS_FUNC(zhptrf)(char *uplo, int *n, npy_complex128 *ap, int *ipiv, int *info); +void BLAS_FUNC(zhptri)(char *uplo, int *n, npy_complex128 *ap, int *ipiv, npy_complex128 *work, int *info); +void BLAS_FUNC(zhptrs)(char *uplo, int *n, int *nrhs, npy_complex128 *ap, int *ipiv, npy_complex128 *b, int *ldb, int *info); +void BLAS_FUNC(zhsein)(char *side, char *eigsrc, char *initv, int *select, int *n, npy_complex128 *h, int *ldh, npy_complex128 *w, npy_complex128 *vl, int *ldvl, npy_complex128 *vr, int *ldvr, int *mm, int *m, npy_complex128 *work, double *rwork, int *ifaill, int *ifailr, int *info); +void BLAS_FUNC(zhseqr)(char *job, char *compz, int *n, int *ilo, int *ihi, npy_complex128 *h, int *ldh, npy_complex128 *w, npy_complex128 *z, int *ldz, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zlabrd)(int *m, int *n, int *nb, npy_complex128 *a, int *lda, double *d, double *e, npy_complex128 *tauq, npy_complex128 *taup, npy_complex128 *x, int *ldx, npy_complex128 *y, int *ldy); +void BLAS_FUNC(zlacgv)(int *n, npy_complex128 *x, int *incx); +void BLAS_FUNC(zlacn2)(int *n, npy_complex128 *v, npy_complex128 *x, double *est, int *kase, int *isave); +void BLAS_FUNC(zlacon)(int *n, npy_complex128 *v, npy_complex128 *x, double *est, int *kase); +void BLAS_FUNC(zlacp2)(char *uplo, int *m, int *n, double *a, int *lda, npy_complex128 *b, int *ldb); +void BLAS_FUNC(zlacpy)(char *uplo, int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb); +void BLAS_FUNC(zlacrm)(int *m, int *n, npy_complex128 *a, int *lda, double *b, int *ldb, npy_complex128 *c, int *ldc, double *rwork); +void BLAS_FUNC(zlacrt)(int *n, npy_complex128 *cx, int *incx, npy_complex128 *cy, int *incy, npy_complex128 *c, npy_complex128 *s); +void (zladivwrp_)(npy_complex128 *out, npy_complex128 *x, npy_complex128 *y); +void BLAS_FUNC(zlaed0)(int *qsiz, int *n, double *d, double *e, npy_complex128 *q, int *ldq, npy_complex128 *qstore, int *ldqs, double *rwork, int *iwork, int *info); +void BLAS_FUNC(zlaed7)(int *n, int *cutpnt, int *qsiz, int *tlvls, int *curlvl, int *curpbm, double *d, npy_complex128 *q, int *ldq, double *rho, int *indxq, double *qstore, int *qptr, int *prmptr, int *perm, int *givptr, int *givcol, double *givnum, npy_complex128 *work, double *rwork, int *iwork, int *info); +void BLAS_FUNC(zlaed8)(int *k, int *n, int *qsiz, npy_complex128 *q, int *ldq, double *d, double *rho, int *cutpnt, double *z, double *dlamda, npy_complex128 *q2, int *ldq2, double *w, int *indxp, int *indx, int *indxq, int *perm, int *givptr, int *givcol, double *givnum, int *info); +void BLAS_FUNC(zlaein)(int *rightv, int *noinit, int *n, npy_complex128 *h, int *ldh, npy_complex128 *w, npy_complex128 *v, npy_complex128 *b, int *ldb, double *rwork, 
double *eps3, double *smlnum, int *info); +void BLAS_FUNC(zlaesy)(npy_complex128 *a, npy_complex128 *b, npy_complex128 *c, npy_complex128 *rt1, npy_complex128 *rt2, npy_complex128 *evscal, npy_complex128 *cs1, npy_complex128 *sn1); +void BLAS_FUNC(zlaev2)(npy_complex128 *a, npy_complex128 *b, npy_complex128 *c, double *rt1, double *rt2, double *cs1, npy_complex128 *sn1); +void BLAS_FUNC(zlag2c)(int *m, int *n, npy_complex128 *a, int *lda, npy_complex64 *sa, int *ldsa, int *info); +void BLAS_FUNC(zlags2)(int *upper, double *a1, npy_complex128 *a2, double *a3, double *b1, npy_complex128 *b2, double *b3, double *csu, npy_complex128 *snu, double *csv, npy_complex128 *snv, double *csq, npy_complex128 *snq); +void BLAS_FUNC(zlagtm)(char *trans, int *n, int *nrhs, double *alpha, npy_complex128 *dl, npy_complex128 *d, npy_complex128 *du, npy_complex128 *x, int *ldx, double *beta, npy_complex128 *b, int *ldb); +void BLAS_FUNC(zlahef)(char *uplo, int *n, int *nb, int *kb, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *w, int *ldw, int *info); +void BLAS_FUNC(zlahqr)(int *wantt, int *wantz, int *n, int *ilo, int *ihi, npy_complex128 *h, int *ldh, npy_complex128 *w, int *iloz, int *ihiz, npy_complex128 *z, int *ldz, int *info); +void BLAS_FUNC(zlahr2)(int *n, int *k, int *nb, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *t, int *ldt, npy_complex128 *y, int *ldy); +void BLAS_FUNC(zlaic1)(int *job, int *j, npy_complex128 *x, double *sest, npy_complex128 *w, npy_complex128 *gamma, double *sestpr, npy_complex128 *s, npy_complex128 *c); +void BLAS_FUNC(zlals0)(int *icompq, int *nl, int *nr, int *sqre, int *nrhs, npy_complex128 *b, int *ldb, npy_complex128 *bx, int *ldbx, int *perm, int *givptr, int *givcol, int *ldgcol, double *givnum, int *ldgnum, double *poles, double *difl, double *difr, double *z, int *k, double *c, double *s, double *rwork, int *info); +void BLAS_FUNC(zlalsa)(int *icompq, int *smlsiz, int *n, int *nrhs, npy_complex128 *b, int *ldb, npy_complex128 *bx, int *ldbx, double *u, int *ldu, double *vt, int *k, double *difl, double *difr, double *z, double *poles, int *givptr, int *givcol, int *ldgcol, int *perm, double *givnum, double *c, double *s, double *rwork, int *iwork, int *info); +void BLAS_FUNC(zlalsd)(char *uplo, int *smlsiz, int *n, int *nrhs, double *d, double *e, npy_complex128 *b, int *ldb, double *rcond, int *rank, npy_complex128 *work, double *rwork, int *iwork, int *info); +double BLAS_FUNC(zlangb)(char *norm, int *n, int *kl, int *ku, npy_complex128 *ab, int *ldab, double *work); +double BLAS_FUNC(zlange)(char *norm, int *m, int *n, npy_complex128 *a, int *lda, double *work); +double BLAS_FUNC(zlangt)(char *norm, int *n, npy_complex128 *dl, npy_complex128 *d_, npy_complex128 *du); +double BLAS_FUNC(zlanhb)(char *norm, char *uplo, int *n, int *k, npy_complex128 *ab, int *ldab, double *work); +double BLAS_FUNC(zlanhe)(char *norm, char *uplo, int *n, npy_complex128 *a, int *lda, double *work); +double BLAS_FUNC(zlanhf)(char *norm, char *transr, char *uplo, int *n, npy_complex128 *a, double *work); +double BLAS_FUNC(zlanhp)(char *norm, char *uplo, int *n, npy_complex128 *ap, double *work); +double BLAS_FUNC(zlanhs)(char *norm, int *n, npy_complex128 *a, int *lda, double *work); +double BLAS_FUNC(zlanht)(char *norm, int *n, double *d_, npy_complex128 *e); +double BLAS_FUNC(zlansb)(char *norm, char *uplo, int *n, int *k, npy_complex128 *ab, int *ldab, double *work); +double BLAS_FUNC(zlansp)(char *norm, char *uplo, int *n, npy_complex128 *ap, double 
*work); +double BLAS_FUNC(zlansy)(char *norm, char *uplo, int *n, npy_complex128 *a, int *lda, double *work); +double BLAS_FUNC(zlantb)(char *norm, char *uplo, char *diag, int *n, int *k, npy_complex128 *ab, int *ldab, double *work); +double BLAS_FUNC(zlantp)(char *norm, char *uplo, char *diag, int *n, npy_complex128 *ap, double *work); +double BLAS_FUNC(zlantr)(char *norm, char *uplo, char *diag, int *m, int *n, npy_complex128 *a, int *lda, double *work); +void BLAS_FUNC(zlapll)(int *n, npy_complex128 *x, int *incx, npy_complex128 *y, int *incy, double *ssmin); +void BLAS_FUNC(zlapmr)(int *forwrd, int *m, int *n, npy_complex128 *x, int *ldx, int *k); +void BLAS_FUNC(zlapmt)(int *forwrd, int *m, int *n, npy_complex128 *x, int *ldx, int *k); +void BLAS_FUNC(zlaqgb)(int *m, int *n, int *kl, int *ku, npy_complex128 *ab, int *ldab, double *r, double *c, double *rowcnd, double *colcnd, double *amax, char *equed); +void BLAS_FUNC(zlaqge)(int *m, int *n, npy_complex128 *a, int *lda, double *r, double *c, double *rowcnd, double *colcnd, double *amax, char *equed); +void BLAS_FUNC(zlaqhb)(char *uplo, int *n, int *kd, npy_complex128 *ab, int *ldab, double *s, double *scond, double *amax, char *equed); +void BLAS_FUNC(zlaqhe)(char *uplo, int *n, npy_complex128 *a, int *lda, double *s, double *scond, double *amax, char *equed); +void BLAS_FUNC(zlaqhp)(char *uplo, int *n, npy_complex128 *ap, double *s, double *scond, double *amax, char *equed); +void BLAS_FUNC(zlaqp2)(int *m, int *n, int *offset, npy_complex128 *a, int *lda, int *jpvt, npy_complex128 *tau, double *vn1, double *vn2, npy_complex128 *work); +void BLAS_FUNC(zlaqps)(int *m, int *n, int *offset, int *nb, int *kb, npy_complex128 *a, int *lda, int *jpvt, npy_complex128 *tau, double *vn1, double *vn2, npy_complex128 *auxv, npy_complex128 *f, int *ldf); +void BLAS_FUNC(zlaqr0)(int *wantt, int *wantz, int *n, int *ilo, int *ihi, npy_complex128 *h, int *ldh, npy_complex128 *w, int *iloz, int *ihiz, npy_complex128 *z, int *ldz, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zlaqr1)(int *n, npy_complex128 *h, int *ldh, npy_complex128 *s1, npy_complex128 *s2, npy_complex128 *v); +void BLAS_FUNC(zlaqr2)(int *wantt, int *wantz, int *n, int *ktop, int *kbot, int *nw, npy_complex128 *h, int *ldh, int *iloz, int *ihiz, npy_complex128 *z, int *ldz, int *ns, int *nd, npy_complex128 *sh, npy_complex128 *v, int *ldv, int *nh, npy_complex128 *t, int *ldt, int *nv, npy_complex128 *wv, int *ldwv, npy_complex128 *work, int *lwork); +void BLAS_FUNC(zlaqr3)(int *wantt, int *wantz, int *n, int *ktop, int *kbot, int *nw, npy_complex128 *h, int *ldh, int *iloz, int *ihiz, npy_complex128 *z, int *ldz, int *ns, int *nd, npy_complex128 *sh, npy_complex128 *v, int *ldv, int *nh, npy_complex128 *t, int *ldt, int *nv, npy_complex128 *wv, int *ldwv, npy_complex128 *work, int *lwork); +void BLAS_FUNC(zlaqr4)(int *wantt, int *wantz, int *n, int *ilo, int *ihi, npy_complex128 *h, int *ldh, npy_complex128 *w, int *iloz, int *ihiz, npy_complex128 *z, int *ldz, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zlaqr5)(int *wantt, int *wantz, int *kacc22, int *n, int *ktop, int *kbot, int *nshfts, npy_complex128 *s, npy_complex128 *h, int *ldh, int *iloz, int *ihiz, npy_complex128 *z, int *ldz, npy_complex128 *v, int *ldv, npy_complex128 *u, int *ldu, int *nv, npy_complex128 *wv, int *ldwv, int *nh, npy_complex128 *wh, int *ldwh); +void BLAS_FUNC(zlaqsb)(char *uplo, int *n, int *kd, npy_complex128 *ab, int *ldab, double *s, double *scond, double *amax, 
char *equed); +void BLAS_FUNC(zlaqsp)(char *uplo, int *n, npy_complex128 *ap, double *s, double *scond, double *amax, char *equed); +void BLAS_FUNC(zlaqsy)(char *uplo, int *n, npy_complex128 *a, int *lda, double *s, double *scond, double *amax, char *equed); +void BLAS_FUNC(zlar1v)(int *n, int *b1, int *bn, double *lambda_, double *d, double *l, double *ld, double *lld, double *pivmin, double *gaptol, npy_complex128 *z, int *wantnc, int *negcnt, double *ztz, double *mingma, int *r, int *isuppz, double *nrminv, double *resid, double *rqcorr, double *work); +void BLAS_FUNC(zlar2v)(int *n, npy_complex128 *x, npy_complex128 *y, npy_complex128 *z, int *incx, double *c, npy_complex128 *s, int *incc); +void BLAS_FUNC(zlarcm)(int *m, int *n, double *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *c, int *ldc, double *rwork); +void BLAS_FUNC(zlarf)(char *side, int *m, int *n, npy_complex128 *v, int *incv, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work); +void BLAS_FUNC(zlarfb)(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, npy_complex128 *v, int *ldv, npy_complex128 *t, int *ldt, npy_complex128 *c, int *ldc, npy_complex128 *work, int *ldwork); +void BLAS_FUNC(zlarfg)(int *n, npy_complex128 *alpha, npy_complex128 *x, int *incx, npy_complex128 *tau); +void BLAS_FUNC(zlarfgp)(int *n, npy_complex128 *alpha, npy_complex128 *x, int *incx, npy_complex128 *tau); +void BLAS_FUNC(zlarft)(char *direct, char *storev, int *n, int *k, npy_complex128 *v, int *ldv, npy_complex128 *tau, npy_complex128 *t, int *ldt); +void BLAS_FUNC(zlarfx)(char *side, int *m, int *n, npy_complex128 *v, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work); +void BLAS_FUNC(zlargv)(int *n, npy_complex128 *x, int *incx, npy_complex128 *y, int *incy, double *c, int *incc); +void BLAS_FUNC(zlarnv)(int *idist, int *iseed, int *n, npy_complex128 *x); +void BLAS_FUNC(zlarrv)(int *n, double *vl, double *vu, double *d, double *l, double *pivmin, int *isplit, int *m, int *dol, int *dou, double *minrgp, double *rtol1, double *rtol2, double *w, double *werr, double *wgap, int *iblock, int *indexw, double *gers, npy_complex128 *z, int *ldz, int *isuppz, double *work, int *iwork, int *info); +void BLAS_FUNC(zlartg)(npy_complex128 *f, npy_complex128 *g, double *cs, npy_complex128 *sn, npy_complex128 *r); +void BLAS_FUNC(zlartv)(int *n, npy_complex128 *x, int *incx, npy_complex128 *y, int *incy, double *c, npy_complex128 *s, int *incc); +void BLAS_FUNC(zlarz)(char *side, int *m, int *n, int *l, npy_complex128 *v, int *incv, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work); +void BLAS_FUNC(zlarzb)(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, npy_complex128 *v, int *ldv, npy_complex128 *t, int *ldt, npy_complex128 *c, int *ldc, npy_complex128 *work, int *ldwork); +void BLAS_FUNC(zlarzt)(char *direct, char *storev, int *n, int *k, npy_complex128 *v, int *ldv, npy_complex128 *tau, npy_complex128 *t, int *ldt); +void BLAS_FUNC(zlascl)(char *type_bn, int *kl, int *ku, double *cfrom, double *cto, int *m, int *n, npy_complex128 *a, int *lda, int *info); +void BLAS_FUNC(zlaset)(char *uplo, int *m, int *n, npy_complex128 *alpha, npy_complex128 *beta, npy_complex128 *a, int *lda); +void BLAS_FUNC(zlasr)(char *side, char *pivot, char *direct, int *m, int *n, double *c, double *s, npy_complex128 *a, int *lda); +void BLAS_FUNC(zlassq)(int *n, npy_complex128 *x, int *incx, double *scale, double *sumsq); +void 
BLAS_FUNC(zlaswp)(int *n, npy_complex128 *a, int *lda, int *k1, int *k2, int *ipiv, int *incx); +void BLAS_FUNC(zlasyf)(char *uplo, int *n, int *nb, int *kb, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *w, int *ldw, int *info); +void BLAS_FUNC(zlat2c)(char *uplo, int *n, npy_complex128 *a, int *lda, npy_complex64 *sa, int *ldsa, int *info); +void BLAS_FUNC(zlatbs)(char *uplo, char *trans, char *diag, char *normin, int *n, int *kd, npy_complex128 *ab, int *ldab, npy_complex128 *x, double *scale, double *cnorm, int *info); +void BLAS_FUNC(zlatdf)(int *ijob, int *n, npy_complex128 *z, int *ldz, npy_complex128 *rhs, double *rdsum, double *rdscal, int *ipiv, int *jpiv); +void BLAS_FUNC(zlatps)(char *uplo, char *trans, char *diag, char *normin, int *n, npy_complex128 *ap, npy_complex128 *x, double *scale, double *cnorm, int *info); +void BLAS_FUNC(zlatrd)(char *uplo, int *n, int *nb, npy_complex128 *a, int *lda, double *e, npy_complex128 *tau, npy_complex128 *w, int *ldw); +void BLAS_FUNC(zlatrs)(char *uplo, char *trans, char *diag, char *normin, int *n, npy_complex128 *a, int *lda, npy_complex128 *x, double *scale, double *cnorm, int *info); +void BLAS_FUNC(zlatrz)(int *m, int *n, int *l, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work); +void BLAS_FUNC(zlauu2)(char *uplo, int *n, npy_complex128 *a, int *lda, int *info); +void BLAS_FUNC(zlauum)(char *uplo, int *n, npy_complex128 *a, int *lda, int *info); +void BLAS_FUNC(zpbcon)(char *uplo, int *n, int *kd, npy_complex128 *ab, int *ldab, double *anorm, double *rcond, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(zpbequ)(char *uplo, int *n, int *kd, npy_complex128 *ab, int *ldab, double *s, double *scond, double *amax, int *info); +void BLAS_FUNC(zpbrfs)(char *uplo, int *n, int *kd, int *nrhs, npy_complex128 *ab, int *ldab, npy_complex128 *afb, int *ldafb, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(zpbstf)(char *uplo, int *n, int *kd, npy_complex128 *ab, int *ldab, int *info); +void BLAS_FUNC(zpbsv)(char *uplo, int *n, int *kd, int *nrhs, npy_complex128 *ab, int *ldab, npy_complex128 *b, int *ldb, int *info); +void BLAS_FUNC(zpbsvx)(char *fact, char *uplo, int *n, int *kd, int *nrhs, npy_complex128 *ab, int *ldab, npy_complex128 *afb, int *ldafb, char *equed, double *s, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *rcond, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(zpbtf2)(char *uplo, int *n, int *kd, npy_complex128 *ab, int *ldab, int *info); +void BLAS_FUNC(zpbtrf)(char *uplo, int *n, int *kd, npy_complex128 *ab, int *ldab, int *info); +void BLAS_FUNC(zpbtrs)(char *uplo, int *n, int *kd, int *nrhs, npy_complex128 *ab, int *ldab, npy_complex128 *b, int *ldb, int *info); +void BLAS_FUNC(zpftrf)(char *transr, char *uplo, int *n, npy_complex128 *a, int *info); +void BLAS_FUNC(zpftri)(char *transr, char *uplo, int *n, npy_complex128 *a, int *info); +void BLAS_FUNC(zpftrs)(char *transr, char *uplo, int *n, int *nrhs, npy_complex128 *a, npy_complex128 *b, int *ldb, int *info); +void BLAS_FUNC(zpocon)(char *uplo, int *n, npy_complex128 *a, int *lda, double *anorm, double *rcond, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(zpoequ)(int *n, npy_complex128 *a, int *lda, double *s, double *scond, double *amax, int *info); +void BLAS_FUNC(zpoequb)(int *n, npy_complex128 *a, int *lda, double *s, double *scond, 
double *amax, int *info); +void BLAS_FUNC(zporfs)(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *af, int *ldaf, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(zposv)(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, int *info); +void BLAS_FUNC(zposvx)(char *fact, char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *af, int *ldaf, char *equed, double *s, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *rcond, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(zpotf2)(char *uplo, int *n, npy_complex128 *a, int *lda, int *info); +void BLAS_FUNC(zpotrf)(char *uplo, int *n, npy_complex128 *a, int *lda, int *info); +void BLAS_FUNC(zpotri)(char *uplo, int *n, npy_complex128 *a, int *lda, int *info); +void BLAS_FUNC(zpotrs)(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, int *info); +void BLAS_FUNC(zppcon)(char *uplo, int *n, npy_complex128 *ap, double *anorm, double *rcond, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(zppequ)(char *uplo, int *n, npy_complex128 *ap, double *s, double *scond, double *amax, int *info); +void BLAS_FUNC(zpprfs)(char *uplo, int *n, int *nrhs, npy_complex128 *ap, npy_complex128 *afp, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(zppsv)(char *uplo, int *n, int *nrhs, npy_complex128 *ap, npy_complex128 *b, int *ldb, int *info); +void BLAS_FUNC(zppsvx)(char *fact, char *uplo, int *n, int *nrhs, npy_complex128 *ap, npy_complex128 *afp, char *equed, double *s, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *rcond, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(zpptrf)(char *uplo, int *n, npy_complex128 *ap, int *info); +void BLAS_FUNC(zpptri)(char *uplo, int *n, npy_complex128 *ap, int *info); +void BLAS_FUNC(zpptrs)(char *uplo, int *n, int *nrhs, npy_complex128 *ap, npy_complex128 *b, int *ldb, int *info); +void BLAS_FUNC(zpstf2)(char *uplo, int *n, npy_complex128 *a, int *lda, int *piv, int *rank, double *tol, double *work, int *info); +void BLAS_FUNC(zpstrf)(char *uplo, int *n, npy_complex128 *a, int *lda, int *piv, int *rank, double *tol, double *work, int *info); +void BLAS_FUNC(zptcon)(int *n, double *d, npy_complex128 *e, double *anorm, double *rcond, double *rwork, int *info); +void BLAS_FUNC(zpteqr)(char *compz, int *n, double *d, double *e, npy_complex128 *z, int *ldz, double *work, int *info); +void BLAS_FUNC(zptrfs)(char *uplo, int *n, int *nrhs, double *d, npy_complex128 *e, double *df, npy_complex128 *ef, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(zptsv)(int *n, int *nrhs, double *d, npy_complex128 *e, npy_complex128 *b, int *ldb, int *info); +void BLAS_FUNC(zptsvx)(char *fact, int *n, int *nrhs, double *d, npy_complex128 *e, double *df, npy_complex128 *ef, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *rcond, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(zpttrf)(int *n, double *d, npy_complex128 *e, int *info); +void BLAS_FUNC(zpttrs)(char *uplo, int *n, int *nrhs, double *d, npy_complex128 *e, npy_complex128 *b, 
int *ldb, int *info); +void BLAS_FUNC(zptts2)(int *iuplo, int *n, int *nrhs, double *d, npy_complex128 *e, npy_complex128 *b, int *ldb); +void BLAS_FUNC(zrot)(int *n, npy_complex128 *cx, int *incx, npy_complex128 *cy, int *incy, double *c, npy_complex128 *s); +void BLAS_FUNC(zspcon)(char *uplo, int *n, npy_complex128 *ap, int *ipiv, double *anorm, double *rcond, npy_complex128 *work, int *info); +void BLAS_FUNC(zspmv)(char *uplo, int *n, npy_complex128 *alpha, npy_complex128 *ap, npy_complex128 *x, int *incx, npy_complex128 *beta, npy_complex128 *y, int *incy); +void BLAS_FUNC(zspr)(char *uplo, int *n, npy_complex128 *alpha, npy_complex128 *x, int *incx, npy_complex128 *ap); +void BLAS_FUNC(zsprfs)(char *uplo, int *n, int *nrhs, npy_complex128 *ap, npy_complex128 *afp, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(zspsv)(char *uplo, int *n, int *nrhs, npy_complex128 *ap, int *ipiv, npy_complex128 *b, int *ldb, int *info); +void BLAS_FUNC(zspsvx)(char *fact, char *uplo, int *n, int *nrhs, npy_complex128 *ap, npy_complex128 *afp, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *rcond, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(zsptrf)(char *uplo, int *n, npy_complex128 *ap, int *ipiv, int *info); +void BLAS_FUNC(zsptri)(char *uplo, int *n, npy_complex128 *ap, int *ipiv, npy_complex128 *work, int *info); +void BLAS_FUNC(zsptrs)(char *uplo, int *n, int *nrhs, npy_complex128 *ap, int *ipiv, npy_complex128 *b, int *ldb, int *info); +void BLAS_FUNC(zstedc)(char *compz, int *n, double *d, double *e, npy_complex128 *z, int *ldz, npy_complex128 *work, int *lwork, double *rwork, int *lrwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(zstegr)(char *jobz, char *range, int *n, double *d, double *e, double *vl, double *vu, int *il, int *iu, double *abstol, int *m, double *w, npy_complex128 *z, int *ldz, int *isuppz, double *work, int *lwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(zstein)(int *n, double *d, double *e, int *m, double *w, int *iblock, int *isplit, npy_complex128 *z, int *ldz, double *work, int *iwork, int *ifail, int *info); +void BLAS_FUNC(zstemr)(char *jobz, char *range, int *n, double *d, double *e, double *vl, double *vu, int *il, int *iu, int *m, double *w, npy_complex128 *z, int *ldz, int *nzc, int *isuppz, int *tryrac, double *work, int *lwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(zsteqr)(char *compz, int *n, double *d, double *e, npy_complex128 *z, int *ldz, double *work, int *info); +void BLAS_FUNC(zsycon)(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, double *anorm, double *rcond, npy_complex128 *work, int *info); +void BLAS_FUNC(zsyconv)(char *uplo, char *way, int *n, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *work, int *info); +void BLAS_FUNC(zsyequb)(char *uplo, int *n, npy_complex128 *a, int *lda, double *s, double *scond, double *amax, npy_complex128 *work, int *info); +void BLAS_FUNC(zsymv)(char *uplo, int *n, npy_complex128 *alpha, npy_complex128 *a, int *lda, npy_complex128 *x, int *incx, npy_complex128 *beta, npy_complex128 *y, int *incy); +void BLAS_FUNC(zsyr)(char *uplo, int *n, npy_complex128 *alpha, npy_complex128 *x, int *incx, npy_complex128 *a, int *lda); +void BLAS_FUNC(zsyrfs)(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *af, int *ldaf, int *ipiv, npy_complex128 *b, int *ldb, 
npy_complex128 *x, int *ldx, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(zsysv)(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zsysvx)(char *fact, char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *af, int *ldaf, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *rcond, double *ferr, double *berr, npy_complex128 *work, int *lwork, double *rwork, int *info); +void BLAS_FUNC(zsyswapr)(char *uplo, int *n, npy_complex128 *a, int *lda, int *i1, int *i2); +void BLAS_FUNC(zsytf2)(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, int *info); +void BLAS_FUNC(zsytrf)(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zsytri)(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *work, int *info); +void BLAS_FUNC(zsytri2)(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zsytri2x)(char *uplo, int *n, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *work, int *nb, int *info); +void BLAS_FUNC(zsytrs)(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *b, int *ldb, int *info); +void BLAS_FUNC(zsytrs2)(char *uplo, int *n, int *nrhs, npy_complex128 *a, int *lda, int *ipiv, npy_complex128 *b, int *ldb, npy_complex128 *work, int *info); +void BLAS_FUNC(ztbcon)(char *norm, char *uplo, char *diag, int *n, int *kd, npy_complex128 *ab, int *ldab, double *rcond, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(ztbrfs)(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, npy_complex128 *ab, int *ldab, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(ztbtrs)(char *uplo, char *trans, char *diag, int *n, int *kd, int *nrhs, npy_complex128 *ab, int *ldab, npy_complex128 *b, int *ldb, int *info); +void BLAS_FUNC(ztfsm)(char *transr, char *side, char *uplo, char *trans, char *diag, int *m, int *n, npy_complex128 *alpha, npy_complex128 *a, npy_complex128 *b, int *ldb); +void BLAS_FUNC(ztftri)(char *transr, char *uplo, char *diag, int *n, npy_complex128 *a, int *info); +void BLAS_FUNC(ztfttp)(char *transr, char *uplo, int *n, npy_complex128 *arf, npy_complex128 *ap, int *info); +void BLAS_FUNC(ztfttr)(char *transr, char *uplo, int *n, npy_complex128 *arf, npy_complex128 *a, int *lda, int *info); +void BLAS_FUNC(ztgevc)(char *side, char *howmny, int *select, int *n, npy_complex128 *s, int *lds, npy_complex128 *p, int *ldp, npy_complex128 *vl, int *ldvl, npy_complex128 *vr, int *ldvr, int *mm, int *m, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(ztgex2)(int *wantq, int *wantz, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *q, int *ldq, npy_complex128 *z, int *ldz, int *j1, int *info); +void BLAS_FUNC(ztgexc)(int *wantq, int *wantz, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *q, int *ldq, npy_complex128 *z, int *ldz, int *ifst, int *ilst, int *info); +void BLAS_FUNC(ztgsen)(int *ijob, int *wantq, int *wantz, int *select, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *alpha, npy_complex128 *beta, npy_complex128 *q, int *ldq, npy_complex128 *z, int 
*ldz, int *m, double *pl, double *pr, double *dif, npy_complex128 *work, int *lwork, int *iwork, int *liwork, int *info); +void BLAS_FUNC(ztgsja)(char *jobu, char *jobv, char *jobq, int *m, int *p, int *n, int *k, int *l, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, double *tola, double *tolb, double *alpha, double *beta, npy_complex128 *u, int *ldu, npy_complex128 *v, int *ldv, npy_complex128 *q, int *ldq, npy_complex128 *work, int *ncycle, int *info); +void BLAS_FUNC(ztgsna)(char *job, char *howmny, int *select, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *vl, int *ldvl, npy_complex128 *vr, int *ldvr, double *s, double *dif, int *mm, int *m, npy_complex128 *work, int *lwork, int *iwork, int *info); +void BLAS_FUNC(ztgsy2)(char *trans, int *ijob, int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *c, int *ldc, npy_complex128 *d, int *ldd, npy_complex128 *e, int *lde, npy_complex128 *f, int *ldf, double *scale, double *rdsum, double *rdscal, int *info); +void BLAS_FUNC(ztgsyl)(char *trans, int *ijob, int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *c, int *ldc, npy_complex128 *d, int *ldd, npy_complex128 *e, int *lde, npy_complex128 *f, int *ldf, double *scale, double *dif, npy_complex128 *work, int *lwork, int *iwork, int *info); +void BLAS_FUNC(ztpcon)(char *norm, char *uplo, char *diag, int *n, npy_complex128 *ap, double *rcond, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(ztpmqrt)(char *side, char *trans, int *m, int *n, int *k, int *l, int *nb, npy_complex128 *v, int *ldv, npy_complex128 *t, int *ldt, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *work, int *info); +void BLAS_FUNC(ztpqrt)(int *m, int *n, int *l, int *nb, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *t, int *ldt, npy_complex128 *work, int *info); +void BLAS_FUNC(ztpqrt2)(int *m, int *n, int *l, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *t, int *ldt, int *info); +void BLAS_FUNC(ztprfb)(char *side, char *trans, char *direct, char *storev, int *m, int *n, int *k, int *l, npy_complex128 *v, int *ldv, npy_complex128 *t, int *ldt, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *work, int *ldwork); +void BLAS_FUNC(ztprfs)(char *uplo, char *trans, char *diag, int *n, int *nrhs, npy_complex128 *ap, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(ztptri)(char *uplo, char *diag, int *n, npy_complex128 *ap, int *info); +void BLAS_FUNC(ztptrs)(char *uplo, char *trans, char *diag, int *n, int *nrhs, npy_complex128 *ap, npy_complex128 *b, int *ldb, int *info); +void BLAS_FUNC(ztpttf)(char *transr, char *uplo, int *n, npy_complex128 *ap, npy_complex128 *arf, int *info); +void BLAS_FUNC(ztpttr)(char *uplo, int *n, npy_complex128 *ap, npy_complex128 *a, int *lda, int *info); +void BLAS_FUNC(ztrcon)(char *norm, char *uplo, char *diag, int *n, npy_complex128 *a, int *lda, double *rcond, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(ztrevc)(char *side, char *howmny, int *select, int *n, npy_complex128 *t, int *ldt, npy_complex128 *vl, int *ldvl, npy_complex128 *vr, int *ldvr, int *mm, int *m, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(ztrexc)(char *compq, int *n, npy_complex128 *t, int *ldt, npy_complex128 *q, int *ldq, int *ifst, int *ilst, 
int *info); +void BLAS_FUNC(ztrrfs)(char *uplo, char *trans, char *diag, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *x, int *ldx, double *ferr, double *berr, npy_complex128 *work, double *rwork, int *info); +void BLAS_FUNC(ztrsen)(char *job, char *compq, int *select, int *n, npy_complex128 *t, int *ldt, npy_complex128 *q, int *ldq, npy_complex128 *w, int *m, double *s, double *sep, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(ztrsna)(char *job, char *howmny, int *select, int *n, npy_complex128 *t, int *ldt, npy_complex128 *vl, int *ldvl, npy_complex128 *vr, int *ldvr, double *s, double *sep, int *mm, int *m, npy_complex128 *work, int *ldwork, double *rwork, int *info); +void BLAS_FUNC(ztrsyl)(char *trana, char *tranb, int *isgn, int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, npy_complex128 *c, int *ldc, double *scale, int *info); +void BLAS_FUNC(ztrti2)(char *uplo, char *diag, int *n, npy_complex128 *a, int *lda, int *info); +void BLAS_FUNC(ztrtri)(char *uplo, char *diag, int *n, npy_complex128 *a, int *lda, int *info); +void BLAS_FUNC(ztrtrs)(char *uplo, char *trans, char *diag, int *n, int *nrhs, npy_complex128 *a, int *lda, npy_complex128 *b, int *ldb, int *info); +void BLAS_FUNC(ztrttf)(char *transr, char *uplo, int *n, npy_complex128 *a, int *lda, npy_complex128 *arf, int *info); +void BLAS_FUNC(ztrttp)(char *uplo, int *n, npy_complex128 *a, int *lda, npy_complex128 *ap, int *info); +void BLAS_FUNC(ztzrzf)(int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zunbdb)(char *trans, char *signs, int *m, int *p, int *q, npy_complex128 *x11, int *ldx11, npy_complex128 *x12, int *ldx12, npy_complex128 *x21, int *ldx21, npy_complex128 *x22, int *ldx22, double *theta, double *phi, npy_complex128 *taup1, npy_complex128 *taup2, npy_complex128 *tauq1, npy_complex128 *tauq2, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zuncsd)(char *jobu1, char *jobu2, char *jobv1t, char *jobv2t, char *trans, char *signs, int *m, int *p, int *q, npy_complex128 *x11, int *ldx11, npy_complex128 *x12, int *ldx12, npy_complex128 *x21, int *ldx21, npy_complex128 *x22, int *ldx22, double *theta, npy_complex128 *u1, int *ldu1, npy_complex128 *u2, int *ldu2, npy_complex128 *v1t, int *ldv1t, npy_complex128 *v2t, int *ldv2t, npy_complex128 *work, int *lwork, double *rwork, int *lrwork, int *iwork, int *info); +void BLAS_FUNC(zung2l)(int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *info); +void BLAS_FUNC(zung2r)(int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *info); +void BLAS_FUNC(zungbr)(char *vect, int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zunghr)(int *n, int *ilo, int *ihi, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zungl2)(int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *info); +void BLAS_FUNC(zunglq)(int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zungql)(int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zungqr)(int *m, int *n, int *k, npy_complex128 
*a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zungr2)(int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *info); +void BLAS_FUNC(zungrq)(int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zungtr)(char *uplo, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zunm2l)(char *side, char *trans, int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *info); +void BLAS_FUNC(zunm2r)(char *side, char *trans, int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *info); +void BLAS_FUNC(zunmbr)(char *vect, char *side, char *trans, int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zunmhr)(char *side, char *trans, int *m, int *n, int *ilo, int *ihi, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zunml2)(char *side, char *trans, int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *info); +void BLAS_FUNC(zunmlq)(char *side, char *trans, int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zunmql)(char *side, char *trans, int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zunmqr)(char *side, char *trans, int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zunmr2)(char *side, char *trans, int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *info); +void BLAS_FUNC(zunmr3)(char *side, char *trans, int *m, int *n, int *k, int *l, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *info); +void BLAS_FUNC(zunmrq)(char *side, char *trans, int *m, int *n, int *k, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zunmrz)(char *side, char *trans, int *m, int *n, int *k, int *l, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zunmtr)(char *side, char *uplo, char *trans, int *m, int *n, npy_complex128 *a, int *lda, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *lwork, int *info); +void BLAS_FUNC(zupgtr)(char *uplo, int *n, npy_complex128 *ap, npy_complex128 *tau, npy_complex128 *q, int *ldq, npy_complex128 *work, int *info); +void BLAS_FUNC(zupmtr)(char *side, char *uplo, char *trans, int *m, int *n, npy_complex128 *ap, npy_complex128 *tau, npy_complex128 *c, int *ldc, npy_complex128 *work, int *info); + +#ifdef __cplusplus +} +#endif diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_matfuncs.py 
b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_matfuncs.py new file mode 100644 index 0000000000000000000000000000000000000000..64fc65df291dcb1547d3c6c0f419eee3ff8e083d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_matfuncs.py @@ -0,0 +1,861 @@ +# +# Author: Travis Oliphant, March 2002 +# +from itertools import product + +import numpy as np +from numpy import (dot, diag, prod, logical_not, ravel, transpose, + conjugate, absolute, amax, sign, isfinite, triu) + +# Local imports +from scipy.linalg import LinAlgError, bandwidth +from ._misc import norm +from ._basic import solve, inv +from ._decomp_svd import svd +from ._decomp_schur import schur, rsf2csf +from ._expm_frechet import expm_frechet, expm_cond +from ._matfuncs_sqrtm import sqrtm +from ._matfuncs_expm import pick_pade_structure, pade_UV_calc + +# deprecated imports to be removed in SciPy 1.13.0 +from numpy import single # noqa: F401 + +__all__ = ['expm', 'cosm', 'sinm', 'tanm', 'coshm', 'sinhm', 'tanhm', 'logm', + 'funm', 'signm', 'sqrtm', 'fractional_matrix_power', 'expm_frechet', + 'expm_cond', 'khatri_rao'] + +eps = np.finfo('d').eps +feps = np.finfo('f').eps + +_array_precision = {'i': 1, 'l': 1, 'f': 0, 'd': 1, 'F': 0, 'D': 1} + + +############################################################################### +# Utility functions. + + +def _asarray_square(A): + """ + Wraps asarray with the extra requirement that the input be a square matrix. + + The motivation is that the matfuncs module has real functions that have + been lifted to square matrix functions. + + Parameters + ---------- + A : array_like + A square matrix. + + Returns + ------- + out : ndarray + An ndarray copy or view or other representation of A. + + """ + A = np.asarray(A) + if len(A.shape) != 2 or A.shape[0] != A.shape[1]: + raise ValueError('expected square array_like input') + return A + + +def _maybe_real(A, B, tol=None): + """ + Return either B or the real part of B, depending on properties of A and B. + + The motivation is that B has been computed as a complicated function of A, + and B may be perturbed by negligible imaginary components. + If A is real and B is complex with small imaginary components, + then return a real copy of B. The assumption in that case would be that + the imaginary components of B are numerical artifacts. + + Parameters + ---------- + A : ndarray + Input array whose type is to be checked as real vs. complex. + B : ndarray + Array to be returned, possibly without its imaginary part. + tol : float + Absolute tolerance. + + Returns + ------- + out : real or complex array + Either the input array B or only the real part of the input array B. + + """ + # Note that booleans and integers compare as real. + if np.isrealobj(A) and np.iscomplexobj(B): + if tol is None: + tol = {0: feps*1e3, 1: eps*1e6}[_array_precision[B.dtype.char]] + if np.allclose(B.imag, 0.0, atol=tol): + B = B.real + return B + + +############################################################################### +# Matrix functions. + + +def fractional_matrix_power(A, t): + """ + Compute the fractional power of a matrix. + + Proceeds according to the discussion in section (6) of [1]_. + + Parameters + ---------- + A : (N, N) array_like + Matrix whose fractional power to evaluate. + t : float + Fractional power. + + Returns + ------- + X : (N, N) array_like + The fractional power of the matrix. + + References + ---------- + .. [1] Nicholas J. Higham and Lijing lin (2011) + "A Schur-Pade Algorithm for Fractional Powers of a Matrix." 
+ SIAM Journal on Matrix Analysis and Applications, + 32 (3). pp. 1056-1078. ISSN 0895-4798 + + Examples + -------- + >>> import numpy as np + >>> from scipy.linalg import fractional_matrix_power + >>> a = np.array([[1.0, 3.0], [1.0, 4.0]]) + >>> b = fractional_matrix_power(a, 0.5) + >>> b + array([[ 0.75592895, 1.13389342], + [ 0.37796447, 1.88982237]]) + >>> np.dot(b, b) # Verify square root + array([[ 1., 3.], + [ 1., 4.]]) + + """ + # This fixes some issue with imports; + # this function calls onenormest which is in scipy.sparse. + A = _asarray_square(A) + import scipy.linalg._matfuncs_inv_ssq + return scipy.linalg._matfuncs_inv_ssq._fractional_matrix_power(A, t) + + +def logm(A, disp=True): + """ + Compute matrix logarithm. + + The matrix logarithm is the inverse of + expm: expm(logm(`A`)) == `A` + + Parameters + ---------- + A : (N, N) array_like + Matrix whose logarithm to evaluate + disp : bool, optional + Print warning if error in the result is estimated large + instead of returning estimated error. (Default: True) + + Returns + ------- + logm : (N, N) ndarray + Matrix logarithm of `A` + errest : float + (if disp == False) + + 1-norm of the estimated error, ||err||_1 / ||A||_1 + + References + ---------- + .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012) + "Improved Inverse Scaling and Squaring Algorithms + for the Matrix Logarithm." + SIAM Journal on Scientific Computing, 34 (4). C152-C169. + ISSN 1095-7197 + + .. [2] Nicholas J. Higham (2008) + "Functions of Matrices: Theory and Computation" + ISBN 978-0-898716-46-7 + + .. [3] Nicholas J. Higham and Lijing lin (2011) + "A Schur-Pade Algorithm for Fractional Powers of a Matrix." + SIAM Journal on Matrix Analysis and Applications, + 32 (3). pp. 1056-1078. ISSN 0895-4798 + + Examples + -------- + >>> import numpy as np + >>> from scipy.linalg import logm, expm + >>> a = np.array([[1.0, 3.0], [1.0, 4.0]]) + >>> b = logm(a) + >>> b + array([[-1.02571087, 2.05142174], + [ 0.68380725, 1.02571087]]) + >>> expm(b) # Verify expm(logm(a)) returns a + array([[ 1., 3.], + [ 1., 4.]]) + + """ + A = _asarray_square(A) + # Avoid circular import ... this is OK, right? + import scipy.linalg._matfuncs_inv_ssq + F = scipy.linalg._matfuncs_inv_ssq._logm(A) + F = _maybe_real(A, F) + errtol = 1000*eps + # TODO use a better error approximation + errest = norm(expm(F)-A, 1) / norm(A, 1) + if disp: + if not isfinite(errest) or errest >= errtol: + print("logm result may be inaccurate, approximate err =", errest) + return F + else: + return F, errest + + +def expm(A): + """Compute the matrix exponential of an array. + + Parameters + ---------- + A : ndarray + Input with last two dimensions are square ``(..., n, n)``. + + Returns + ------- + eA : ndarray + The resulting matrix exponential with the same shape of ``A`` + + Notes + ----- + Implements the algorithm given in [1], which is essentially a Pade + approximation with a variable order that is decided based on the array + data. + + For input with size ``n``, the memory usage is in the worst case in the + order of ``8*(n**2)``. If the input data is not of single and double + precision of real and complex dtypes, it is copied to a new array. + + For cases ``n >= 400``, the exact 1-norm computation cost, breaks even with + 1-norm estimation and from that point on the estimation scheme given in + [2] is used to decide on the approximation order. + + References + ---------- + .. [1] Awad H. Al-Mohy and Nicholas J. 
Higham, (2009), "A New Scaling + and Squaring Algorithm for the Matrix Exponential", SIAM J. Matrix + Anal. Appl. 31(3):970-989, :doi:`10.1137/09074721X` + + .. [2] Nicholas J. Higham and Francoise Tisseur (2000), "A Block Algorithm + for Matrix 1-Norm Estimation, with an Application to 1-Norm + Pseudospectra." SIAM J. Matrix Anal. Appl. 21(4):1185-1201, + :doi:`10.1137/S0895479899356080` + + Examples + -------- + >>> import numpy as np + >>> from scipy.linalg import expm, sinm, cosm + + Matrix version of the formula exp(0) = 1: + + >>> expm(np.zeros((3, 2, 2))) + array([[[1., 0.], + [0., 1.]], + + [[1., 0.], + [0., 1.]], + + [[1., 0.], + [0., 1.]]]) + + Euler's identity (exp(i*theta) = cos(theta) + i*sin(theta)) + applied to a matrix: + + >>> a = np.array([[1.0, 2.0], [-1.0, 3.0]]) + >>> expm(1j*a) + array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j], + [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]]) + >>> cosm(a) + 1j*sinm(a) + array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j], + [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]]) + + """ + a = np.asarray(A) + if a.size == 1 and a.ndim < 2: + return np.array([[np.exp(a.item())]]) + + if a.ndim < 2: + raise LinAlgError('The input array must be at least two-dimensional') + if a.shape[-1] != a.shape[-2]: + raise LinAlgError('Last 2 dimensions of the array must be square') + n = a.shape[-1] + # Empty array + if min(*a.shape) == 0: + return np.empty_like(a) + + # Scalar case + if a.shape[-2:] == (1, 1): + return np.exp(a) + + if not np.issubdtype(a.dtype, np.inexact): + a = a.astype(np.float64) + elif a.dtype == np.float16: + a = a.astype(np.float32) + + # An explicit formula for 2x2 case exists (formula (2.2) in [1]). However, without + # Kahan's method, numerical instabilities can occur (See gh-19584). Hence removed + # here until we have a more stable implementation. + + n = a.shape[-1] + eA = np.empty(a.shape, dtype=a.dtype) + # working memory to hold intermediate arrays + Am = np.empty((5, n, n), dtype=a.dtype) + + # Main loop to go through the slices of an ndarray and passing to expm + for ind in product(*[range(x) for x in a.shape[:-2]]): + aw = a[ind] + + lu = bandwidth(aw) + if not any(lu): # a is diagonal? + eA[ind] = np.diag(np.exp(np.diag(aw))) + continue + + # Generic/triangular case; copy the slice into scratch and send. 
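For reference, the diagonal fast path taken in the loop above can be exercised directly. A doctest-style sketch, illustrative only and assuming NumPy and SciPy are importable (it is not part of the vendored file):

>>> import numpy as np
>>> from scipy.linalg import expm
>>> d = np.diag([1.0, 2.0])
>>> np.allclose(expm(d), np.diag(np.exp([1.0, 2.0])))
True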
+ # Am will be mutated by pick_pade_structure + Am[0, :, :] = aw + m, s = pick_pade_structure(Am) + + if s != 0: # scaling needed + Am[:4] *= [[[2**(-s)]], [[4**(-s)]], [[16**(-s)]], [[64**(-s)]]] + + pade_UV_calc(Am, n, m) + eAw = Am[0] + + if s != 0: # squaring needed + + if (lu[1] == 0) or (lu[0] == 0): # lower/upper triangular + # This branch implements Code Fragment 2.1 of [1] + + diag_aw = np.diag(aw) + # einsum returns a writable view + np.einsum('ii->i', eAw)[:] = np.exp(diag_aw * 2**(-s)) + # super/sub diagonal + sd = np.diag(aw, k=-1 if lu[1] == 0 else 1) + + for i in range(s-1, -1, -1): + eAw = eAw @ eAw + + # diagonal + np.einsum('ii->i', eAw)[:] = np.exp(diag_aw * 2.**(-i)) + exp_sd = _exp_sinch(diag_aw * (2.**(-i))) * (sd * 2**(-i)) + if lu[1] == 0: # lower + np.einsum('ii->i', eAw[1:, :-1])[:] = exp_sd + else: # upper + np.einsum('ii->i', eAw[:-1, 1:])[:] = exp_sd + + else: # generic + for _ in range(s): + eAw = eAw @ eAw + + # Zero out the entries from np.empty in case of triangular input + if (lu[0] == 0) or (lu[1] == 0): + eA[ind] = np.triu(eAw) if lu[0] == 0 else np.tril(eAw) + else: + eA[ind] = eAw + + return eA + + +def _exp_sinch(x): + # Higham's formula (10.42), might overflow, see GH-11839 + lexp_diff = np.diff(np.exp(x)) + l_diff = np.diff(x) + mask_z = l_diff == 0. + lexp_diff[~mask_z] /= l_diff[~mask_z] + lexp_diff[mask_z] = np.exp(x[:-1][mask_z]) + return lexp_diff + + +def cosm(A): + """ + Compute the matrix cosine. + + This routine uses expm to compute the matrix exponentials. + + Parameters + ---------- + A : (N, N) array_like + Input array + + Returns + ------- + cosm : (N, N) ndarray + Matrix cosine of A + + Examples + -------- + >>> import numpy as np + >>> from scipy.linalg import expm, sinm, cosm + + Euler's identity (exp(i*theta) = cos(theta) + i*sin(theta)) + applied to a matrix: + + >>> a = np.array([[1.0, 2.0], [-1.0, 3.0]]) + >>> expm(1j*a) + array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j], + [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]]) + >>> cosm(a) + 1j*sinm(a) + array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j], + [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]]) + + """ + A = _asarray_square(A) + if np.iscomplexobj(A): + return 0.5*(expm(1j*A) + expm(-1j*A)) + else: + return expm(1j*A).real + + +def sinm(A): + """ + Compute the matrix sine. + + This routine uses expm to compute the matrix exponentials. + + Parameters + ---------- + A : (N, N) array_like + Input array. + + Returns + ------- + sinm : (N, N) ndarray + Matrix sine of `A` + + Examples + -------- + >>> import numpy as np + >>> from scipy.linalg import expm, sinm, cosm + + Euler's identity (exp(i*theta) = cos(theta) + i*sin(theta)) + applied to a matrix: + + >>> a = np.array([[1.0, 2.0], [-1.0, 3.0]]) + >>> expm(1j*a) + array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j], + [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]]) + >>> cosm(a) + 1j*sinm(a) + array([[ 0.42645930+1.89217551j, -2.13721484-0.97811252j], + [ 1.06860742+0.48905626j, -1.71075555+0.91406299j]]) + + """ + A = _asarray_square(A) + if np.iscomplexobj(A): + return -0.5j*(expm(1j*A) - expm(-1j*A)) + else: + return expm(1j*A).imag + + +def tanm(A): + """ + Compute the matrix tangent. + + This routine uses expm to compute the matrix exponentials. + + Parameters + ---------- + A : (N, N) array_like + Input array. 
+ + Returns + ------- + tanm : (N, N) ndarray + Matrix tangent of `A` + + Examples + -------- + >>> import numpy as np + >>> from scipy.linalg import tanm, sinm, cosm + >>> a = np.array([[1.0, 3.0], [1.0, 4.0]]) + >>> t = tanm(a) + >>> t + array([[ -2.00876993, -8.41880636], + [ -2.80626879, -10.42757629]]) + + Verify tanm(a) = sinm(a).dot(inv(cosm(a))) + + >>> s = sinm(a) + >>> c = cosm(a) + >>> s.dot(np.linalg.inv(c)) + array([[ -2.00876993, -8.41880636], + [ -2.80626879, -10.42757629]]) + + """ + A = _asarray_square(A) + return _maybe_real(A, solve(cosm(A), sinm(A))) + + +def coshm(A): + """ + Compute the hyperbolic matrix cosine. + + This routine uses expm to compute the matrix exponentials. + + Parameters + ---------- + A : (N, N) array_like + Input array. + + Returns + ------- + coshm : (N, N) ndarray + Hyperbolic matrix cosine of `A` + + Examples + -------- + >>> import numpy as np + >>> from scipy.linalg import tanhm, sinhm, coshm + >>> a = np.array([[1.0, 3.0], [1.0, 4.0]]) + >>> c = coshm(a) + >>> c + array([[ 11.24592233, 38.76236492], + [ 12.92078831, 50.00828725]]) + + Verify tanhm(a) = sinhm(a).dot(inv(coshm(a))) + + >>> t = tanhm(a) + >>> s = sinhm(a) + >>> t - s.dot(np.linalg.inv(c)) + array([[ 2.72004641e-15, 4.55191440e-15], + [ 0.00000000e+00, -5.55111512e-16]]) + + """ + A = _asarray_square(A) + return _maybe_real(A, 0.5 * (expm(A) + expm(-A))) + + +def sinhm(A): + """ + Compute the hyperbolic matrix sine. + + This routine uses expm to compute the matrix exponentials. + + Parameters + ---------- + A : (N, N) array_like + Input array. + + Returns + ------- + sinhm : (N, N) ndarray + Hyperbolic matrix sine of `A` + + Examples + -------- + >>> import numpy as np + >>> from scipy.linalg import tanhm, sinhm, coshm + >>> a = np.array([[1.0, 3.0], [1.0, 4.0]]) + >>> s = sinhm(a) + >>> s + array([[ 10.57300653, 39.28826594], + [ 13.09608865, 49.86127247]]) + + Verify tanhm(a) = sinhm(a).dot(inv(coshm(a))) + + >>> t = tanhm(a) + >>> c = coshm(a) + >>> t - s.dot(np.linalg.inv(c)) + array([[ 2.72004641e-15, 4.55191440e-15], + [ 0.00000000e+00, -5.55111512e-16]]) + + """ + A = _asarray_square(A) + return _maybe_real(A, 0.5 * (expm(A) - expm(-A))) + + +def tanhm(A): + """ + Compute the hyperbolic matrix tangent. + + This routine uses expm to compute the matrix exponentials. + + Parameters + ---------- + A : (N, N) array_like + Input array + + Returns + ------- + tanhm : (N, N) ndarray + Hyperbolic matrix tangent of `A` + + Examples + -------- + >>> import numpy as np + >>> from scipy.linalg import tanhm, sinhm, coshm + >>> a = np.array([[1.0, 3.0], [1.0, 4.0]]) + >>> t = tanhm(a) + >>> t + array([[ 0.3428582 , 0.51987926], + [ 0.17329309, 0.86273746]]) + + Verify tanhm(a) = sinhm(a).dot(inv(coshm(a))) + + >>> s = sinhm(a) + >>> c = coshm(a) + >>> t - s.dot(np.linalg.inv(c)) + array([[ 2.72004641e-15, 4.55191440e-15], + [ 0.00000000e+00, -5.55111512e-16]]) + + """ + A = _asarray_square(A) + return _maybe_real(A, solve(coshm(A), sinhm(A))) + + +def funm(A, func, disp=True): + """ + Evaluate a matrix function specified by a callable. + + Returns the value of matrix-valued function ``f`` at `A`. The + function ``f`` is an extension of the scalar-valued function `func` + to matrices. + + Parameters + ---------- + A : (N, N) array_like + Matrix at which to evaluate the function + func : callable + Callable object that evaluates a scalar function f. + Must be vectorized (eg. using vectorize). 
+ disp : bool, optional + Print warning if error in the result is estimated large + instead of returning estimated error. (Default: True) + + Returns + ------- + funm : (N, N) ndarray + Value of the matrix function specified by func evaluated at `A` + errest : float + (if disp == False) + + 1-norm of the estimated error, ||err||_1 / ||A||_1 + + Notes + ----- + This function implements the general algorithm based on Schur decomposition + (Algorithm 9.1.1. in [1]_). + + If the input matrix is known to be diagonalizable, then relying on the + eigendecomposition is likely to be faster. For example, if your matrix is + Hermitian, you can do + + >>> from scipy.linalg import eigh + >>> def funm_herm(a, func, check_finite=False): + ... w, v = eigh(a, check_finite=check_finite) + ... ## if you further know that your matrix is positive semidefinite, + ... ## you can optionally guard against precision errors by doing + ... # w = np.maximum(w, 0) + ... w = func(w) + ... return (v * w).dot(v.conj().T) + + References + ---------- + .. [1] Gene H. Golub, Charles F. van Loan, Matrix Computations 4th ed. + + Examples + -------- + >>> import numpy as np + >>> from scipy.linalg import funm + >>> a = np.array([[1.0, 3.0], [1.0, 4.0]]) + >>> funm(a, lambda x: x*x) + array([[ 4., 15.], + [ 5., 19.]]) + >>> a.dot(a) + array([[ 4., 15.], + [ 5., 19.]]) + + """ + A = _asarray_square(A) + # Perform Shur decomposition (lapack ?gees) + T, Z = schur(A) + T, Z = rsf2csf(T, Z) + n, n = T.shape + F = diag(func(diag(T))) # apply function to diagonal elements + F = F.astype(T.dtype.char) # e.g., when F is real but T is complex + + minden = abs(T[0, 0]) + + # implement Algorithm 11.1.1 from Golub and Van Loan + # "matrix Computations." + for p in range(1, n): + for i in range(1, n-p+1): + j = i + p + s = T[i-1, j-1] * (F[j-1, j-1] - F[i-1, i-1]) + ksl = slice(i, j-1) + val = dot(T[i-1, ksl], F[ksl, j-1]) - dot(F[i-1, ksl], T[ksl, j-1]) + s = s + val + den = T[j-1, j-1] - T[i-1, i-1] + if den != 0.0: + s = s / den + F[i-1, j-1] = s + minden = min(minden, abs(den)) + + F = dot(dot(Z, F), transpose(conjugate(Z))) + F = _maybe_real(A, F) + + tol = {0: feps, 1: eps}[_array_precision[F.dtype.char]] + if minden == 0.0: + minden = tol + err = min(1, max(tol, (tol/minden)*norm(triu(T, 1), 1))) + if prod(ravel(logical_not(isfinite(F))), axis=0): + err = np.inf + if disp: + if err > 1000*tol: + print("funm result may be inaccurate, approximate err =", err) + return F + else: + return F, err + + +def signm(A, disp=True): + """ + Matrix sign function. + + Extension of the scalar sign(x) to matrices. + + Parameters + ---------- + A : (N, N) array_like + Matrix at which to evaluate the sign function + disp : bool, optional + Print warning if error in the result is estimated large + instead of returning estimated error. 
(Default: True) + + Returns + ------- + signm : (N, N) ndarray + Value of the sign function at `A` + errest : float + (if disp == False) + + 1-norm of the estimated error, ||err||_1 / ||A||_1 + + Examples + -------- + >>> from scipy.linalg import signm, eigvals + >>> a = [[1,2,3], [1,2,1], [1,1,1]] + >>> eigvals(a) + array([ 4.12488542+0.j, -0.76155718+0.j, 0.63667176+0.j]) + >>> eigvals(signm(a)) + array([-1.+0.j, 1.+0.j, 1.+0.j]) + + """ + A = _asarray_square(A) + + def rounded_sign(x): + rx = np.real(x) + if rx.dtype.char == 'f': + c = 1e3*feps*amax(x) + else: + c = 1e3*eps*amax(x) + return sign((absolute(rx) > c) * rx) + result, errest = funm(A, rounded_sign, disp=0) + errtol = {0: 1e3*feps, 1: 1e3*eps}[_array_precision[result.dtype.char]] + if errest < errtol: + return result + + # Handle signm of defective matrices: + + # See "E.D.Denman and J.Leyva-Ramos, Appl.Math.Comp., + # 8:237-250,1981" for how to improve the following (currently a + # rather naive) iteration process: + + # a = result # sometimes iteration converges faster but where?? + + # Shifting to avoid zero eigenvalues. How to ensure that shifting does + # not change the spectrum too much? + vals = svd(A, compute_uv=False) + max_sv = np.amax(vals) + # min_nonzero_sv = vals[(vals>max_sv*errtol).tolist().count(1)-1] + # c = 0.5/min_nonzero_sv + c = 0.5/max_sv + S0 = A + c*np.identity(A.shape[0]) + prev_errest = errest + for i in range(100): + iS0 = inv(S0) + S0 = 0.5*(S0 + iS0) + Pp = 0.5*(dot(S0, S0)+S0) + errest = norm(dot(Pp, Pp)-Pp, 1) + if errest < errtol or prev_errest == errest: + break + prev_errest = errest + if disp: + if not isfinite(errest) or errest >= errtol: + print("signm result may be inaccurate, approximate err =", errest) + return S0 + else: + return S0, errest + + +def khatri_rao(a, b): + r""" + Khatri-rao product + + A column-wise Kronecker product of two matrices + + Parameters + ---------- + a : (n, k) array_like + Input array + b : (m, k) array_like + Input array + + Returns + ------- + c: (n*m, k) ndarray + Khatri-rao product of `a` and `b`. + + See Also + -------- + kron : Kronecker product + + Notes + ----- + The mathematical definition of the Khatri-Rao product is: + + .. 
math:: + + (A_{ij} \bigotimes B_{ij})_{ij} + + which is the Kronecker product of every column of A and B, e.g.:: + + c = np.vstack([np.kron(a[:, k], b[:, k]) for k in range(b.shape[1])]).T + + Examples + -------- + >>> import numpy as np + >>> from scipy import linalg + >>> a = np.array([[1, 2, 3], [4, 5, 6]]) + >>> b = np.array([[3, 4, 5], [6, 7, 8], [2, 3, 9]]) + >>> linalg.khatri_rao(a, b) + array([[ 3, 8, 15], + [ 6, 14, 24], + [ 2, 6, 27], + [12, 20, 30], + [24, 35, 48], + [ 8, 15, 54]]) + + """ + a = np.asarray(a) + b = np.asarray(b) + + if not (a.ndim == 2 and b.ndim == 2): + raise ValueError("The both arrays should be 2-dimensional.") + + if not a.shape[1] == b.shape[1]: + raise ValueError("The number of columns for both arrays " + "should be equal.") + + # c = np.vstack([np.kron(a[:, k], b[:, k]) for k in range(b.shape[1])]).T + c = a[..., :, np.newaxis, :] * b[..., np.newaxis, :, :] + return c.reshape((-1,) + c.shape[2:]) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_matfuncs_expm.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_matfuncs_expm.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..85da0f54cabbc67f2bed480376b0e3ce936577d6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_matfuncs_expm.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_matfuncs_sqrtm.py b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_matfuncs_sqrtm.py new file mode 100644 index 0000000000000000000000000000000000000000..9c84ca573bd29641a8b1ba32163cddd15854abf9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_matfuncs_sqrtm.py @@ -0,0 +1,214 @@ +""" +Matrix square root for general matrices and for upper triangular matrices. + +This module exists to avoid cyclic imports. + +""" +__all__ = ['sqrtm'] + +import numpy as np + +from scipy._lib._util import _asarray_validated + +# Local imports +from ._misc import norm +from .lapack import ztrsyl, dtrsyl +from ._decomp_schur import schur, rsf2csf + + + +class SqrtmError(np.linalg.LinAlgError): + pass + + +from ._matfuncs_sqrtm_triu import within_block_loop # noqa: E402 + + +def _sqrtm_triu(T, blocksize=64): + """ + Matrix square root of an upper triangular matrix. + + This is a helper function for `sqrtm` and `logm`. + + Parameters + ---------- + T : (N, N) array_like upper triangular + Matrix whose square root to evaluate + blocksize : int, optional + If the blocksize is not degenerate with respect to the + size of the input array, then use a blocked algorithm. (Default: 64) + + Returns + ------- + sqrtm : (N, N) ndarray + Value of the sqrt function at `T` + + References + ---------- + .. [1] Edvin Deadman, Nicholas J. Higham, Rui Ralha (2013) + "Blocked Schur Algorithms for Computing the Matrix Square Root, + Lecture Notes in Computer Science, 7782. pp. 171-182. + + """ + T_diag = np.diag(T) + keep_it_real = np.isrealobj(T) and np.min(T_diag) >= 0 + + # Cast to complex as necessary + ensure double precision + if not keep_it_real: + T = np.asarray(T, dtype=np.complex128, order="C") + T_diag = np.asarray(T_diag, dtype=np.complex128) + else: + T = np.asarray(T, dtype=np.float64, order="C") + T_diag = np.asarray(T_diag, dtype=np.float64) + + R = np.diag(np.sqrt(T_diag)) + + # Compute the number of blocks to use; use at least one block. 
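A worked illustration of the block partition computed in the statements that follow; this sketch is illustrative only and not taken from the vendored file. With n = 10 and blocksize = 4 the matrix is split into two blocks of five rows each:

>>> n, blocksize = 10, 4
>>> nblocks = max(n // blocksize, 1)
>>> bsmall, nlarge = divmod(n, nblocks)
>>> blarge, nsmall = bsmall + 1, nblocks - nlarge
>>> (nsmall, bsmall), (nlarge, blarge)
((2, 5), (0, 6))
>>> nsmall * bsmall + nlarge * blarge == n
True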
+ n, n = T.shape + nblocks = max(n // blocksize, 1) + + # Compute the smaller of the two sizes of blocks that + # we will actually use, and compute the number of large blocks. + bsmall, nlarge = divmod(n, nblocks) + blarge = bsmall + 1 + nsmall = nblocks - nlarge + if nsmall * bsmall + nlarge * blarge != n: + raise Exception('internal inconsistency') + + # Define the index range covered by each block. + start_stop_pairs = [] + start = 0 + for count, size in ((nsmall, bsmall), (nlarge, blarge)): + for i in range(count): + start_stop_pairs.append((start, start + size)) + start += size + + # Within-block interactions (Cythonized) + try: + within_block_loop(R, T, start_stop_pairs, nblocks) + except RuntimeError as e: + raise SqrtmError(*e.args) from e + + # Between-block interactions (Cython would give no significant speedup) + for j in range(nblocks): + jstart, jstop = start_stop_pairs[j] + for i in range(j-1, -1, -1): + istart, istop = start_stop_pairs[i] + S = T[istart:istop, jstart:jstop] + if j - i > 1: + S = S - R[istart:istop, istop:jstart].dot(R[istop:jstart, + jstart:jstop]) + + # Invoke LAPACK. + # For more details, see the solve_sylvester implementation + # and the fortran dtrsyl and ztrsyl docs. + Rii = R[istart:istop, istart:istop] + Rjj = R[jstart:jstop, jstart:jstop] + if keep_it_real: + x, scale, info = dtrsyl(Rii, Rjj, S) + else: + x, scale, info = ztrsyl(Rii, Rjj, S) + R[istart:istop, jstart:jstop] = x * scale + + # Return the matrix square root. + return R + + +def sqrtm(A, disp=True, blocksize=64): + """ + Matrix square root. + + Parameters + ---------- + A : (N, N) array_like + Matrix whose square root to evaluate + disp : bool, optional + Print warning if error in the result is estimated large + instead of returning estimated error. (Default: True) + blocksize : integer, optional + If the blocksize is not degenerate with respect to the + size of the input array, then use a blocked algorithm. (Default: 64) + + Returns + ------- + sqrtm : (N, N) ndarray + Value of the sqrt function at `A`. The dtype is float or complex. + The precision (data size) is determined based on the precision of + input `A`. When the dtype is float, the precision is the same as `A`. + When the dtype is complex, the precision is double that of `A`. The + precision might be clipped by each dtype precision range. + + errest : float + (if disp == False) + + Frobenius norm of the estimated error, ||err||_F / ||A||_F + + References + ---------- + .. [1] Edvin Deadman, Nicholas J. Higham, Rui Ralha (2013) + "Blocked Schur Algorithms for Computing the Matrix Square Root, + Lecture Notes in Computer Science, 7782. pp. 171-182. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.linalg import sqrtm + >>> a = np.array([[1.0, 3.0], [1.0, 4.0]]) + >>> r = sqrtm(a) + >>> r + array([[ 0.75592895, 1.13389342], + [ 0.37796447, 1.88982237]]) + >>> r.dot(r) + array([[ 1., 3.], + [ 1., 4.]]) + + """ + byte_size = np.asarray(A).dtype.itemsize + A = _asarray_validated(A, check_finite=True, as_inexact=True) + if len(A.shape) != 2: + raise ValueError("Non-matrix input to matrix function.") + if blocksize < 1: + raise ValueError("The blocksize should be at least 1.") + keep_it_real = np.isrealobj(A) + if keep_it_real: + T, Z = schur(A) + d0 = np.diagonal(T) + d1 = np.diagonal(T, -1) + eps = np.finfo(T.dtype).eps + needs_conversion = abs(d1) > eps * (abs(d0[1:]) + abs(d0[:-1])) + if needs_conversion.any(): + T, Z = rsf2csf(T, Z) + else: + T, Z = schur(A, output='complex') + failflag = False + try: + R = _sqrtm_triu(T, blocksize=blocksize) + ZH = np.conjugate(Z).T + X = Z.dot(R).dot(ZH) + if not np.iscomplexobj(X): + # float byte size range: f2 ~ f16 + X = X.astype(f"f{np.clip(byte_size, 2, 16)}", copy=False) + else: + # complex byte size range: c8 ~ c32. + # c32(complex256) might not be supported in some environments. + if hasattr(np, 'complex256'): + X = X.astype(f"c{np.clip(byte_size*2, 8, 32)}", copy=False) + else: + X = X.astype(f"c{np.clip(byte_size*2, 8, 16)}", copy=False) + except SqrtmError: + failflag = True + X = np.empty_like(A) + X.fill(np.nan) + + if disp: + if failflag: + print("Failed to find a square root.") + return X + else: + try: + arg2 = norm(X.dot(X) - A, 'fro')**2 / norm(A, 'fro') + except ValueError: + # NaNs in matrix + arg2 = np.inf + + return X, arg2 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_misc.py b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_misc.py new file mode 100644 index 0000000000000000000000000000000000000000..79341f78435945d5562571e55b5e28c57cd44e21 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_misc.py @@ -0,0 +1,191 @@ +import numpy as np +from numpy.linalg import LinAlgError +from .blas import get_blas_funcs +from .lapack import get_lapack_funcs + +__all__ = ['LinAlgError', 'LinAlgWarning', 'norm'] + + +class LinAlgWarning(RuntimeWarning): + """ + The warning emitted when a linear algebra related operation is close + to fail conditions of the algorithm or loss of accuracy is expected. + """ + pass + + +def norm(a, ord=None, axis=None, keepdims=False, check_finite=True): + """ + Matrix or vector norm. + + This function is able to return one of eight different matrix norms, + or one of an infinite number of vector norms (described below), depending + on the value of the ``ord`` parameter. For tensors with rank different from + 1 or 2, only `ord=None` is supported. + + Parameters + ---------- + a : array_like + Input array. If `axis` is None, `a` must be 1-D or 2-D, unless `ord` + is None. If both `axis` and `ord` are None, the 2-norm of + ``a.ravel`` will be returned. + ord : {int, inf, -inf, 'fro', 'nuc', None}, optional + Order of the norm (see table under ``Notes``). inf means NumPy's + `inf` object. + axis : {int, 2-tuple of ints, None}, optional + If `axis` is an integer, it specifies the axis of `a` along which to + compute the vector norms. If `axis` is a 2-tuple, it specifies the + axes that hold 2-D matrices, and the matrix norms of these matrices + are computed. If `axis` is None then either a vector norm (when `a` + is 1-D) or a matrix norm (when `a` is 2-D) is returned. 
+ keepdims : bool, optional + If this is set to True, the axes which are normed over are left in the + result as dimensions with size one. With this option the result will + broadcast correctly against the original `a`. + check_finite : bool, optional + Whether to check that the input matrix contains only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + n : float or ndarray + Norm of the matrix or vector(s). + + Notes + ----- + For values of ``ord <= 0``, the result is, strictly speaking, not a + mathematical 'norm', but it may still be useful for various numerical + purposes. + + The following norms can be calculated: + + ===== ============================ ========================== + ord norm for matrices norm for vectors + ===== ============================ ========================== + None Frobenius norm 2-norm + 'fro' Frobenius norm -- + 'nuc' nuclear norm -- + inf max(sum(abs(a), axis=1)) max(abs(a)) + -inf min(sum(abs(a), axis=1)) min(abs(a)) + 0 -- sum(a != 0) + 1 max(sum(abs(a), axis=0)) as below + -1 min(sum(abs(a), axis=0)) as below + 2 2-norm (largest sing. value) as below + -2 smallest singular value as below + other -- sum(abs(a)**ord)**(1./ord) + ===== ============================ ========================== + + The Frobenius norm is given by [1]_: + + :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}` + + The nuclear norm is the sum of the singular values. + + Both the Frobenius and nuclear norm orders are only defined for + matrices. + + References + ---------- + .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*, + Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15 + + Examples + -------- + >>> import numpy as np + >>> from scipy.linalg import norm + >>> a = np.arange(9) - 4.0 + >>> a + array([-4., -3., -2., -1., 0., 1., 2., 3., 4.]) + >>> b = a.reshape((3, 3)) + >>> b + array([[-4., -3., -2.], + [-1., 0., 1.], + [ 2., 3., 4.]]) + + >>> norm(a) + 7.745966692414834 + >>> norm(b) + 7.745966692414834 + >>> norm(b, 'fro') + 7.745966692414834 + >>> norm(a, np.inf) + 4 + >>> norm(b, np.inf) + 9 + >>> norm(a, -np.inf) + 0 + >>> norm(b, -np.inf) + 2 + + >>> norm(a, 1) + 20 + >>> norm(b, 1) + 7 + >>> norm(a, -1) + -4.6566128774142013e-010 + >>> norm(b, -1) + 6 + >>> norm(a, 2) + 7.745966692414834 + >>> norm(b, 2) + 7.3484692283495345 + + >>> norm(a, -2) + 0 + >>> norm(b, -2) + 1.8570331885190563e-016 + >>> norm(a, 3) + 5.8480354764257312 + >>> norm(a, -3) + 0 + + """ + # Differs from numpy only in non-finite handling and the use of blas. + if check_finite: + a = np.asarray_chkfinite(a) + else: + a = np.asarray(a) + + if a.size and a.dtype.char in 'fdFD' and axis is None and not keepdims: + + if ord in (None, 2) and (a.ndim == 1): + # use blas for fast and stable euclidean norm + nrm2 = get_blas_funcs('nrm2', dtype=a.dtype, ilp64='preferred') + return nrm2(a) + + if a.ndim == 2: + # Use lapack for a couple fast matrix norms. + # For some reason the *lange frobenius norm is slow. + lange_args = None + # Make sure this works if the user uses the axis keywords + # to apply the norm to the transpose. 
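The LAPACK ``*lange`` dispatch just below returns the usual column-sum and row-sum matrix norms; a small doctest-style check, illustrative only and assuming SciPy is importable:

>>> import numpy as np
>>> from scipy.linalg import norm
>>> b = np.arange(9, dtype=float).reshape(3, 3)
>>> float(norm(b, 1)), float(np.abs(b).sum(axis=0).max())
(15.0, 15.0)
>>> float(norm(b, np.inf)), float(np.abs(b).sum(axis=1).max())
(21.0, 21.0)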
+ if ord == 1: + if np.isfortran(a): + lange_args = '1', a + elif np.isfortran(a.T): + lange_args = 'i', a.T + elif ord == np.inf: + if np.isfortran(a): + lange_args = 'i', a + elif np.isfortran(a.T): + lange_args = '1', a.T + if lange_args: + lange = get_lapack_funcs('lange', dtype=a.dtype, ilp64='preferred') + return lange(*lange_args) + + # fall back to numpy in every other case + return np.linalg.norm(a, ord=ord, axis=axis, keepdims=keepdims) + + +def _datacopied(arr, original): + """ + Strict check for `arr` not sharing any data with `original`, + under the assumption that arr = asarray(original) + + """ + if arr is original: + return False + if not isinstance(original, np.ndarray) and hasattr(original, '__array__'): + return False + return arr.base is None diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_procrustes.py b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_procrustes.py new file mode 100644 index 0000000000000000000000000000000000000000..1e835f15681c74fedbadb6a621ef6e36a2f4c3f5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_procrustes.py @@ -0,0 +1,90 @@ +""" +Solve the orthogonal Procrustes problem. + +""" +import numpy as np +from ._decomp_svd import svd + + +__all__ = ['orthogonal_procrustes'] + + +def orthogonal_procrustes(A, B, check_finite=True): + """ + Compute the matrix solution of the orthogonal Procrustes problem. + + Given matrices A and B of equal shape, find an orthogonal matrix R + that most closely maps A to B using the algorithm given in [1]_. + + Parameters + ---------- + A : (M, N) array_like + Matrix to be mapped. + B : (M, N) array_like + Target matrix. + check_finite : bool, optional + Whether to check that the input matrices contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + + Returns + ------- + R : (N, N) ndarray + The matrix solution of the orthogonal Procrustes problem. + Minimizes the Frobenius norm of ``(A @ R) - B``, subject to + ``R.T @ R = I``. + scale : float + Sum of the singular values of ``A.T @ B``. + + Raises + ------ + ValueError + If the input array shapes don't match or if check_finite is True and + the arrays contain Inf or NaN. + + Notes + ----- + Note that unlike higher level Procrustes analyses of spatial data, this + function only uses orthogonal transformations like rotations and + reflections, and it does not use scaling or translation. + + .. versionadded:: 0.15.0 + + References + ---------- + .. [1] Peter H. Schonemann, "A generalized solution of the orthogonal + Procrustes problem", Psychometrica -- Vol. 31, No. 1, March, 1966. 
+ :doi:`10.1007/BF02289451` + + Examples + -------- + >>> import numpy as np + >>> from scipy.linalg import orthogonal_procrustes + >>> A = np.array([[ 2, 0, 1], [-2, 0, 0]]) + + Flip the order of columns and check for the anti-diagonal mapping + + >>> R, sca = orthogonal_procrustes(A, np.fliplr(A)) + >>> R + array([[-5.34384992e-17, 0.00000000e+00, 1.00000000e+00], + [ 0.00000000e+00, 1.00000000e+00, 0.00000000e+00], + [ 1.00000000e+00, 0.00000000e+00, -7.85941422e-17]]) + >>> sca + 9.0 + + """ + if check_finite: + A = np.asarray_chkfinite(A) + B = np.asarray_chkfinite(B) + else: + A = np.asanyarray(A) + B = np.asanyarray(B) + if A.ndim != 2: + raise ValueError('expected ndim to be 2, but observed %s' % A.ndim) + if A.shape != B.shape: + raise ValueError(f'the shapes of A and B differ ({A.shape} vs {B.shape})') + # Be clever with transposes, with the intention to save memory. + u, w, vt = svd(B.T.dot(A).T) + R = u.dot(vt) + scale = w.sum() + return R, scale diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_special_matrices.py b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_special_matrices.py new file mode 100644 index 0000000000000000000000000000000000000000..df9b767481d16f880c69516b62ff2388594e57e7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/_special_matrices.py @@ -0,0 +1,1257 @@ +import math + +import numpy as np +from numpy.lib.stride_tricks import as_strided + +__all__ = ['toeplitz', 'circulant', 'hankel', + 'hadamard', 'leslie', 'kron', 'block_diag', 'companion', + 'helmert', 'hilbert', 'invhilbert', 'pascal', 'invpascal', 'dft', + 'fiedler', 'fiedler_companion', 'convolution_matrix'] + + +# ----------------------------------------------------------------------------- +# matrix construction functions +# ----------------------------------------------------------------------------- + + +def toeplitz(c, r=None): + """ + Construct a Toeplitz matrix. + + The Toeplitz matrix has constant diagonals, with c as its first column + and r as its first row. If r is not given, ``r == conjugate(c)`` is + assumed. + + Parameters + ---------- + c : array_like + First column of the matrix. Whatever the actual shape of `c`, it + will be converted to a 1-D array. + r : array_like, optional + First row of the matrix. If None, ``r = conjugate(c)`` is assumed; + in this case, if c[0] is real, the result is a Hermitian matrix. + r[0] is ignored; the first row of the returned matrix is + ``[c[0], r[1:]]``. Whatever the actual shape of `r`, it will be + converted to a 1-D array. + + Returns + ------- + A : (len(c), len(r)) ndarray + The Toeplitz matrix. Dtype is the same as ``(c[0] + r[0]).dtype``. + + See Also + -------- + circulant : circulant matrix + hankel : Hankel matrix + solve_toeplitz : Solve a Toeplitz system. + + Notes + ----- + The behavior when `c` or `r` is a scalar, or when `c` is complex and + `r` is None, was changed in version 0.8.0. The behavior in previous + versions was undocumented and is no longer supported. + + Examples + -------- + >>> from scipy.linalg import toeplitz + >>> toeplitz([1,2,3], [1,4,5,6]) + array([[1, 4, 5, 6], + [2, 1, 4, 5], + [3, 2, 1, 4]]) + >>> toeplitz([1.0, 2+3j, 4-1j]) + array([[ 1.+0.j, 2.-3.j, 4.+1.j], + [ 2.+3.j, 1.+0.j, 2.-3.j], + [ 4.-1.j, 2.+3.j, 1.+0.j]]) + + """ + c = np.asarray(c).ravel() + if r is None: + r = c.conjugate() + else: + r = np.asarray(r).ravel() + # Form a 1-D array containing a reversed c followed by r[1:] that could be + # strided to give us toeplitz matrix. 
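The stride trick described in the comment above can be reproduced in isolation. The sketch below mirrors the statements that follow for ``c = [1, 2, 3]`` and ``r = [1, 4, 5, 6]``; it is illustrative only and not part of the vendored file:

>>> import numpy as np
>>> from numpy.lib.stride_tricks import as_strided
>>> c, r = np.array([1, 2, 3]), np.array([1, 4, 5, 6])
>>> vals = np.concatenate((c[::-1], r[1:]))
>>> vals
array([3, 2, 1, 4, 5, 6])
>>> n = vals.strides[0]
>>> as_strided(vals[len(c)-1:], shape=(len(c), len(r)), strides=(-n, n))
array([[1, 4, 5, 6],
       [2, 1, 4, 5],
       [3, 2, 1, 4]])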
+ vals = np.concatenate((c[::-1], r[1:])) + out_shp = len(c), len(r) + n = vals.strides[0] + return as_strided(vals[len(c)-1:], shape=out_shp, strides=(-n, n)).copy() + + +def circulant(c): + """ + Construct a circulant matrix. + + Parameters + ---------- + c : (N,) array_like + 1-D array, the first column of the matrix. + + Returns + ------- + A : (N, N) ndarray + A circulant matrix whose first column is `c`. + + See Also + -------- + toeplitz : Toeplitz matrix + hankel : Hankel matrix + solve_circulant : Solve a circulant system. + + Notes + ----- + .. versionadded:: 0.8.0 + + Examples + -------- + >>> from scipy.linalg import circulant + >>> circulant([1, 2, 3]) + array([[1, 3, 2], + [2, 1, 3], + [3, 2, 1]]) + + """ + c = np.asarray(c).ravel() + # Form an extended array that could be strided to give circulant version + c_ext = np.concatenate((c[::-1], c[:0:-1])) + L = len(c) + n = c_ext.strides[0] + return as_strided(c_ext[L-1:], shape=(L, L), strides=(-n, n)).copy() + + +def hankel(c, r=None): + """ + Construct a Hankel matrix. + + The Hankel matrix has constant anti-diagonals, with `c` as its + first column and `r` as its last row. If `r` is not given, then + `r = zeros_like(c)` is assumed. + + Parameters + ---------- + c : array_like + First column of the matrix. Whatever the actual shape of `c`, it + will be converted to a 1-D array. + r : array_like, optional + Last row of the matrix. If None, ``r = zeros_like(c)`` is assumed. + r[0] is ignored; the last row of the returned matrix is + ``[c[-1], r[1:]]``. Whatever the actual shape of `r`, it will be + converted to a 1-D array. + + Returns + ------- + A : (len(c), len(r)) ndarray + The Hankel matrix. Dtype is the same as ``(c[0] + r[0]).dtype``. + + See Also + -------- + toeplitz : Toeplitz matrix + circulant : circulant matrix + + Examples + -------- + >>> from scipy.linalg import hankel + >>> hankel([1, 17, 99]) + array([[ 1, 17, 99], + [17, 99, 0], + [99, 0, 0]]) + >>> hankel([1,2,3,4], [4,7,7,8,9]) + array([[1, 2, 3, 4, 7], + [2, 3, 4, 7, 7], + [3, 4, 7, 7, 8], + [4, 7, 7, 8, 9]]) + + """ + c = np.asarray(c).ravel() + if r is None: + r = np.zeros_like(c) + else: + r = np.asarray(r).ravel() + # Form a 1-D array of values to be used in the matrix, containing `c` + # followed by r[1:]. + vals = np.concatenate((c, r[1:])) + # Stride on concatenated array to get hankel matrix + out_shp = len(c), len(r) + n = vals.strides[0] + return as_strided(vals, shape=out_shp, strides=(n, n)).copy() + + +def hadamard(n, dtype=int): + """ + Construct an Hadamard matrix. + + Constructs an n-by-n Hadamard matrix, using Sylvester's + construction. `n` must be a power of 2. + + Parameters + ---------- + n : int + The order of the matrix. `n` must be a power of 2. + dtype : dtype, optional + The data type of the array to be constructed. + + Returns + ------- + H : (n, n) ndarray + The Hadamard matrix. + + Notes + ----- + .. versionadded:: 0.8.0 + + Examples + -------- + >>> from scipy.linalg import hadamard + >>> hadamard(2, dtype=complex) + array([[ 1.+0.j, 1.+0.j], + [ 1.+0.j, -1.-0.j]]) + >>> hadamard(4) + array([[ 1, 1, 1, 1], + [ 1, -1, 1, -1], + [ 1, 1, -1, -1], + [ 1, -1, -1, 1]]) + + """ + + # This function is a slightly modified version of the + # function contributed by Ivo in ticket #675. 
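Sylvester's construction used below doubles the order at each step, H_2k = [[H_k, H_k], [H_k, -H_k]]. An illustrative sketch (not from the vendored file) building the order-4 matrix by hand:

>>> import numpy as np
>>> H1 = np.array([[1]])
>>> H2 = np.vstack((np.hstack((H1, H1)), np.hstack((H1, -H1))))
>>> H4 = np.vstack((np.hstack((H2, H2)), np.hstack((H2, -H2))))
>>> H4
array([[ 1,  1,  1,  1],
       [ 1, -1,  1, -1],
       [ 1,  1, -1, -1],
       [ 1, -1, -1,  1]])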
+ + if n < 1: + lg2 = 0 + else: + lg2 = int(math.log(n, 2)) + if 2 ** lg2 != n: + raise ValueError("n must be an positive integer, and n must be " + "a power of 2") + + H = np.array([[1]], dtype=dtype) + + # Sylvester's construction + for i in range(0, lg2): + H = np.vstack((np.hstack((H, H)), np.hstack((H, -H)))) + + return H + + +def leslie(f, s): + """ + Create a Leslie matrix. + + Given the length n array of fecundity coefficients `f` and the length + n-1 array of survival coefficients `s`, return the associated Leslie + matrix. + + Parameters + ---------- + f : (N,) array_like + The "fecundity" coefficients. + s : (N-1,) array_like + The "survival" coefficients, has to be 1-D. The length of `s` + must be one less than the length of `f`, and it must be at least 1. + + Returns + ------- + L : (N, N) ndarray + The array is zero except for the first row, + which is `f`, and the first sub-diagonal, which is `s`. + The data-type of the array will be the data-type of ``f[0]+s[0]``. + + Notes + ----- + .. versionadded:: 0.8.0 + + The Leslie matrix is used to model discrete-time, age-structured + population growth [1]_ [2]_. In a population with `n` age classes, two sets + of parameters define a Leslie matrix: the `n` "fecundity coefficients", + which give the number of offspring per-capita produced by each age + class, and the `n` - 1 "survival coefficients", which give the + per-capita survival rate of each age class. + + References + ---------- + .. [1] P. H. Leslie, On the use of matrices in certain population + mathematics, Biometrika, Vol. 33, No. 3, 183--212 (Nov. 1945) + .. [2] P. H. Leslie, Some further notes on the use of matrices in + population mathematics, Biometrika, Vol. 35, No. 3/4, 213--245 + (Dec. 1948) + + Examples + -------- + >>> from scipy.linalg import leslie + >>> leslie([0.1, 2.0, 1.0, 0.1], [0.2, 0.8, 0.7]) + array([[ 0.1, 2. , 1. , 0.1], + [ 0.2, 0. , 0. , 0. ], + [ 0. , 0.8, 0. , 0. ], + [ 0. , 0. , 0.7, 0. ]]) + + """ + f = np.atleast_1d(f) + s = np.atleast_1d(s) + if f.ndim != 1: + raise ValueError("Incorrect shape for f. f must be 1D") + if s.ndim != 1: + raise ValueError("Incorrect shape for s. s must be 1D") + if f.size != s.size + 1: + raise ValueError("Incorrect lengths for f and s. The length" + " of s must be one less than the length of f.") + if s.size == 0: + raise ValueError("The length of s must be at least 1.") + + tmp = f[0] + s[0] + n = f.size + a = np.zeros((n, n), dtype=tmp.dtype) + a[0] = f + a[list(range(1, n)), list(range(0, n - 1))] = s + return a + + +def kron(a, b): + """ + Kronecker product. + + The result is the block matrix:: + + a[0,0]*b a[0,1]*b ... a[0,-1]*b + a[1,0]*b a[1,1]*b ... a[1,-1]*b + ... + a[-1,0]*b a[-1,1]*b ... a[-1,-1]*b + + Parameters + ---------- + a : (M, N) ndarray + Input array + b : (P, Q) ndarray + Input array + + Returns + ------- + A : (M*P, N*Q) ndarray + Kronecker product of `a` and `b`. + + Examples + -------- + >>> from numpy import array + >>> from scipy.linalg import kron + >>> kron(array([[1,2],[3,4]]), array([[1,1,1]])) + array([[1, 1, 1, 2, 2, 2], + [3, 3, 3, 4, 4, 4]]) + + """ + if not a.flags['CONTIGUOUS']: + a = np.reshape(a, a.shape) + if not b.flags['CONTIGUOUS']: + b = np.reshape(b, b.shape) + o = np.outer(a, b) + o = o.reshape(a.shape + b.shape) + return np.concatenate(np.concatenate(o, axis=1), axis=1) + + +def block_diag(*arrs): + """ + Create a block diagonal matrix from provided arrays. 
+ + Given the inputs `A`, `B` and `C`, the output will have these + arrays arranged on the diagonal:: + + [[A, 0, 0], + [0, B, 0], + [0, 0, C]] + + Parameters + ---------- + A, B, C, ... : array_like, up to 2-D + Input arrays. A 1-D array or array_like sequence of length `n` is + treated as a 2-D array with shape ``(1,n)``. + + Returns + ------- + D : ndarray + Array with `A`, `B`, `C`, ... on the diagonal. `D` has the + same dtype as `A`. + + Notes + ----- + If all the input arrays are square, the output is known as a + block diagonal matrix. + + Empty sequences (i.e., array-likes of zero size) will not be ignored. + Noteworthy, both [] and [[]] are treated as matrices with shape ``(1,0)``. + + Examples + -------- + >>> import numpy as np + >>> from scipy.linalg import block_diag + >>> A = [[1, 0], + ... [0, 1]] + >>> B = [[3, 4, 5], + ... [6, 7, 8]] + >>> C = [[7]] + >>> P = np.zeros((2, 0), dtype='int32') + >>> block_diag(A, B, C) + array([[1, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0], + [0, 0, 3, 4, 5, 0], + [0, 0, 6, 7, 8, 0], + [0, 0, 0, 0, 0, 7]]) + >>> block_diag(A, P, B, C) + array([[1, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 3, 4, 5, 0], + [0, 0, 6, 7, 8, 0], + [0, 0, 0, 0, 0, 7]]) + >>> block_diag(1.0, [2, 3], [[4, 5], [6, 7]]) + array([[ 1., 0., 0., 0., 0.], + [ 0., 2., 3., 0., 0.], + [ 0., 0., 0., 4., 5.], + [ 0., 0., 0., 6., 7.]]) + + """ + if arrs == (): + arrs = ([],) + arrs = [np.atleast_2d(a) for a in arrs] + + bad_args = [k for k in range(len(arrs)) if arrs[k].ndim > 2] + if bad_args: + raise ValueError("arguments in the following positions have dimension " + "greater than 2: %s" % bad_args) + + shapes = np.array([a.shape for a in arrs]) + out_dtype = np.result_type(*[arr.dtype for arr in arrs]) + out = np.zeros(np.sum(shapes, axis=0), dtype=out_dtype) + + r, c = 0, 0 + for i, (rr, cc) in enumerate(shapes): + out[r:r + rr, c:c + cc] = arrs[i] + r += rr + c += cc + return out + + +def companion(a): + """ + Create a companion matrix. + + Create the companion matrix [1]_ associated with the polynomial whose + coefficients are given in `a`. + + Parameters + ---------- + a : (N,) array_like + 1-D array of polynomial coefficients. The length of `a` must be + at least two, and ``a[0]`` must not be zero. + + Returns + ------- + c : (N-1, N-1) ndarray + The first row of `c` is ``-a[1:]/a[0]``, and the first + sub-diagonal is all ones. The data-type of the array is the same + as the data-type of ``1.0*a[0]``. + + Raises + ------ + ValueError + If any of the following are true: a) ``a.ndim != 1``; + b) ``a.size < 2``; c) ``a[0] == 0``. + + Notes + ----- + .. versionadded:: 0.8.0 + + References + ---------- + .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK: + Cambridge University Press, 1999, pp. 146-7. + + Examples + -------- + >>> from scipy.linalg import companion + >>> companion([1, -10, 31, -30]) + array([[ 10., -31., 30.], + [ 1., 0., 0.], + [ 0., 1., 0.]]) + + """ + a = np.atleast_1d(a) + + if a.ndim != 1: + raise ValueError("Incorrect shape for `a`. 
`a` must be " + "one-dimensional.") + + if a.size < 2: + raise ValueError("The length of `a` must be at least 2.") + + if a[0] == 0: + raise ValueError("The first coefficient in `a` must not be zero.") + + first_row = -a[1:] / (1.0 * a[0]) + n = a.size + c = np.zeros((n - 1, n - 1), dtype=first_row.dtype) + c[0] = first_row + c[list(range(1, n - 1)), list(range(0, n - 2))] = 1 + return c + + +def helmert(n, full=False): + """ + Create an Helmert matrix of order `n`. + + This has applications in statistics, compositional or simplicial analysis, + and in Aitchison geometry. + + Parameters + ---------- + n : int + The size of the array to create. + full : bool, optional + If True the (n, n) ndarray will be returned. + Otherwise the submatrix that does not include the first + row will be returned. + Default: False. + + Returns + ------- + M : ndarray + The Helmert matrix. + The shape is (n, n) or (n-1, n) depending on the `full` argument. + + Examples + -------- + >>> from scipy.linalg import helmert + >>> helmert(5, full=True) + array([[ 0.4472136 , 0.4472136 , 0.4472136 , 0.4472136 , 0.4472136 ], + [ 0.70710678, -0.70710678, 0. , 0. , 0. ], + [ 0.40824829, 0.40824829, -0.81649658, 0. , 0. ], + [ 0.28867513, 0.28867513, 0.28867513, -0.8660254 , 0. ], + [ 0.2236068 , 0.2236068 , 0.2236068 , 0.2236068 , -0.89442719]]) + + """ + H = np.tril(np.ones((n, n)), -1) - np.diag(np.arange(n)) + d = np.arange(n) * np.arange(1, n+1) + H[0] = 1 + d[0] = n + H_full = H / np.sqrt(d)[:, np.newaxis] + if full: + return H_full + else: + return H_full[1:] + + +def hilbert(n): + """ + Create a Hilbert matrix of order `n`. + + Returns the `n` by `n` array with entries `h[i,j] = 1 / (i + j + 1)`. + + Parameters + ---------- + n : int + The size of the array to create. + + Returns + ------- + h : (n, n) ndarray + The Hilbert matrix. + + See Also + -------- + invhilbert : Compute the inverse of a Hilbert matrix. + + Notes + ----- + .. versionadded:: 0.10.0 + + Examples + -------- + >>> from scipy.linalg import hilbert + >>> hilbert(3) + array([[ 1. , 0.5 , 0.33333333], + [ 0.5 , 0.33333333, 0.25 ], + [ 0.33333333, 0.25 , 0.2 ]]) + + """ + values = 1.0 / (1.0 + np.arange(2 * n - 1)) + h = hankel(values[:n], r=values[n - 1:]) + return h + + +def invhilbert(n, exact=False): + """ + Compute the inverse of the Hilbert matrix of order `n`. + + The entries in the inverse of a Hilbert matrix are integers. When `n` + is greater than 14, some entries in the inverse exceed the upper limit + of 64 bit integers. The `exact` argument provides two options for + dealing with these large integers. + + Parameters + ---------- + n : int + The order of the Hilbert matrix. + exact : bool, optional + If False, the data type of the array that is returned is np.float64, + and the array is an approximation of the inverse. + If True, the array is the exact integer inverse array. To represent + the exact inverse when n > 14, the returned array is an object array + of long integers. For n <= 14, the exact inverse is returned as an + array with data type np.int64. + + Returns + ------- + invh : (n, n) ndarray + The data type of the array is np.float64 if `exact` is False. + If `exact` is True, the data type is either np.int64 (for n <= 14) + or object (for n > 14). In the latter case, the objects in the + array will be long integers. + + See Also + -------- + hilbert : Create a Hilbert matrix. + + Notes + ----- + .. 
versionadded:: 0.10.0 + + Examples + -------- + >>> from scipy.linalg import invhilbert + >>> invhilbert(4) + array([[ 16., -120., 240., -140.], + [ -120., 1200., -2700., 1680.], + [ 240., -2700., 6480., -4200.], + [ -140., 1680., -4200., 2800.]]) + >>> invhilbert(4, exact=True) + array([[ 16, -120, 240, -140], + [ -120, 1200, -2700, 1680], + [ 240, -2700, 6480, -4200], + [ -140, 1680, -4200, 2800]], dtype=int64) + >>> invhilbert(16)[7,7] + 4.2475099528537506e+19 + >>> invhilbert(16, exact=True)[7,7] + 42475099528537378560 + + """ + from scipy.special import comb + if exact: + if n > 14: + dtype = object + else: + dtype = np.int64 + else: + dtype = np.float64 + invh = np.empty((n, n), dtype=dtype) + for i in range(n): + for j in range(0, i + 1): + s = i + j + invh[i, j] = ((-1) ** s * (s + 1) * + comb(n + i, n - j - 1, exact=exact) * + comb(n + j, n - i - 1, exact=exact) * + comb(s, i, exact=exact) ** 2) + if i != j: + invh[j, i] = invh[i, j] + return invh + + +def pascal(n, kind='symmetric', exact=True): + """ + Returns the n x n Pascal matrix. + + The Pascal matrix is a matrix containing the binomial coefficients as + its elements. + + Parameters + ---------- + n : int + The size of the matrix to create; that is, the result is an n x n + matrix. + kind : str, optional + Must be one of 'symmetric', 'lower', or 'upper'. + Default is 'symmetric'. + exact : bool, optional + If `exact` is True, the result is either an array of type + numpy.uint64 (if n < 35) or an object array of Python long integers. + If `exact` is False, the coefficients in the matrix are computed using + `scipy.special.comb` with `exact=False`. The result will be a floating + point array, and the values in the array will not be the exact + coefficients, but this version is much faster than `exact=True`. + + Returns + ------- + p : (n, n) ndarray + The Pascal matrix. + + See Also + -------- + invpascal + + Notes + ----- + See https://en.wikipedia.org/wiki/Pascal_matrix for more information + about Pascal matrices. + + .. versionadded:: 0.11.0 + + Examples + -------- + >>> from scipy.linalg import pascal + >>> pascal(4) + array([[ 1, 1, 1, 1], + [ 1, 2, 3, 4], + [ 1, 3, 6, 10], + [ 1, 4, 10, 20]], dtype=uint64) + >>> pascal(4, kind='lower') + array([[1, 0, 0, 0], + [1, 1, 0, 0], + [1, 2, 1, 0], + [1, 3, 3, 1]], dtype=uint64) + >>> pascal(50)[-1, -1] + 25477612258980856902730428600 + >>> from scipy.special import comb + >>> comb(98, 49, exact=True) + 25477612258980856902730428600 + + """ + + from scipy.special import comb + if kind not in ['symmetric', 'lower', 'upper']: + raise ValueError("kind must be 'symmetric', 'lower', or 'upper'") + + if exact: + if n >= 35: + L_n = np.empty((n, n), dtype=object) + L_n.fill(0) + else: + L_n = np.zeros((n, n), dtype=np.uint64) + for i in range(n): + for j in range(i + 1): + L_n[i, j] = comb(i, j, exact=True) + else: + L_n = comb(*np.ogrid[:n, :n]) + + if kind == 'lower': + p = L_n + elif kind == 'upper': + p = L_n.T + else: + p = np.dot(L_n, L_n.T) + + return p + + +def invpascal(n, kind='symmetric', exact=True): + """ + Returns the inverse of the n x n Pascal matrix. + + The Pascal matrix is a matrix containing the binomial coefficients as + its elements. + + Parameters + ---------- + n : int + The size of the matrix to create; that is, the result is an n x n + matrix. + kind : str, optional + Must be one of 'symmetric', 'lower', or 'upper'. + Default is 'symmetric'. 
+ exact : bool, optional + If `exact` is True, the result is either an array of type + ``numpy.int64`` (if `n` <= 35) or an object array of Python integers. + If `exact` is False, the coefficients in the matrix are computed using + `scipy.special.comb` with `exact=False`. The result will be a floating + point array, and for large `n`, the values in the array will not be the + exact coefficients. + + Returns + ------- + invp : (n, n) ndarray + The inverse of the Pascal matrix. + + See Also + -------- + pascal + + Notes + ----- + + .. versionadded:: 0.16.0 + + References + ---------- + .. [1] "Pascal matrix", https://en.wikipedia.org/wiki/Pascal_matrix + .. [2] Cohen, A. M., "The inverse of a Pascal matrix", Mathematical + Gazette, 59(408), pp. 111-112, 1975. + + Examples + -------- + >>> from scipy.linalg import invpascal, pascal + >>> invp = invpascal(5) + >>> invp + array([[ 5, -10, 10, -5, 1], + [-10, 30, -35, 19, -4], + [ 10, -35, 46, -27, 6], + [ -5, 19, -27, 17, -4], + [ 1, -4, 6, -4, 1]]) + + >>> p = pascal(5) + >>> p.dot(invp) + array([[ 1., 0., 0., 0., 0.], + [ 0., 1., 0., 0., 0.], + [ 0., 0., 1., 0., 0.], + [ 0., 0., 0., 1., 0.], + [ 0., 0., 0., 0., 1.]]) + + An example of the use of `kind` and `exact`: + + >>> invpascal(5, kind='lower', exact=False) + array([[ 1., -0., 0., -0., 0.], + [-1., 1., -0., 0., -0.], + [ 1., -2., 1., -0., 0.], + [-1., 3., -3., 1., -0.], + [ 1., -4., 6., -4., 1.]]) + + """ + from scipy.special import comb + + if kind not in ['symmetric', 'lower', 'upper']: + raise ValueError("'kind' must be 'symmetric', 'lower' or 'upper'.") + + if kind == 'symmetric': + if exact: + if n > 34: + dt = object + else: + dt = np.int64 + else: + dt = np.float64 + invp = np.empty((n, n), dtype=dt) + for i in range(n): + for j in range(0, i + 1): + v = 0 + for k in range(n - i): + v += comb(i + k, k, exact=exact) * comb(i + k, i + k - j, + exact=exact) + invp[i, j] = (-1)**(i - j) * v + if i != j: + invp[j, i] = invp[i, j] + else: + # For the 'lower' and 'upper' cases, we computer the inverse by + # changing the sign of every other diagonal of the pascal matrix. + invp = pascal(n, kind=kind, exact=exact) + if invp.dtype == np.uint64: + # This cast from np.uint64 to int64 OK, because if `kind` is not + # "symmetric", the values in invp are all much less than 2**63. + invp = invp.view(np.int64) + + # The toeplitz matrix has alternating bands of 1 and -1. + invp *= toeplitz((-1)**np.arange(n)).astype(invp.dtype) + + return invp + + +def dft(n, scale=None): + """ + Discrete Fourier transform matrix. + + Create the matrix that computes the discrete Fourier transform of a + sequence [1]_. The nth primitive root of unity used to generate the + matrix is exp(-2*pi*i/n), where i = sqrt(-1). + + Parameters + ---------- + n : int + Size the matrix to create. + scale : str, optional + Must be None, 'sqrtn', or 'n'. + If `scale` is 'sqrtn', the matrix is divided by `sqrt(n)`. + If `scale` is 'n', the matrix is divided by `n`. + If `scale` is None (the default), the matrix is not normalized, and the + return value is simply the Vandermonde matrix of the roots of unity. + + Returns + ------- + m : (n, n) ndarray + The DFT matrix. + + Notes + ----- + When `scale` is None, multiplying a vector by the matrix returned by + `dft` is mathematically equivalent to (but much less efficient than) + the calculation performed by `scipy.fft.fft`. + + .. versionadded:: 0.14.0 + + References + ---------- + .. 
[1] "DFT matrix", https://en.wikipedia.org/wiki/DFT_matrix + + Examples + -------- + >>> import numpy as np + >>> from scipy.linalg import dft + >>> np.set_printoptions(precision=2, suppress=True) # for compact output + >>> m = dft(5) + >>> m + array([[ 1. +0.j , 1. +0.j , 1. +0.j , 1. +0.j , 1. +0.j ], + [ 1. +0.j , 0.31-0.95j, -0.81-0.59j, -0.81+0.59j, 0.31+0.95j], + [ 1. +0.j , -0.81-0.59j, 0.31+0.95j, 0.31-0.95j, -0.81+0.59j], + [ 1. +0.j , -0.81+0.59j, 0.31-0.95j, 0.31+0.95j, -0.81-0.59j], + [ 1. +0.j , 0.31+0.95j, -0.81+0.59j, -0.81-0.59j, 0.31-0.95j]]) + >>> x = np.array([1, 2, 3, 0, 3]) + >>> m @ x # Compute the DFT of x + array([ 9. +0.j , 0.12-0.81j, -2.12+3.44j, -2.12-3.44j, 0.12+0.81j]) + + Verify that ``m @ x`` is the same as ``fft(x)``. + + >>> from scipy.fft import fft + >>> fft(x) # Same result as m @ x + array([ 9. +0.j , 0.12-0.81j, -2.12+3.44j, -2.12-3.44j, 0.12+0.81j]) + """ + if scale not in [None, 'sqrtn', 'n']: + raise ValueError("scale must be None, 'sqrtn', or 'n'; " + f"{scale!r} is not valid.") + + omegas = np.exp(-2j * np.pi * np.arange(n) / n).reshape(-1, 1) + m = omegas ** np.arange(n) + if scale == 'sqrtn': + m /= math.sqrt(n) + elif scale == 'n': + m /= n + return m + + +def fiedler(a): + """Returns a symmetric Fiedler matrix + + Given an sequence of numbers `a`, Fiedler matrices have the structure + ``F[i, j] = np.abs(a[i] - a[j])``, and hence zero diagonals and nonnegative + entries. A Fiedler matrix has a dominant positive eigenvalue and other + eigenvalues are negative. Although not valid generally, for certain inputs, + the inverse and the determinant can be derived explicitly as given in [1]_. + + Parameters + ---------- + a : (n,) array_like + coefficient array + + Returns + ------- + F : (n, n) ndarray + + See Also + -------- + circulant, toeplitz + + Notes + ----- + + .. versionadded:: 1.3.0 + + References + ---------- + .. [1] J. Todd, "Basic Numerical Mathematics: Vol.2 : Numerical Algebra", + 1977, Birkhauser, :doi:`10.1007/978-3-0348-7286-7` + + Examples + -------- + >>> import numpy as np + >>> from scipy.linalg import det, inv, fiedler + >>> a = [1, 4, 12, 45, 77] + >>> n = len(a) + >>> A = fiedler(a) + >>> A + array([[ 0, 3, 11, 44, 76], + [ 3, 0, 8, 41, 73], + [11, 8, 0, 33, 65], + [44, 41, 33, 0, 32], + [76, 73, 65, 32, 0]]) + + The explicit formulas for determinant and inverse seem to hold only for + monotonically increasing/decreasing arrays. Note the tridiagonal structure + and the corners. + + >>> Ai = inv(A) + >>> Ai[np.abs(Ai) < 1e-12] = 0. # cleanup the numerical noise for display + >>> Ai + array([[-0.16008772, 0.16666667, 0. , 0. , 0.00657895], + [ 0.16666667, -0.22916667, 0.0625 , 0. , 0. ], + [ 0. , 0.0625 , -0.07765152, 0.01515152, 0. ], + [ 0. , 0. , 0.01515152, -0.03077652, 0.015625 ], + [ 0.00657895, 0. , 0. , 0.015625 , -0.00904605]]) + >>> det(A) + 15409151.999999998 + >>> (-1)**(n-1) * 2**(n-2) * np.diff(a).prod() * (a[-1] - a[0]) + 15409152 + + """ + a = np.atleast_1d(a) + + if a.ndim != 1: + raise ValueError("Input 'a' must be a 1D array.") + + if a.size == 0: + return np.array([], dtype=float) + elif a.size == 1: + return np.array([[0.]]) + else: + return np.abs(a[:, None] - a) + + +def fiedler_companion(a): + """ Returns a Fiedler companion matrix + + Given a polynomial coefficient array ``a``, this function forms a + pentadiagonal matrix with a special structure whose eigenvalues coincides + with the roots of ``a``. 
+ + Parameters + ---------- + a : (N,) array_like + 1-D array of polynomial coefficients in descending order with a nonzero + leading coefficient. For ``N < 2``, an empty array is returned. + + Returns + ------- + c : (N-1, N-1) ndarray + Resulting companion matrix + + See Also + -------- + companion + + Notes + ----- + Similar to `companion` the leading coefficient should be nonzero. In the case + the leading coefficient is not 1, other coefficients are rescaled before + the array generation. To avoid numerical issues, it is best to provide a + monic polynomial. + + .. versionadded:: 1.3.0 + + References + ---------- + .. [1] M. Fiedler, " A note on companion matrices", Linear Algebra and its + Applications, 2003, :doi:`10.1016/S0024-3795(03)00548-2` + + Examples + -------- + >>> import numpy as np + >>> from scipy.linalg import fiedler_companion, eigvals + >>> p = np.poly(np.arange(1, 9, 2)) # [1., -16., 86., -176., 105.] + >>> fc = fiedler_companion(p) + >>> fc + array([[ 16., -86., 1., 0.], + [ 1., 0., 0., 0.], + [ 0., 176., 0., -105.], + [ 0., 1., 0., 0.]]) + >>> eigvals(fc) + array([7.+0.j, 5.+0.j, 3.+0.j, 1.+0.j]) + + """ + a = np.atleast_1d(a) + + if a.ndim != 1: + raise ValueError("Input 'a' must be a 1-D array.") + + if a.size <= 2: + if a.size == 2: + return np.array([[-(a/a[0])[-1]]]) + return np.array([], dtype=a.dtype) + + if a[0] == 0.: + raise ValueError('Leading coefficient is zero.') + + a = a/a[0] + n = a.size - 1 + c = np.zeros((n, n), dtype=a.dtype) + # subdiagonals + c[range(3, n, 2), range(1, n-2, 2)] = 1. + c[range(2, n, 2), range(1, n-1, 2)] = -a[3::2] + # superdiagonals + c[range(0, n-2, 2), range(2, n, 2)] = 1. + c[range(0, n-1, 2), range(1, n, 2)] = -a[2::2] + c[[0, 1], 0] = [-a[1], 1] + + return c + + +def convolution_matrix(a, n, mode='full'): + """ + Construct a convolution matrix. + + Constructs the Toeplitz matrix representing one-dimensional + convolution [1]_. See the notes below for details. + + Parameters + ---------- + a : (m,) array_like + The 1-D array to convolve. + n : int + The number of columns in the resulting matrix. It gives the length + of the input to be convolved with `a`. This is analogous to the + length of `v` in ``numpy.convolve(a, v)``. + mode : str + This is analogous to `mode` in ``numpy.convolve(v, a, mode)``. + It must be one of ('full', 'valid', 'same'). + See below for how `mode` determines the shape of the result. + + Returns + ------- + A : (k, n) ndarray + The convolution matrix whose row count `k` depends on `mode`:: + + ======= ========================= + mode k + ======= ========================= + 'full' m + n -1 + 'same' max(m, n) + 'valid' max(m, n) - min(m, n) + 1 + ======= ========================= + + See Also + -------- + toeplitz : Toeplitz matrix + + Notes + ----- + The code:: + + A = convolution_matrix(a, n, mode) + + creates a Toeplitz matrix `A` such that ``A @ v`` is equivalent to + using ``convolve(a, v, mode)``. The returned array always has `n` + columns. The number of rows depends on the specified `mode`, as + explained above. + + In the default 'full' mode, the entries of `A` are given by:: + + A[i, j] == (a[i-j] if (0 <= (i-j) < m) else 0) + + where ``m = len(a)``. Suppose, for example, the input array is + ``[x, y, z]``. The convolution matrix has the form:: + + [x, 0, 0, ..., 0, 0] + [y, x, 0, ..., 0, 0] + [z, y, x, ..., 0, 0] + ... 
+ [0, 0, 0, ..., x, 0] + [0, 0, 0, ..., y, x] + [0, 0, 0, ..., z, y] + [0, 0, 0, ..., 0, z] + + In 'valid' mode, the entries of `A` are given by:: + + A[i, j] == (a[i-j+m-1] if (0 <= (i-j+m-1) < m) else 0) + + This corresponds to a matrix whose rows are the subset of those from + the 'full' case where all the coefficients in `a` are contained in the + row. For input ``[x, y, z]``, this array looks like:: + + [z, y, x, 0, 0, ..., 0, 0, 0] + [0, z, y, x, 0, ..., 0, 0, 0] + [0, 0, z, y, x, ..., 0, 0, 0] + ... + [0, 0, 0, 0, 0, ..., x, 0, 0] + [0, 0, 0, 0, 0, ..., y, x, 0] + [0, 0, 0, 0, 0, ..., z, y, x] + + In the 'same' mode, the entries of `A` are given by:: + + d = (m - 1) // 2 + A[i, j] == (a[i-j+d] if (0 <= (i-j+d) < m) else 0) + + The typical application of the 'same' mode is when one has a signal of + length `n` (with `n` greater than ``len(a)``), and the desired output + is a filtered signal that is still of length `n`. + + For input ``[x, y, z]``, this array looks like:: + + [y, x, 0, 0, ..., 0, 0, 0] + [z, y, x, 0, ..., 0, 0, 0] + [0, z, y, x, ..., 0, 0, 0] + [0, 0, z, y, ..., 0, 0, 0] + ... + [0, 0, 0, 0, ..., y, x, 0] + [0, 0, 0, 0, ..., z, y, x] + [0, 0, 0, 0, ..., 0, z, y] + + .. versionadded:: 1.5.0 + + References + ---------- + .. [1] "Convolution", https://en.wikipedia.org/wiki/Convolution + + Examples + -------- + >>> import numpy as np + >>> from scipy.linalg import convolution_matrix + >>> A = convolution_matrix([-1, 4, -2], 5, mode='same') + >>> A + array([[ 4, -1, 0, 0, 0], + [-2, 4, -1, 0, 0], + [ 0, -2, 4, -1, 0], + [ 0, 0, -2, 4, -1], + [ 0, 0, 0, -2, 4]]) + + Compare multiplication by `A` with the use of `numpy.convolve`. + + >>> x = np.array([1, 2, 0, -3, 0.5]) + >>> A @ x + array([ 2. , 6. , -1. , -12.5, 8. ]) + + Verify that ``A @ x`` produced the same result as applying the + convolution function. + + >>> np.convolve([-1, 4, -2], x, mode='same') + array([ 2. , 6. , -1. , -12.5, 8. ]) + + For comparison to the case ``mode='same'`` shown above, here are the + matrices produced by ``mode='full'`` and ``mode='valid'`` for the + same coefficients and size. 
+ + >>> convolution_matrix([-1, 4, -2], 5, mode='full') + array([[-1, 0, 0, 0, 0], + [ 4, -1, 0, 0, 0], + [-2, 4, -1, 0, 0], + [ 0, -2, 4, -1, 0], + [ 0, 0, -2, 4, -1], + [ 0, 0, 0, -2, 4], + [ 0, 0, 0, 0, -2]]) + + >>> convolution_matrix([-1, 4, -2], 5, mode='valid') + array([[-2, 4, -1, 0, 0], + [ 0, -2, 4, -1, 0], + [ 0, 0, -2, 4, -1]]) + """ + if n <= 0: + raise ValueError('n must be a positive integer.') + + a = np.asarray(a) + if a.ndim != 1: + raise ValueError('convolution_matrix expects a one-dimensional ' + 'array as input') + if a.size == 0: + raise ValueError('len(a) must be at least 1.') + + if mode not in ('full', 'valid', 'same'): + raise ValueError( + "'mode' argument must be one of ('full', 'valid', 'same')") + + # create zero padded versions of the array + az = np.pad(a, (0, n-1), 'constant') + raz = np.pad(a[::-1], (0, n-1), 'constant') + + if mode == 'same': + trim = min(n, len(a)) - 1 + tb = trim//2 + te = trim - tb + col0 = az[tb:len(az)-te] + row0 = raz[-n-tb:len(raz)-tb] + elif mode == 'valid': + tb = min(n, len(a)) - 1 + te = tb + col0 = az[tb:len(az)-te] + row0 = raz[-n-tb:len(raz)-tb] + else: # 'full' + col0 = az + row0 = raz[-n:] + return toeplitz(col0, row0) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/basic.py b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/basic.py new file mode 100644 index 0000000000000000000000000000000000000000..643fbc74d02a13209e606fcdc53e9cfd1693dde0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/basic.py @@ -0,0 +1,24 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.linalg` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'solve', 'solve_triangular', 'solveh_banded', 'solve_banded', + 'solve_toeplitz', 'solve_circulant', 'inv', 'det', 'lstsq', + 'pinv', 'pinvh', 'matrix_balance', 'matmul_toeplitz', + 'atleast_1d', 'atleast_2d', 'get_lapack_funcs', + 'LinAlgError', 'LinAlgWarning', 'levinson', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="linalg", module="basic", + private_modules=["_basic"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/blas.py b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/blas.py new file mode 100644 index 0000000000000000000000000000000000000000..c42190ed453a6d4d460ca077eb2d1bf72b7aab00 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/blas.py @@ -0,0 +1,484 @@ +""" +Low-level BLAS functions (:mod:`scipy.linalg.blas`) +=================================================== + +This module contains low-level functions from the BLAS library. + +.. versionadded:: 0.12.0 + +.. note:: + + The common ``overwrite_<>`` option in many routines, allows the + input arrays to be overwritten to avoid extra memory allocation. + However this requires the array to satisfy two conditions + which are memory order and the data type to match exactly the + order and the type expected by the routine. + + As an example, if you pass a double precision float array to any + ``S....`` routine which expects single precision arguments, f2py + will create an intermediate array to match the argument types and + overwriting will be performed on that intermediate array. + + Similarly, if a C-contiguous array is passed, f2py will pass a + FORTRAN-contiguous array internally. 
Please make sure that these + details are satisfied. More information can be found in the f2py + documentation. + +.. warning:: + + These functions do little to no error checking. + It is possible to cause crashes by mis-using them, + so prefer using the higher-level routines in `scipy.linalg`. + +Finding functions +----------------- + +.. autosummary:: + :toctree: generated/ + + get_blas_funcs + find_best_blas_type + +BLAS Level 1 functions +---------------------- + +.. autosummary:: + :toctree: generated/ + + caxpy + ccopy + cdotc + cdotu + crotg + cscal + csrot + csscal + cswap + dasum + daxpy + dcopy + ddot + dnrm2 + drot + drotg + drotm + drotmg + dscal + dswap + dzasum + dznrm2 + icamax + idamax + isamax + izamax + sasum + saxpy + scasum + scnrm2 + scopy + sdot + snrm2 + srot + srotg + srotm + srotmg + sscal + sswap + zaxpy + zcopy + zdotc + zdotu + zdrot + zdscal + zrotg + zscal + zswap + +BLAS Level 2 functions +---------------------- + +.. autosummary:: + :toctree: generated/ + + sgbmv + sgemv + sger + ssbmv + sspr + sspr2 + ssymv + ssyr + ssyr2 + stbmv + stpsv + strmv + strsv + dgbmv + dgemv + dger + dsbmv + dspr + dspr2 + dsymv + dsyr + dsyr2 + dtbmv + dtpsv + dtrmv + dtrsv + cgbmv + cgemv + cgerc + cgeru + chbmv + chemv + cher + cher2 + chpmv + chpr + chpr2 + ctbmv + ctbsv + ctpmv + ctpsv + ctrmv + ctrsv + csyr + zgbmv + zgemv + zgerc + zgeru + zhbmv + zhemv + zher + zher2 + zhpmv + zhpr + zhpr2 + ztbmv + ztbsv + ztpmv + ztrmv + ztrsv + zsyr + +BLAS Level 3 functions +---------------------- + +.. autosummary:: + :toctree: generated/ + + sgemm + ssymm + ssyr2k + ssyrk + strmm + strsm + dgemm + dsymm + dsyr2k + dsyrk + dtrmm + dtrsm + cgemm + chemm + cher2k + cherk + csymm + csyr2k + csyrk + ctrmm + ctrsm + zgemm + zhemm + zher2k + zherk + zsymm + zsyr2k + zsyrk + ztrmm + ztrsm + +""" +# +# Author: Pearu Peterson, March 2002 +# refactoring by Fabian Pedregosa, March 2010 +# + +__all__ = ['get_blas_funcs', 'find_best_blas_type'] + +import numpy as _np +import functools + +from scipy.linalg import _fblas +try: + from scipy.linalg import _cblas +except ImportError: + _cblas = None + +try: + from scipy.linalg import _fblas_64 + HAS_ILP64 = True +except ImportError: + HAS_ILP64 = False + _fblas_64 = None + +# Expose all functions (only fblas --- cblas is an implementation detail) +empty_module = None +from scipy.linalg._fblas import * # noqa: E402, F403 +del empty_module + +# all numeric dtypes '?bBhHiIlLqQefdgFDGO' that are safe to be converted to + +# single precision float : '?bBhH!!!!!!ef!!!!!!' +# double precision float : '?bBhHiIlLqQefdg!!!!' +# single precision complex : '?bBhH!!!!!!ef!!F!!!' +# double precision complex : '?bBhHiIlLqQefdgFDG!' + +_type_score = {x: 1 for x in '?bBhHef'} +_type_score.update({x: 2 for x in 'iIlLqQd'}) + +# Handle float128(g) and complex256(G) separately in case non-Windows systems. +# On Windows, the values will be rewritten to the same key with the same value. 
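+# (Editor's note, not upstream SciPy code.) How the scores resolve: float32
+# scores 1 ('s'), float64 scores 2 ('d'), complex64 scores 3 ('c') and
+# complex128 scores 4 ('z'); the update below also folds long double ('g')
+# and complex long double ('G') into the double-precision prefixes.
+# find_best_blas_type() additionally bumps a mix of score-2 and score-3
+# arrays up to 4, so float64 combined with complex64 selects 'z' routines.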
+_type_score.update({'F': 3, 'D': 4, 'g': 2, 'G': 4}) + +# Final mapping to the actual prefixes and dtypes +_type_conv = {1: ('s', _np.dtype('float32')), + 2: ('d', _np.dtype('float64')), + 3: ('c', _np.dtype('complex64')), + 4: ('z', _np.dtype('complex128'))} + +# some convenience alias for complex functions +_blas_alias = {'cnrm2': 'scnrm2', 'znrm2': 'dznrm2', + 'cdot': 'cdotc', 'zdot': 'zdotc', + 'cger': 'cgerc', 'zger': 'zgerc', + 'sdotc': 'sdot', 'sdotu': 'sdot', + 'ddotc': 'ddot', 'ddotu': 'ddot'} + + +def find_best_blas_type(arrays=(), dtype=None): + """Find best-matching BLAS/LAPACK type. + + Arrays are used to determine the optimal prefix of BLAS routines. + + Parameters + ---------- + arrays : sequence of ndarrays, optional + Arrays can be given to determine optimal prefix of BLAS + routines. If not given, double-precision routines will be + used, otherwise the most generic type in arrays will be used. + dtype : str or dtype, optional + Data-type specifier. Not used if `arrays` is non-empty. + + Returns + ------- + prefix : str + BLAS/LAPACK prefix character. + dtype : dtype + Inferred Numpy data type. + prefer_fortran : bool + Whether to prefer Fortran order routines over C order. + + Examples + -------- + >>> import numpy as np + >>> import scipy.linalg.blas as bla + >>> rng = np.random.default_rng() + >>> a = rng.random((10,15)) + >>> b = np.asfortranarray(a) # Change the memory layout order + >>> bla.find_best_blas_type((a,)) + ('d', dtype('float64'), False) + >>> bla.find_best_blas_type((a*1j,)) + ('z', dtype('complex128'), False) + >>> bla.find_best_blas_type((b,)) + ('d', dtype('float64'), True) + + """ + dtype = _np.dtype(dtype) + max_score = _type_score.get(dtype.char, 5) + prefer_fortran = False + + if arrays: + # In most cases, single element is passed through, quicker route + if len(arrays) == 1: + max_score = _type_score.get(arrays[0].dtype.char, 5) + prefer_fortran = arrays[0].flags['FORTRAN'] + else: + # use the most generic type in arrays + scores = [_type_score.get(x.dtype.char, 5) for x in arrays] + max_score = max(scores) + ind_max_score = scores.index(max_score) + # safe upcasting for mix of float64 and complex64 --> prefix 'z' + if max_score == 3 and (2 in scores): + max_score = 4 + + if arrays[ind_max_score].flags['FORTRAN']: + # prefer Fortran for leading array with column major order + prefer_fortran = True + + # Get the LAPACK prefix and the corresponding dtype if not fall back + # to 'd' and double precision float. + prefix, dtype = _type_conv.get(max_score, ('d', _np.dtype('float64'))) + + return prefix, dtype, prefer_fortran + + +def _get_funcs(names, arrays, dtype, + lib_name, fmodule, cmodule, + fmodule_name, cmodule_name, alias, + ilp64=False): + """ + Return available BLAS/LAPACK functions. + + Used also in lapack.py. See get_blas_funcs for docstring. 
+ """ + + funcs = [] + unpack = False + dtype = _np.dtype(dtype) + module1 = (cmodule, cmodule_name) + module2 = (fmodule, fmodule_name) + + if isinstance(names, str): + names = (names,) + unpack = True + + prefix, dtype, prefer_fortran = find_best_blas_type(arrays, dtype) + + if prefer_fortran: + module1, module2 = module2, module1 + + for name in names: + func_name = prefix + name + func_name = alias.get(func_name, func_name) + func = getattr(module1[0], func_name, None) + module_name = module1[1] + if func is None: + func = getattr(module2[0], func_name, None) + module_name = module2[1] + if func is None: + raise ValueError( + f'{lib_name} function {func_name} could not be found') + func.module_name, func.typecode = module_name, prefix + func.dtype = dtype + if not ilp64: + func.int_dtype = _np.dtype(_np.intc) + else: + func.int_dtype = _np.dtype(_np.int64) + func.prefix = prefix # Backward compatibility + funcs.append(func) + + if unpack: + return funcs[0] + else: + return funcs + + +def _memoize_get_funcs(func): + """ + Memoized fast path for _get_funcs instances + """ + memo = {} + func.memo = memo + + @functools.wraps(func) + def getter(names, arrays=(), dtype=None, ilp64=False): + key = (names, dtype, ilp64) + for array in arrays: + # cf. find_blas_funcs + key += (array.dtype.char, array.flags.fortran) + + try: + value = memo.get(key) + except TypeError: + # unhashable key etc. + key = None + value = None + + if value is not None: + return value + + value = func(names, arrays, dtype, ilp64) + + if key is not None: + memo[key] = value + + return value + + return getter + + +@_memoize_get_funcs +def get_blas_funcs(names, arrays=(), dtype=None, ilp64=False): + """Return available BLAS function objects from names. + + Arrays are used to determine the optimal prefix of BLAS routines. + + Parameters + ---------- + names : str or sequence of str + Name(s) of BLAS functions without type prefix. + + arrays : sequence of ndarrays, optional + Arrays can be given to determine optimal prefix of BLAS + routines. If not given, double-precision routines will be + used, otherwise the most generic type in arrays will be used. + + dtype : str or dtype, optional + Data-type specifier. Not used if `arrays` is non-empty. + + ilp64 : {True, False, 'preferred'}, optional + Whether to return ILP64 routine variant. + Choosing 'preferred' returns ILP64 routine if available, + and otherwise the 32-bit routine. Default: False + + Returns + ------- + funcs : list + List containing the found function(s). + + + Notes + ----- + This routine automatically chooses between Fortran/C + interfaces. Fortran code is used whenever possible for arrays with + column major order. In all other cases, C code is preferred. + + In BLAS, the naming convention is that all functions start with a + type prefix, which depends on the type of the principal + matrix. These can be one of {'s', 'd', 'c', 'z'} for the NumPy + types {float32, float64, complex64, complex128} respectively. + The code and the dtype are stored in attributes `typecode` and `dtype` + of the returned functions. 
+ + Examples + -------- + >>> import numpy as np + >>> import scipy.linalg as LA + >>> rng = np.random.default_rng() + >>> a = rng.random((3,2)) + >>> x_gemv = LA.get_blas_funcs('gemv', (a,)) + >>> x_gemv.typecode + 'd' + >>> x_gemv = LA.get_blas_funcs('gemv',(a*1j,)) + >>> x_gemv.typecode + 'z' + + """ + if isinstance(ilp64, str): + if ilp64 == 'preferred': + ilp64 = HAS_ILP64 + else: + raise ValueError("Invalid value for 'ilp64'") + + if not ilp64: + return _get_funcs(names, arrays, dtype, + "BLAS", _fblas, _cblas, "fblas", "cblas", + _blas_alias, ilp64=False) + else: + if not HAS_ILP64: + raise RuntimeError("BLAS ILP64 routine requested, but Scipy " + "compiled only with 32-bit BLAS") + return _get_funcs(names, arrays, dtype, + "BLAS", _fblas_64, None, "fblas_64", None, + _blas_alias, ilp64=True) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/cython_blas.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/cython_blas.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..b8fbebfebf935e8d8ac3e84fe02c430974c4c45d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/cython_blas.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/cython_lapack.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/cython_lapack.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..1ae6fbe49d8771523d9fcfa3c11c25d0ba3bcff5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/cython_lapack.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/decomp_lu.py b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/decomp_lu.py new file mode 100644 index 0000000000000000000000000000000000000000..ab46ddcbd13938a978c96b4e248302b5efe2b553 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/decomp_lu.py @@ -0,0 +1,21 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.linalg` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'lu', 'lu_solve', 'lu_factor', + 'asarray_chkfinite', 'LinAlgWarning', 'get_lapack_funcs', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="linalg", module="decomp_lu", + private_modules=["_decomp_lu"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/decomp_svd.py b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/decomp_svd.py new file mode 100644 index 0000000000000000000000000000000000000000..64d0ce8562f06a3837df050f0ea6b8b15a2b359e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/decomp_svd.py @@ -0,0 +1,21 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.linalg` namespace for importing the functions +# included below. 
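+# (Editor's note, not upstream SciPy code.) Like the other deprecated shims
+# in this package, importing a name such as ``svd`` from here still works:
+# the module-level ``__getattr__`` below routes it through
+# ``_sub_module_deprecation``, which is expected to emit a deprecation
+# warning and forward to the private ``scipy.linalg._decomp_svd`` module.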
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'svd', 'svdvals', 'diagsvd', 'orth', 'subspace_angles', 'null_space', + 'LinAlgError', 'get_lapack_funcs' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="linalg", module="decomp_svd", + private_modules=["_decomp_svd"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_blas.py b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_blas.py new file mode 100644 index 0000000000000000000000000000000000000000..727dfa45d37beea695febfe34a0f4415bc045e94 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_blas.py @@ -0,0 +1,1114 @@ +# +# Created by: Pearu Peterson, April 2002 +# + +import math +import pytest +import numpy as np +from numpy.testing import (assert_equal, assert_almost_equal, assert_, + assert_array_almost_equal, assert_allclose) +from pytest import raises as assert_raises + +from numpy import float32, float64, complex64, complex128, arange, triu, \ + tril, zeros, tril_indices, ones, mod, diag, append, eye, \ + nonzero + +from numpy.random import rand, seed +import scipy +from scipy.linalg import _fblas as fblas, get_blas_funcs, toeplitz, solve + +try: + from scipy.linalg import _cblas as cblas +except ImportError: + cblas = None + +REAL_DTYPES = [float32, float64] +COMPLEX_DTYPES = [complex64, complex128] +DTYPES = REAL_DTYPES + COMPLEX_DTYPES + + +def test_get_blas_funcs(): + # check that it returns Fortran code for arrays that are + # fortran-ordered + f1, f2, f3 = get_blas_funcs( + ('axpy', 'axpy', 'axpy'), + (np.empty((2, 2), dtype=np.complex64, order='F'), + np.empty((2, 2), dtype=np.complex128, order='C')) + ) + + # get_blas_funcs will choose libraries depending on most generic + # array + assert_equal(f1.typecode, 'z') + assert_equal(f2.typecode, 'z') + if cblas is not None: + assert_equal(f1.module_name, 'cblas') + assert_equal(f2.module_name, 'cblas') + + # check defaults. 
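+    # (Editor's note, not upstream SciPy code.) With no arrays and no dtype
+    # given, find_best_blas_type falls back to the double-precision 'd'
+    # prefix, so the default 'rotg' requested here should resolve to drotg.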
+ f1 = get_blas_funcs('rotg') + assert_equal(f1.typecode, 'd') + + # check also dtype interface + f1 = get_blas_funcs('gemm', dtype=np.complex64) + assert_equal(f1.typecode, 'c') + f1 = get_blas_funcs('gemm', dtype='F') + assert_equal(f1.typecode, 'c') + + # extended precision complex + f1 = get_blas_funcs('gemm', dtype=np.clongdouble) + assert_equal(f1.typecode, 'z') + + # check safe complex upcasting + f1 = get_blas_funcs('axpy', + (np.empty((2, 2), dtype=np.float64), + np.empty((2, 2), dtype=np.complex64)) + ) + assert_equal(f1.typecode, 'z') + + +def test_get_blas_funcs_alias(): + # check alias for get_blas_funcs + f, g = get_blas_funcs(('nrm2', 'dot'), dtype=np.complex64) + assert f.typecode == 'c' + assert g.typecode == 'c' + + f, g, h = get_blas_funcs(('dot', 'dotc', 'dotu'), dtype=np.float64) + assert f is g + assert f is h + + +class TestCBLAS1Simple: + + def test_axpy(self): + for p in 'sd': + f = getattr(cblas, p+'axpy', None) + if f is None: + continue + assert_array_almost_equal(f([1, 2, 3], [2, -1, 3], a=5), + [7, 9, 18]) + for p in 'cz': + f = getattr(cblas, p+'axpy', None) + if f is None: + continue + assert_array_almost_equal(f([1, 2j, 3], [2, -1, 3], a=5), + [7, 10j-1, 18]) + + +class TestFBLAS1Simple: + + def test_axpy(self): + for p in 'sd': + f = getattr(fblas, p+'axpy', None) + if f is None: + continue + assert_array_almost_equal(f([1, 2, 3], [2, -1, 3], a=5), + [7, 9, 18]) + for p in 'cz': + f = getattr(fblas, p+'axpy', None) + if f is None: + continue + assert_array_almost_equal(f([1, 2j, 3], [2, -1, 3], a=5), + [7, 10j-1, 18]) + + def test_copy(self): + for p in 'sd': + f = getattr(fblas, p+'copy', None) + if f is None: + continue + assert_array_almost_equal(f([3, 4, 5], [8]*3), [3, 4, 5]) + for p in 'cz': + f = getattr(fblas, p+'copy', None) + if f is None: + continue + assert_array_almost_equal(f([3, 4j, 5+3j], [8]*3), [3, 4j, 5+3j]) + + def test_asum(self): + for p in 'sd': + f = getattr(fblas, p+'asum', None) + if f is None: + continue + assert_almost_equal(f([3, -4, 5]), 12) + for p in ['sc', 'dz']: + f = getattr(fblas, p+'asum', None) + if f is None: + continue + assert_almost_equal(f([3j, -4, 3-4j]), 14) + + def test_dot(self): + for p in 'sd': + f = getattr(fblas, p+'dot', None) + if f is None: + continue + assert_almost_equal(f([3, -4, 5], [2, 5, 1]), -9) + + def test_complex_dotu(self): + for p in 'cz': + f = getattr(fblas, p+'dotu', None) + if f is None: + continue + assert_almost_equal(f([3j, -4, 3-4j], [2, 3, 1]), -9+2j) + + def test_complex_dotc(self): + for p in 'cz': + f = getattr(fblas, p+'dotc', None) + if f is None: + continue + assert_almost_equal(f([3j, -4, 3-4j], [2, 3j, 1]), 3-14j) + + def test_nrm2(self): + for p in 'sd': + f = getattr(fblas, p+'nrm2', None) + if f is None: + continue + assert_almost_equal(f([3, -4, 5]), math.sqrt(50)) + for p in ['c', 'z', 'sc', 'dz']: + f = getattr(fblas, p+'nrm2', None) + if f is None: + continue + assert_almost_equal(f([3j, -4, 3-4j]), math.sqrt(50)) + + def test_scal(self): + for p in 'sd': + f = getattr(fblas, p+'scal', None) + if f is None: + continue + assert_array_almost_equal(f(2, [3, -4, 5]), [6, -8, 10]) + for p in 'cz': + f = getattr(fblas, p+'scal', None) + if f is None: + continue + assert_array_almost_equal(f(3j, [3j, -4, 3-4j]), [-9, -12j, 12+9j]) + for p in ['cs', 'zd']: + f = getattr(fblas, p+'scal', None) + if f is None: + continue + assert_array_almost_equal(f(3, [3j, -4, 3-4j]), [9j, -12, 9-12j]) + + def test_swap(self): + for p in 'sd': + f = getattr(fblas, p+'swap', None) + if f is 
None: + continue + x, y = [2, 3, 1], [-2, 3, 7] + x1, y1 = f(x, y) + assert_array_almost_equal(x1, y) + assert_array_almost_equal(y1, x) + for p in 'cz': + f = getattr(fblas, p+'swap', None) + if f is None: + continue + x, y = [2, 3j, 1], [-2, 3, 7-3j] + x1, y1 = f(x, y) + assert_array_almost_equal(x1, y) + assert_array_almost_equal(y1, x) + + def test_amax(self): + for p in 'sd': + f = getattr(fblas, 'i'+p+'amax') + assert_equal(f([-2, 4, 3]), 1) + for p in 'cz': + f = getattr(fblas, 'i'+p+'amax') + assert_equal(f([-5, 4+3j, 6]), 1) + # XXX: need tests for rot,rotm,rotg,rotmg + + +class TestFBLAS2Simple: + + def test_gemv(self): + for p in 'sd': + f = getattr(fblas, p+'gemv', None) + if f is None: + continue + assert_array_almost_equal(f(3, [[3]], [-4]), [-36]) + assert_array_almost_equal(f(3, [[3]], [-4], 3, [5]), [-21]) + for p in 'cz': + f = getattr(fblas, p+'gemv', None) + if f is None: + continue + assert_array_almost_equal(f(3j, [[3-4j]], [-4]), [-48-36j]) + assert_array_almost_equal(f(3j, [[3-4j]], [-4], 3, [5j]), + [-48-21j]) + + def test_ger(self): + + for p in 'sd': + f = getattr(fblas, p+'ger', None) + if f is None: + continue + assert_array_almost_equal(f(1, [1, 2], [3, 4]), [[3, 4], [6, 8]]) + assert_array_almost_equal(f(2, [1, 2, 3], [3, 4]), + [[6, 8], [12, 16], [18, 24]]) + + assert_array_almost_equal(f(1, [1, 2], [3, 4], + a=[[1, 2], [3, 4]]), [[4, 6], [9, 12]]) + + for p in 'cz': + f = getattr(fblas, p+'geru', None) + if f is None: + continue + assert_array_almost_equal(f(1, [1j, 2], [3, 4]), + [[3j, 4j], [6, 8]]) + assert_array_almost_equal(f(-2, [1j, 2j, 3j], [3j, 4j]), + [[6, 8], [12, 16], [18, 24]]) + + for p in 'cz': + for name in ('ger', 'gerc'): + f = getattr(fblas, p+name, None) + if f is None: + continue + assert_array_almost_equal(f(1, [1j, 2], [3, 4]), + [[3j, 4j], [6, 8]]) + assert_array_almost_equal(f(2, [1j, 2j, 3j], [3j, 4j]), + [[6, 8], [12, 16], [18, 24]]) + + def test_syr_her(self): + x = np.arange(1, 5, dtype='d') + resx = np.triu(x[:, np.newaxis] * x) + resx_reverse = np.triu(x[::-1, np.newaxis] * x[::-1]) + + y = np.linspace(0, 8.5, 17, endpoint=False) + + z = np.arange(1, 9, dtype='d').view('D') + resz = np.triu(z[:, np.newaxis] * z) + resz_reverse = np.triu(z[::-1, np.newaxis] * z[::-1]) + rehz = np.triu(z[:, np.newaxis] * z.conj()) + rehz_reverse = np.triu(z[::-1, np.newaxis] * z[::-1].conj()) + + w = np.c_[np.zeros(4), z, np.zeros(4)].ravel() + + for p, rtol in zip('sd', [1e-7, 1e-14]): + f = getattr(fblas, p+'syr', None) + if f is None: + continue + assert_allclose(f(1.0, x), resx, rtol=rtol) + assert_allclose(f(1.0, x, lower=True), resx.T, rtol=rtol) + assert_allclose(f(1.0, y, incx=2, offx=2, n=4), resx, rtol=rtol) + # negative increments imply reversed vectors in blas + assert_allclose(f(1.0, y, incx=-2, offx=2, n=4), + resx_reverse, rtol=rtol) + + a = np.zeros((4, 4), 'f' if p == 's' else 'd', 'F') + b = f(1.0, x, a=a, overwrite_a=True) + assert_allclose(a, resx, rtol=rtol) + + b = f(2.0, x, a=a) + assert_(a is not b) + assert_allclose(b, 3*resx, rtol=rtol) + + assert_raises(Exception, f, 1.0, x, incx=0) + assert_raises(Exception, f, 1.0, x, offx=5) + assert_raises(Exception, f, 1.0, x, offx=-2) + assert_raises(Exception, f, 1.0, x, n=-2) + assert_raises(Exception, f, 1.0, x, n=5) + assert_raises(Exception, f, 1.0, x, lower=2) + assert_raises(Exception, f, 1.0, x, a=np.zeros((2, 2), 'd', 'F')) + + for p, rtol in zip('cz', [1e-7, 1e-14]): + f = getattr(fblas, p+'syr', None) + if f is None: + continue + assert_allclose(f(1.0, z), resz, 
rtol=rtol) + assert_allclose(f(1.0, z, lower=True), resz.T, rtol=rtol) + assert_allclose(f(1.0, w, incx=3, offx=1, n=4), resz, rtol=rtol) + # negative increments imply reversed vectors in blas + assert_allclose(f(1.0, w, incx=-3, offx=1, n=4), + resz_reverse, rtol=rtol) + + a = np.zeros((4, 4), 'F' if p == 'c' else 'D', 'F') + b = f(1.0, z, a=a, overwrite_a=True) + assert_allclose(a, resz, rtol=rtol) + + b = f(2.0, z, a=a) + assert_(a is not b) + assert_allclose(b, 3*resz, rtol=rtol) + + assert_raises(Exception, f, 1.0, x, incx=0) + assert_raises(Exception, f, 1.0, x, offx=5) + assert_raises(Exception, f, 1.0, x, offx=-2) + assert_raises(Exception, f, 1.0, x, n=-2) + assert_raises(Exception, f, 1.0, x, n=5) + assert_raises(Exception, f, 1.0, x, lower=2) + assert_raises(Exception, f, 1.0, x, a=np.zeros((2, 2), 'd', 'F')) + + for p, rtol in zip('cz', [1e-7, 1e-14]): + f = getattr(fblas, p+'her', None) + if f is None: + continue + assert_allclose(f(1.0, z), rehz, rtol=rtol) + assert_allclose(f(1.0, z, lower=True), rehz.T.conj(), rtol=rtol) + assert_allclose(f(1.0, w, incx=3, offx=1, n=4), rehz, rtol=rtol) + # negative increments imply reversed vectors in blas + assert_allclose(f(1.0, w, incx=-3, offx=1, n=4), + rehz_reverse, rtol=rtol) + + a = np.zeros((4, 4), 'F' if p == 'c' else 'D', 'F') + b = f(1.0, z, a=a, overwrite_a=True) + assert_allclose(a, rehz, rtol=rtol) + + b = f(2.0, z, a=a) + assert_(a is not b) + assert_allclose(b, 3*rehz, rtol=rtol) + + assert_raises(Exception, f, 1.0, x, incx=0) + assert_raises(Exception, f, 1.0, x, offx=5) + assert_raises(Exception, f, 1.0, x, offx=-2) + assert_raises(Exception, f, 1.0, x, n=-2) + assert_raises(Exception, f, 1.0, x, n=5) + assert_raises(Exception, f, 1.0, x, lower=2) + assert_raises(Exception, f, 1.0, x, a=np.zeros((2, 2), 'd', 'F')) + + def test_syr2(self): + x = np.arange(1, 5, dtype='d') + y = np.arange(5, 9, dtype='d') + resxy = np.triu(x[:, np.newaxis] * y + y[:, np.newaxis] * x) + resxy_reverse = np.triu(x[::-1, np.newaxis] * y[::-1] + + y[::-1, np.newaxis] * x[::-1]) + + q = np.linspace(0, 8.5, 17, endpoint=False) + + for p, rtol in zip('sd', [1e-7, 1e-14]): + f = getattr(fblas, p+'syr2', None) + if f is None: + continue + assert_allclose(f(1.0, x, y), resxy, rtol=rtol) + assert_allclose(f(1.0, x, y, n=3), resxy[:3, :3], rtol=rtol) + assert_allclose(f(1.0, x, y, lower=True), resxy.T, rtol=rtol) + + assert_allclose(f(1.0, q, q, incx=2, offx=2, incy=2, offy=10), + resxy, rtol=rtol) + assert_allclose(f(1.0, q, q, incx=2, offx=2, incy=2, offy=10, n=3), + resxy[:3, :3], rtol=rtol) + # negative increments imply reversed vectors in blas + assert_allclose(f(1.0, q, q, incx=-2, offx=2, incy=-2, offy=10), + resxy_reverse, rtol=rtol) + + a = np.zeros((4, 4), 'f' if p == 's' else 'd', 'F') + b = f(1.0, x, y, a=a, overwrite_a=True) + assert_allclose(a, resxy, rtol=rtol) + + b = f(2.0, x, y, a=a) + assert_(a is not b) + assert_allclose(b, 3*resxy, rtol=rtol) + + assert_raises(Exception, f, 1.0, x, y, incx=0) + assert_raises(Exception, f, 1.0, x, y, offx=5) + assert_raises(Exception, f, 1.0, x, y, offx=-2) + assert_raises(Exception, f, 1.0, x, y, incy=0) + assert_raises(Exception, f, 1.0, x, y, offy=5) + assert_raises(Exception, f, 1.0, x, y, offy=-2) + assert_raises(Exception, f, 1.0, x, y, n=-2) + assert_raises(Exception, f, 1.0, x, y, n=5) + assert_raises(Exception, f, 1.0, x, y, lower=2) + assert_raises(Exception, f, 1.0, x, y, + a=np.zeros((2, 2), 'd', 'F')) + + def test_her2(self): + x = np.arange(1, 9, dtype='d').view('D') + y = np.arange(9, 
17, dtype='d').view('D') + resxy = x[:, np.newaxis] * y.conj() + y[:, np.newaxis] * x.conj() + resxy = np.triu(resxy) + + resxy_reverse = x[::-1, np.newaxis] * y[::-1].conj() + resxy_reverse += y[::-1, np.newaxis] * x[::-1].conj() + resxy_reverse = np.triu(resxy_reverse) + + u = np.c_[np.zeros(4), x, np.zeros(4)].ravel() + v = np.c_[np.zeros(4), y, np.zeros(4)].ravel() + + for p, rtol in zip('cz', [1e-7, 1e-14]): + f = getattr(fblas, p+'her2', None) + if f is None: + continue + assert_allclose(f(1.0, x, y), resxy, rtol=rtol) + assert_allclose(f(1.0, x, y, n=3), resxy[:3, :3], rtol=rtol) + assert_allclose(f(1.0, x, y, lower=True), resxy.T.conj(), + rtol=rtol) + + assert_allclose(f(1.0, u, v, incx=3, offx=1, incy=3, offy=1), + resxy, rtol=rtol) + assert_allclose(f(1.0, u, v, incx=3, offx=1, incy=3, offy=1, n=3), + resxy[:3, :3], rtol=rtol) + # negative increments imply reversed vectors in blas + assert_allclose(f(1.0, u, v, incx=-3, offx=1, incy=-3, offy=1), + resxy_reverse, rtol=rtol) + + a = np.zeros((4, 4), 'F' if p == 'c' else 'D', 'F') + b = f(1.0, x, y, a=a, overwrite_a=True) + assert_allclose(a, resxy, rtol=rtol) + + b = f(2.0, x, y, a=a) + assert_(a is not b) + assert_allclose(b, 3*resxy, rtol=rtol) + + assert_raises(Exception, f, 1.0, x, y, incx=0) + assert_raises(Exception, f, 1.0, x, y, offx=5) + assert_raises(Exception, f, 1.0, x, y, offx=-2) + assert_raises(Exception, f, 1.0, x, y, incy=0) + assert_raises(Exception, f, 1.0, x, y, offy=5) + assert_raises(Exception, f, 1.0, x, y, offy=-2) + assert_raises(Exception, f, 1.0, x, y, n=-2) + assert_raises(Exception, f, 1.0, x, y, n=5) + assert_raises(Exception, f, 1.0, x, y, lower=2) + assert_raises(Exception, f, 1.0, x, y, + a=np.zeros((2, 2), 'd', 'F')) + + def test_gbmv(self): + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 7 + m = 5 + kl = 1 + ku = 2 + # fake a banded matrix via toeplitz + A = toeplitz(append(rand(kl+1), zeros(m-kl-1)), + append(rand(ku+1), zeros(n-ku-1))) + A = A.astype(dtype) + Ab = zeros((kl+ku+1, n), dtype=dtype) + + # Form the banded storage + Ab[2, :5] = A[0, 0] # diag + Ab[1, 1:6] = A[0, 1] # sup1 + Ab[0, 2:7] = A[0, 2] # sup2 + Ab[3, :4] = A[1, 0] # sub1 + + x = rand(n).astype(dtype) + y = rand(m).astype(dtype) + alpha, beta = dtype(3), dtype(-5) + + func, = get_blas_funcs(('gbmv',), dtype=dtype) + y1 = func(m=m, n=n, ku=ku, kl=kl, alpha=alpha, a=Ab, + x=x, y=y, beta=beta) + y2 = alpha * A.dot(x) + beta * y + assert_array_almost_equal(y1, y2) + + y1 = func(m=m, n=n, ku=ku, kl=kl, alpha=alpha, a=Ab, + x=y, y=x, beta=beta, trans=1) + y2 = alpha * A.T.dot(y) + beta * x + assert_array_almost_equal(y1, y2) + + def test_sbmv_hbmv(self): + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 6 + k = 2 + A = zeros((n, n), dtype=dtype) + Ab = zeros((k+1, n), dtype=dtype) + + # Form the array and its packed banded storage + A[arange(n), arange(n)] = rand(n) + for ind2 in range(1, k+1): + temp = rand(n-ind2) + A[arange(n-ind2), arange(ind2, n)] = temp + Ab[-1-ind2, ind2:] = temp + A = A.astype(dtype) + A = A + A.T if ind < 2 else A + A.conj().T + Ab[-1, :] = diag(A) + x = rand(n).astype(dtype) + y = rand(n).astype(dtype) + alpha, beta = dtype(1.25), dtype(3) + + if ind > 1: + func, = get_blas_funcs(('hbmv',), dtype=dtype) + else: + func, = get_blas_funcs(('sbmv',), dtype=dtype) + y1 = func(k=k, alpha=alpha, a=Ab, x=x, y=y, beta=beta) + y2 = alpha * A.dot(x) + beta * y + assert_array_almost_equal(y1, y2) + + def test_spmv_hpmv(self): + seed(1234) + for ind, dtype in enumerate(DTYPES+COMPLEX_DTYPES): + n = 
3 + A = rand(n, n).astype(dtype) + if ind > 1: + A += rand(n, n)*1j + A = A.astype(dtype) + A = A + A.T if ind < 4 else A + A.conj().T + c, r = tril_indices(n) + Ap = A[r, c] + x = rand(n).astype(dtype) + y = rand(n).astype(dtype) + xlong = arange(2*n).astype(dtype) + ylong = ones(2*n).astype(dtype) + alpha, beta = dtype(1.25), dtype(2) + + if ind > 3: + func, = get_blas_funcs(('hpmv',), dtype=dtype) + else: + func, = get_blas_funcs(('spmv',), dtype=dtype) + y1 = func(n=n, alpha=alpha, ap=Ap, x=x, y=y, beta=beta) + y2 = alpha * A.dot(x) + beta * y + assert_array_almost_equal(y1, y2) + + # Test inc and offsets + y1 = func(n=n-1, alpha=alpha, beta=beta, x=xlong, y=ylong, ap=Ap, + incx=2, incy=2, offx=n, offy=n) + y2 = (alpha * A[:-1, :-1]).dot(xlong[3::2]) + beta * ylong[3::2] + assert_array_almost_equal(y1[3::2], y2) + assert_almost_equal(y1[4], ylong[4]) + + def test_spr_hpr(self): + seed(1234) + for ind, dtype in enumerate(DTYPES+COMPLEX_DTYPES): + n = 3 + A = rand(n, n).astype(dtype) + if ind > 1: + A += rand(n, n)*1j + A = A.astype(dtype) + A = A + A.T if ind < 4 else A + A.conj().T + c, r = tril_indices(n) + Ap = A[r, c] + x = rand(n).astype(dtype) + alpha = (DTYPES+COMPLEX_DTYPES)[mod(ind, 4)](2.5) + + if ind > 3: + func, = get_blas_funcs(('hpr',), dtype=dtype) + y2 = alpha * x[:, None].dot(x[None, :].conj()) + A + else: + func, = get_blas_funcs(('spr',), dtype=dtype) + y2 = alpha * x[:, None].dot(x[None, :]) + A + + y1 = func(n=n, alpha=alpha, ap=Ap, x=x) + y1f = zeros((3, 3), dtype=dtype) + y1f[r, c] = y1 + y1f[c, r] = y1.conj() if ind > 3 else y1 + assert_array_almost_equal(y1f, y2) + + def test_spr2_hpr2(self): + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 3 + A = rand(n, n).astype(dtype) + if ind > 1: + A += rand(n, n)*1j + A = A.astype(dtype) + A = A + A.T if ind < 2 else A + A.conj().T + c, r = tril_indices(n) + Ap = A[r, c] + x = rand(n).astype(dtype) + y = rand(n).astype(dtype) + alpha = dtype(2) + + if ind > 1: + func, = get_blas_funcs(('hpr2',), dtype=dtype) + else: + func, = get_blas_funcs(('spr2',), dtype=dtype) + + u = alpha.conj() * x[:, None].dot(y[None, :].conj()) + y2 = A + u + u.conj().T + y1 = func(n=n, alpha=alpha, x=x, y=y, ap=Ap) + y1f = zeros((3, 3), dtype=dtype) + y1f[r, c] = y1 + y1f[[1, 2, 2], [0, 0, 1]] = y1[[1, 3, 4]].conj() + assert_array_almost_equal(y1f, y2) + + def test_tbmv(self): + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 10 + k = 3 + x = rand(n).astype(dtype) + A = zeros((n, n), dtype=dtype) + # Banded upper triangular array + for sup in range(k+1): + A[arange(n-sup), arange(sup, n)] = rand(n-sup) + + # Add complex parts for c,z + if ind > 1: + A[nonzero(A)] += 1j * rand((k+1)*n-(k*(k+1)//2)).astype(dtype) + + # Form the banded storage + Ab = zeros((k+1, n), dtype=dtype) + for row in range(k+1): + Ab[-row-1, row:] = diag(A, k=row) + func, = get_blas_funcs(('tbmv',), dtype=dtype) + + y1 = func(k=k, a=Ab, x=x) + y2 = A.dot(x) + assert_array_almost_equal(y1, y2) + + y1 = func(k=k, a=Ab, x=x, diag=1) + A[arange(n), arange(n)] = dtype(1) + y2 = A.dot(x) + assert_array_almost_equal(y1, y2) + + y1 = func(k=k, a=Ab, x=x, diag=1, trans=1) + y2 = A.T.dot(x) + assert_array_almost_equal(y1, y2) + + y1 = func(k=k, a=Ab, x=x, diag=1, trans=2) + y2 = A.conj().T.dot(x) + assert_array_almost_equal(y1, y2) + + def test_tbsv(self): + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 6 + k = 3 + x = rand(n).astype(dtype) + A = zeros((n, n), dtype=dtype) + # Banded upper triangular array + for sup in range(k+1): + A[arange(n-sup), 
arange(sup, n)] = rand(n-sup) + + # Add complex parts for c,z + if ind > 1: + A[nonzero(A)] += 1j * rand((k+1)*n-(k*(k+1)//2)).astype(dtype) + + # Form the banded storage + Ab = zeros((k+1, n), dtype=dtype) + for row in range(k+1): + Ab[-row-1, row:] = diag(A, k=row) + func, = get_blas_funcs(('tbsv',), dtype=dtype) + + y1 = func(k=k, a=Ab, x=x) + y2 = solve(A, x) + assert_array_almost_equal(y1, y2) + + y1 = func(k=k, a=Ab, x=x, diag=1) + A[arange(n), arange(n)] = dtype(1) + y2 = solve(A, x) + assert_array_almost_equal(y1, y2) + + y1 = func(k=k, a=Ab, x=x, diag=1, trans=1) + y2 = solve(A.T, x) + assert_array_almost_equal(y1, y2) + + y1 = func(k=k, a=Ab, x=x, diag=1, trans=2) + y2 = solve(A.conj().T, x) + assert_array_almost_equal(y1, y2) + + def test_tpmv(self): + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 10 + x = rand(n).astype(dtype) + # Upper triangular array + A = triu(rand(n, n)) if ind < 2 else triu(rand(n, n)+rand(n, n)*1j) + # Form the packed storage + c, r = tril_indices(n) + Ap = A[r, c] + func, = get_blas_funcs(('tpmv',), dtype=dtype) + + y1 = func(n=n, ap=Ap, x=x) + y2 = A.dot(x) + assert_array_almost_equal(y1, y2) + + y1 = func(n=n, ap=Ap, x=x, diag=1) + A[arange(n), arange(n)] = dtype(1) + y2 = A.dot(x) + assert_array_almost_equal(y1, y2) + + y1 = func(n=n, ap=Ap, x=x, diag=1, trans=1) + y2 = A.T.dot(x) + assert_array_almost_equal(y1, y2) + + y1 = func(n=n, ap=Ap, x=x, diag=1, trans=2) + y2 = A.conj().T.dot(x) + assert_array_almost_equal(y1, y2) + + def test_tpsv(self): + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 10 + x = rand(n).astype(dtype) + # Upper triangular array + A = triu(rand(n, n)) if ind < 2 else triu(rand(n, n)+rand(n, n)*1j) + A += eye(n) + # Form the packed storage + c, r = tril_indices(n) + Ap = A[r, c] + func, = get_blas_funcs(('tpsv',), dtype=dtype) + + y1 = func(n=n, ap=Ap, x=x) + y2 = solve(A, x) + assert_array_almost_equal(y1, y2) + + y1 = func(n=n, ap=Ap, x=x, diag=1) + A[arange(n), arange(n)] = dtype(1) + y2 = solve(A, x) + assert_array_almost_equal(y1, y2) + + y1 = func(n=n, ap=Ap, x=x, diag=1, trans=1) + y2 = solve(A.T, x) + assert_array_almost_equal(y1, y2) + + y1 = func(n=n, ap=Ap, x=x, diag=1, trans=2) + y2 = solve(A.conj().T, x) + assert_array_almost_equal(y1, y2) + + def test_trmv(self): + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 3 + A = (rand(n, n)+eye(n)).astype(dtype) + x = rand(3).astype(dtype) + func, = get_blas_funcs(('trmv',), dtype=dtype) + + y1 = func(a=A, x=x) + y2 = triu(A).dot(x) + assert_array_almost_equal(y1, y2) + + y1 = func(a=A, x=x, diag=1) + A[arange(n), arange(n)] = dtype(1) + y2 = triu(A).dot(x) + assert_array_almost_equal(y1, y2) + + y1 = func(a=A, x=x, diag=1, trans=1) + y2 = triu(A).T.dot(x) + assert_array_almost_equal(y1, y2) + + y1 = func(a=A, x=x, diag=1, trans=2) + y2 = triu(A).conj().T.dot(x) + assert_array_almost_equal(y1, y2) + + def test_trsv(self): + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 15 + A = (rand(n, n)+eye(n)).astype(dtype) + x = rand(n).astype(dtype) + func, = get_blas_funcs(('trsv',), dtype=dtype) + + y1 = func(a=A, x=x) + y2 = solve(triu(A), x) + assert_array_almost_equal(y1, y2) + + y1 = func(a=A, x=x, lower=1) + y2 = solve(tril(A), x) + assert_array_almost_equal(y1, y2) + + y1 = func(a=A, x=x, diag=1) + A[arange(n), arange(n)] = dtype(1) + y2 = solve(triu(A), x) + assert_array_almost_equal(y1, y2) + + y1 = func(a=A, x=x, diag=1, trans=1) + y2 = solve(triu(A).T, x) + assert_array_almost_equal(y1, y2) + + y1 = func(a=A, x=x, diag=1, trans=2) + y2 
= solve(triu(A).conj().T, x) + assert_array_almost_equal(y1, y2) + + +class TestFBLAS3Simple: + + def test_gemm(self): + for p in 'sd': + f = getattr(fblas, p+'gemm', None) + if f is None: + continue + assert_array_almost_equal(f(3, [3], [-4]), [[-36]]) + assert_array_almost_equal(f(3, [3], [-4], 3, [5]), [-21]) + for p in 'cz': + f = getattr(fblas, p+'gemm', None) + if f is None: + continue + assert_array_almost_equal(f(3j, [3-4j], [-4]), [[-48-36j]]) + assert_array_almost_equal(f(3j, [3-4j], [-4], 3, [5j]), [-48-21j]) + + +def _get_func(func, ps='sdzc'): + """Just a helper: return a specified BLAS function w/typecode.""" + for p in ps: + f = getattr(fblas, p+func, None) + if f is None: + continue + yield f + + +class TestBLAS3Symm: + + def setup_method(self): + self.a = np.array([[1., 2.], + [0., 1.]]) + self.b = np.array([[1., 0., 3.], + [0., -1., 2.]]) + self.c = np.ones((2, 3)) + self.t = np.array([[2., -1., 8.], + [3., 0., 9.]]) + + def test_symm(self): + for f in _get_func('symm'): + res = f(a=self.a, b=self.b, c=self.c, alpha=1., beta=1.) + assert_array_almost_equal(res, self.t) + + res = f(a=self.a.T, b=self.b, lower=1, c=self.c, alpha=1., beta=1.) + assert_array_almost_equal(res, self.t) + + res = f(a=self.a, b=self.b.T, side=1, c=self.c.T, + alpha=1., beta=1.) + assert_array_almost_equal(res, self.t.T) + + def test_summ_wrong_side(self): + f = getattr(fblas, 'dsymm', None) + if f is not None: + assert_raises(Exception, f, **{'a': self.a, 'b': self.b, + 'alpha': 1, 'side': 1}) + # `side=1` means C <- B*A, hence shapes of A and B are to be + # compatible. Otherwise, f2py exception is raised + + def test_symm_wrong_uplo(self): + """SYMM only considers the upper/lower part of A. Hence setting + wrong value for `lower` (default is lower=0, meaning upper triangle) + gives a wrong result. + """ + f = getattr(fblas, 'dsymm', None) + if f is not None: + res = f(a=self.a, b=self.b, c=self.c, alpha=1., beta=1.) + assert np.allclose(res, self.t) + + res = f(a=self.a, b=self.b, lower=1, c=self.c, alpha=1., beta=1.) + assert not np.allclose(res, self.t) + + +class TestBLAS3Syrk: + def setup_method(self): + self.a = np.array([[1., 0.], + [0., -2.], + [2., 3.]]) + self.t = np.array([[1., 0., 2.], + [0., 4., -6.], + [2., -6., 13.]]) + self.tt = np.array([[5., 6.], + [6., 13.]]) + + def test_syrk(self): + for f in _get_func('syrk'): + c = f(a=self.a, alpha=1.) + assert_array_almost_equal(np.triu(c), np.triu(self.t)) + + c = f(a=self.a, alpha=1., lower=1) + assert_array_almost_equal(np.tril(c), np.tril(self.t)) + + c0 = np.ones(self.t.shape) + c = f(a=self.a, alpha=1., beta=1., c=c0) + assert_array_almost_equal(np.triu(c), np.triu(self.t+c0)) + + c = f(a=self.a, alpha=1., trans=1) + assert_array_almost_equal(np.triu(c), np.triu(self.tt)) + + # prints '0-th dimension must be fixed to 3 but got 5', + # FIXME: suppress? + # FIXME: how to catch the _fblas.error? + def test_syrk_wrong_c(self): + f = getattr(fblas, 'dsyrk', None) + if f is not None: + assert_raises(Exception, f, **{'a': self.a, 'alpha': 1., + 'c': np.ones((5, 8))}) + # if C is supplied, it must have compatible dimensions + + +class TestBLAS3Syr2k: + def setup_method(self): + self.a = np.array([[1., 0.], + [0., -2.], + [2., 3.]]) + self.b = np.array([[0., 1.], + [1., 0.], + [0, 1.]]) + self.t = np.array([[0., -1., 3.], + [-1., 0., 0.], + [3., 0., 6.]]) + self.tt = np.array([[0., 1.], + [1., 6]]) + + def test_syr2k(self): + for f in _get_func('syr2k'): + c = f(a=self.a, b=self.b, alpha=1.) 
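# Illustrative sketch (not part of the vendored test file): the BLAS-3 checks
# here compare only np.triu/np.tril of the result because ?syrk and ?symm
# reference and update a single triangle of C; the data below is made up
# purely to demonstrate that convention through the public wrappers.
import numpy as np
from scipy.linalg.blas import dsymm, dsyrk

a = np.array([[1., 0.],
              [0., -2.],
              [2., 3.]])
c = dsyrk(alpha=1.0, a=a)                   # fills the upper triangle of a @ a.T
assert np.allclose(np.triu(c), np.triu(a @ a.T))

s_full = np.arange(9.0).reshape(3, 3)
s_full = s_full + s_full.T                  # symmetric reference matrix
s_upper = np.triu(s_full)                   # lower triangle deliberately zeroed
out = dsymm(alpha=1.0, a=s_upper, b=a)      # only the upper triangle of s_upper is read
assert np.allclose(out, s_full @ a)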
+ assert_array_almost_equal(np.triu(c), np.triu(self.t)) + + c = f(a=self.a, b=self.b, alpha=1., lower=1) + assert_array_almost_equal(np.tril(c), np.tril(self.t)) + + c0 = np.ones(self.t.shape) + c = f(a=self.a, b=self.b, alpha=1., beta=1., c=c0) + assert_array_almost_equal(np.triu(c), np.triu(self.t+c0)) + + c = f(a=self.a, b=self.b, alpha=1., trans=1) + assert_array_almost_equal(np.triu(c), np.triu(self.tt)) + + # prints '0-th dimension must be fixed to 3 but got 5', FIXME: suppress? + def test_syr2k_wrong_c(self): + f = getattr(fblas, 'dsyr2k', None) + if f is not None: + assert_raises(Exception, f, **{'a': self.a, + 'b': self.b, + 'alpha': 1., + 'c': np.zeros((15, 8))}) + # if C is supplied, it must have compatible dimensions + + +class TestSyHe: + """Quick and simple tests for (zc)-symm, syrk, syr2k.""" + + def setup_method(self): + self.sigma_y = np.array([[0., -1.j], + [1.j, 0.]]) + + def test_symm_zc(self): + for f in _get_func('symm', 'zc'): + # NB: a is symmetric w/upper diag of ONLY + res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.) + assert_array_almost_equal(np.triu(res), np.diag([1, -1])) + + def test_hemm_zc(self): + for f in _get_func('hemm', 'zc'): + # NB: a is hermitian w/upper diag of ONLY + res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.) + assert_array_almost_equal(np.triu(res), np.diag([1, 1])) + + def test_syrk_zr(self): + for f in _get_func('syrk', 'zc'): + res = f(a=self.sigma_y, alpha=1.) + assert_array_almost_equal(np.triu(res), np.diag([-1, -1])) + + def test_herk_zr(self): + for f in _get_func('herk', 'zc'): + res = f(a=self.sigma_y, alpha=1.) + assert_array_almost_equal(np.triu(res), np.diag([1, 1])) + + def test_syr2k_zr(self): + for f in _get_func('syr2k', 'zc'): + res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.) + assert_array_almost_equal(np.triu(res), 2.*np.diag([-1, -1])) + + def test_her2k_zr(self): + for f in _get_func('her2k', 'zc'): + res = f(a=self.sigma_y, b=self.sigma_y, alpha=1.) + assert_array_almost_equal(np.triu(res), 2.*np.diag([1, 1])) + + +class TestTRMM: + """Quick and simple tests for dtrmm.""" + + def setup_method(self): + self.a = np.array([[1., 2., ], + [-2., 1.]]) + self.b = np.array([[3., 4., -1.], + [5., 6., -2.]]) + + self.a2 = np.array([[1, 1, 2, 3], + [0, 1, 4, 5], + [0, 0, 1, 6], + [0, 0, 0, 1]], order="f") + self.b2 = np.array([[1, 4], [2, 5], [3, 6], [7, 8], [9, 10]], + order="f") + + @pytest.mark.parametrize("dtype_", DTYPES) + def test_side(self, dtype_): + trmm = get_blas_funcs("trmm", dtype=dtype_) + # Provide large A array that works for side=1 but not 0 (see gh-10841) + assert_raises(Exception, trmm, 1.0, self.a2, self.b2) + res = trmm(1.0, self.a2.astype(dtype_), self.b2.astype(dtype_), + side=1) + k = self.b2.shape[1] + assert_allclose(res, self.b2 @ self.a2[:k, :k], rtol=0., + atol=100*np.finfo(dtype_).eps) + + def test_ab(self): + f = getattr(fblas, 'dtrmm', None) + if f is not None: + result = f(1., self.a, self.b) + # default a is upper triangular + expected = np.array([[13., 16., -5.], + [5., 6., -2.]]) + assert_array_almost_equal(result, expected) + + def test_ab_lower(self): + f = getattr(fblas, 'dtrmm', None) + if f is not None: + result = f(1., self.a, self.b, lower=True) + expected = np.array([[3., 4., -1.], + [-1., -2., 0.]]) # now a is lower triangular + assert_array_almost_equal(result, expected) + + def test_b_overwrites(self): + # BLAS dtrmm modifies B argument in-place. 
+ # Here the default is to copy, but this can be overridden + f = getattr(fblas, 'dtrmm', None) + if f is not None: + for overwr in [True, False]: + bcopy = self.b.copy() + result = f(1., self.a, bcopy, overwrite_b=overwr) + # C-contiguous arrays are copied + assert_(bcopy.flags.f_contiguous is False and + np.may_share_memory(bcopy, result) is False) + assert_equal(bcopy, self.b) + + bcopy = np.asfortranarray(self.b.copy()) # or just transpose it + result = f(1., self.a, bcopy, overwrite_b=True) + assert_(bcopy.flags.f_contiguous is True and + np.may_share_memory(bcopy, result) is True) + assert_array_almost_equal(bcopy, result) + + +def test_trsm(): + seed(1234) + for ind, dtype in enumerate(DTYPES): + tol = np.finfo(dtype).eps*1000 + func, = get_blas_funcs(('trsm',), dtype=dtype) + + # Test protection against size mismatches + A = rand(4, 5).astype(dtype) + B = rand(4, 4).astype(dtype) + alpha = dtype(1) + assert_raises(Exception, func, alpha, A, B) + assert_raises(Exception, func, alpha, A.T, B) + + n = 8 + m = 7 + alpha = dtype(-2.5) + A = (rand(m, m) if ind < 2 else rand(m, m) + rand(m, m)*1j) + eye(m) + A = A.astype(dtype) + Au = triu(A) + Al = tril(A) + B1 = rand(m, n).astype(dtype) + B2 = rand(n, m).astype(dtype) + + x1 = func(alpha=alpha, a=A, b=B1) + assert_equal(B1.shape, x1.shape) + x2 = solve(Au, alpha*B1) + assert_allclose(x1, x2, atol=tol) + + x1 = func(alpha=alpha, a=A, b=B1, trans_a=1) + x2 = solve(Au.T, alpha*B1) + assert_allclose(x1, x2, atol=tol) + + x1 = func(alpha=alpha, a=A, b=B1, trans_a=2) + x2 = solve(Au.conj().T, alpha*B1) + assert_allclose(x1, x2, atol=tol) + + x1 = func(alpha=alpha, a=A, b=B1, diag=1) + Au[arange(m), arange(m)] = dtype(1) + x2 = solve(Au, alpha*B1) + assert_allclose(x1, x2, atol=tol) + + x1 = func(alpha=alpha, a=A, b=B2, diag=1, side=1) + x2 = solve(Au.conj().T, alpha*B2.conj().T) + assert_allclose(x1, x2.conj().T, atol=tol) + + x1 = func(alpha=alpha, a=A, b=B2, diag=1, side=1, lower=1) + Al[arange(m), arange(m)] = dtype(1) + x2 = solve(Al.conj().T, alpha*B2.conj().T) + assert_allclose(x1, x2.conj().T, atol=tol) + + +@pytest.mark.xfail(run=False, + reason="gh-16930") +def test_gh_169309(): + x = np.repeat(10, 9) + actual = scipy.linalg.blas.dnrm2(x, 5, 3, -1) + expected = math.sqrt(500) + assert_allclose(actual, expected) + + +def test_dnrm2_neg_incx(): + # check that dnrm2(..., incx < 0) raises + # XXX: remove the test after the lowest supported BLAS implements + # negative incx (new in LAPACK 3.10) + x = np.repeat(10, 9) + incx = -1 + with assert_raises(fblas.__fblas_error): + scipy.linalg.blas.dnrm2(x, 5, 3, incx) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_cython_blas.py b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_cython_blas.py new file mode 100644 index 0000000000000000000000000000000000000000..284e214d38ed331cf0493d1e3bba6e1214939b2c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_cython_blas.py @@ -0,0 +1,118 @@ +import numpy as np +from numpy.testing import (assert_allclose, + assert_equal) +import scipy.linalg.cython_blas as blas + +class TestDGEMM: + + def test_transposes(self): + + a = np.arange(12, dtype='d').reshape((3, 4))[:2,:2] + b = np.arange(1, 13, dtype='d').reshape((4, 3))[:2,:2] + c = np.empty((2, 4))[:2,:2] + + blas._test_dgemm(1., a, b, 0., c) + assert_allclose(c, a.dot(b)) + + blas._test_dgemm(1., a.T, b, 0., c) + assert_allclose(c, a.T.dot(b)) + + blas._test_dgemm(1., a, b.T, 0., c) + assert_allclose(c, a.dot(b.T)) + + 
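# Illustrative sketch (not from the test suite): the private cython hooks
# (_test_dgemm) exercise transposed operands; the same behaviour is reachable
# through the public f2py wrapper scipy.linalg.blas.dgemm, where
# trans_a/trans_b select op(A)/op(B). The shapes below are made up.
import numpy as np
from scipy.linalg.blas import dgemm

rng = np.random.default_rng(0)
a = rng.random((2, 3))
b = rng.random((3, 4))

c = dgemm(alpha=1.0, a=a, b=b)                              # plain a @ b
assert np.allclose(c, a @ b)

c = dgemm(alpha=2.0, a=a.T, b=b.T, trans_a=1, trans_b=1)    # 2 * (a @ b)
assert np.allclose(c, 2.0 * (a @ b))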
blas._test_dgemm(1., a.T, b.T, 0., c) + assert_allclose(c, a.T.dot(b.T)) + + blas._test_dgemm(1., a, b, 0., c.T) + assert_allclose(c, a.dot(b).T) + + blas._test_dgemm(1., a.T, b, 0., c.T) + assert_allclose(c, a.T.dot(b).T) + + blas._test_dgemm(1., a, b.T, 0., c.T) + assert_allclose(c, a.dot(b.T).T) + + blas._test_dgemm(1., a.T, b.T, 0., c.T) + assert_allclose(c, a.T.dot(b.T).T) + + def test_shapes(self): + a = np.arange(6, dtype='d').reshape((3, 2)) + b = np.arange(-6, 2, dtype='d').reshape((2, 4)) + c = np.empty((3, 4)) + + blas._test_dgemm(1., a, b, 0., c) + assert_allclose(c, a.dot(b)) + + blas._test_dgemm(1., b.T, a.T, 0., c.T) + assert_allclose(c, b.T.dot(a.T).T) + +class TestWfuncPointers: + """ Test the function pointers that are expected to fail on + Mac OS X without the additional entry statement in their definitions + in fblas_l1.pyf.src. """ + + def test_complex_args(self): + + cx = np.array([.5 + 1.j, .25 - .375j, 12.5 - 4.j], np.complex64) + cy = np.array([.8 + 2.j, .875 - .625j, -1. + 2.j], np.complex64) + + assert_allclose(blas._test_cdotc(cx, cy), + -17.6468753815+21.3718757629j) + assert_allclose(blas._test_cdotu(cx, cy), + -6.11562538147+30.3156242371j) + + assert_equal(blas._test_icamax(cx), 3) + + assert_allclose(blas._test_scasum(cx), 18.625) + assert_allclose(blas._test_scnrm2(cx), 13.1796483994) + + assert_allclose(blas._test_cdotc(cx[::2], cy[::2]), + -18.1000003815+21.2000007629j) + assert_allclose(blas._test_cdotu(cx[::2], cy[::2]), + -6.10000038147+30.7999992371j) + assert_allclose(blas._test_scasum(cx[::2]), 18.) + assert_allclose(blas._test_scnrm2(cx[::2]), 13.1719398499) + + def test_double_args(self): + + x = np.array([5., -3, -.5], np.float64) + y = np.array([2, 1, .5], np.float64) + + assert_allclose(blas._test_dasum(x), 8.5) + assert_allclose(blas._test_ddot(x, y), 6.75) + assert_allclose(blas._test_dnrm2(x), 5.85234975815) + + assert_allclose(blas._test_dasum(x[::2]), 5.5) + assert_allclose(blas._test_ddot(x[::2], y[::2]), 9.75) + assert_allclose(blas._test_dnrm2(x[::2]), 5.0249376297) + + assert_equal(blas._test_idamax(x), 1) + + def test_float_args(self): + + x = np.array([5., -3, -.5], np.float32) + y = np.array([2, 1, .5], np.float32) + + assert_equal(blas._test_isamax(x), 1) + + assert_allclose(blas._test_sasum(x), 8.5) + assert_allclose(blas._test_sdot(x, y), 6.75) + assert_allclose(blas._test_snrm2(x), 5.85234975815) + + assert_allclose(blas._test_sasum(x[::2]), 5.5) + assert_allclose(blas._test_sdot(x[::2], y[::2]), 9.75) + assert_allclose(blas._test_snrm2(x[::2]), 5.0249376297) + + def test_double_complex_args(self): + + cx = np.array([.5 + 1.j, .25 - .375j, 13. - 4.j], np.complex128) + cy = np.array([.875 + 2.j, .875 - .625j, -1. 
+ 2.j], np.complex128) + + assert_equal(blas._test_izamax(cx), 3) + + assert_allclose(blas._test_zdotc(cx, cy), -18.109375+22.296875j) + assert_allclose(blas._test_zdotu(cx, cy), -6.578125+31.390625j) + + assert_allclose(blas._test_zdotc(cx[::2], cy[::2]), -18.5625+22.125j) + assert_allclose(blas._test_zdotu(cx[::2], cy[::2]), -6.5625+31.875j) + diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_cython_lapack.py b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_cython_lapack.py new file mode 100644 index 0000000000000000000000000000000000000000..2a4e7b34b62042efdb0ce0f8ee61ce0189320995 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_cython_lapack.py @@ -0,0 +1,22 @@ +from numpy.testing import assert_allclose +from scipy.linalg import cython_lapack as cython_lapack +from scipy.linalg import lapack + + +class TestLamch: + + def test_slamch(self): + for c in [b'e', b's', b'b', b'p', b'n', b'r', b'm', b'u', b'l', b'o']: + assert_allclose(cython_lapack._test_slamch(c), + lapack.slamch(c)) + + def test_dlamch(self): + for c in [b'e', b's', b'b', b'p', b'n', b'r', b'm', b'u', b'l', b'o']: + assert_allclose(cython_lapack._test_dlamch(c), + lapack.dlamch(c)) + + def test_complex_ladiv(self): + cx = .5 + 1.j + cy = .875 + 2.j + assert_allclose(cython_lapack._test_zladiv(cy, cx), 1.95+0.1j) + assert_allclose(cython_lapack._test_cladiv(cy, cx), 1.95+0.1j) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_cythonized_array_utils.py b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_cythonized_array_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..19a0b39e28274d74e1bfcbe86807c03ec0159643 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_cythonized_array_utils.py @@ -0,0 +1,121 @@ +import numpy as np +from scipy.linalg import bandwidth, issymmetric, ishermitian +import pytest +from pytest import raises + + +def test_bandwidth_dtypes(): + n = 5 + for t in np.typecodes['All']: + A = np.zeros([n, n], dtype=t) + if t in 'eUVOMm': + raises(TypeError, bandwidth, A) + elif t == 'G': # No-op test. On win these pass on others fail. 
+ pass + else: + _ = bandwidth(A) + + +def test_bandwidth_non2d_input(): + A = np.array([1, 2, 3]) + raises(ValueError, bandwidth, A) + A = np.array([[[1, 2, 3], [4, 5, 6]]]) + raises(ValueError, bandwidth, A) + + +@pytest.mark.parametrize('T', [x for x in np.typecodes['All'] + if x not in 'eGUVOMm']) +def test_bandwidth_square_inputs(T): + n = 20 + k = 4 + R = np.zeros([n, n], dtype=T, order='F') + # form a banded matrix inplace + R[[x for x in range(n)], [x for x in range(n)]] = 1 + R[[x for x in range(n-k)], [x for x in range(k, n)]] = 1 + R[[x for x in range(1, n)], [x for x in range(n-1)]] = 1 + R[[x for x in range(k, n)], [x for x in range(n-k)]] = 1 + assert bandwidth(R) == (k, k) + + +@pytest.mark.parametrize('T', [x for x in np.typecodes['All'] + if x not in 'eGUVOMm']) +def test_bandwidth_rect_inputs(T): + n, m = 10, 20 + k = 5 + R = np.zeros([n, m], dtype=T, order='F') + # form a banded matrix inplace + R[[x for x in range(n)], [x for x in range(n)]] = 1 + R[[x for x in range(n-k)], [x for x in range(k, n)]] = 1 + R[[x for x in range(1, n)], [x for x in range(n-1)]] = 1 + R[[x for x in range(k, n)], [x for x in range(n-k)]] = 1 + assert bandwidth(R) == (k, k) + + +def test_issymetric_ishermitian_dtypes(): + n = 5 + for t in np.typecodes['All']: + A = np.zeros([n, n], dtype=t) + if t in 'eUVOMm': + raises(TypeError, issymmetric, A) + raises(TypeError, ishermitian, A) + elif t == 'G': # No-op test. On win these pass on others fail. + pass + else: + assert issymmetric(A) + assert ishermitian(A) + + +def test_issymmetric_ishermitian_invalid_input(): + A = np.array([1, 2, 3]) + raises(ValueError, issymmetric, A) + raises(ValueError, ishermitian, A) + A = np.array([[[1, 2, 3], [4, 5, 6]]]) + raises(ValueError, issymmetric, A) + raises(ValueError, ishermitian, A) + A = np.array([[1, 2, 3], [4, 5, 6]]) + raises(ValueError, issymmetric, A) + raises(ValueError, ishermitian, A) + + +def test_issymetric_complex_decimals(): + A = np.arange(1, 10).astype(complex).reshape(3, 3) + A += np.arange(-4, 5).astype(complex).reshape(3, 3)*1j + # make entries decimal + A /= np.pi + A = A + A.T + assert issymmetric(A) + + +def test_ishermitian_complex_decimals(): + A = np.arange(1, 10).astype(complex).reshape(3, 3) + A += np.arange(-4, 5).astype(complex).reshape(3, 3)*1j + # make entries decimal + A /= np.pi + A = A + A.T.conj() + assert ishermitian(A) + + +def test_issymmetric_approximate_results(): + n = 20 + rng = np.random.RandomState(123456789) + x = rng.uniform(high=5., size=[n, n]) + y = x @ x.T # symmetric + p = rng.standard_normal([n, n]) + z = p @ y @ p.T + assert issymmetric(z, atol=1e-10) + assert issymmetric(z, atol=1e-10, rtol=0.) + assert issymmetric(z, atol=0., rtol=1e-12) + assert issymmetric(z, atol=1e-13, rtol=1e-12) + + +def test_ishermitian_approximate_results(): + n = 20 + rng = np.random.RandomState(987654321) + x = rng.uniform(high=5., size=[n, n]) + y = x @ x.T # symmetric + p = rng.standard_normal([n, n]) + rng.standard_normal([n, n])*1j + z = p @ y @ p.conj().T + assert ishermitian(z, atol=1e-10) + assert ishermitian(z, atol=1e-10, rtol=0.) 
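# Illustrative sketch (assumed example data): the tests here build banded
# matrices via fancy indexing; scipy.linalg.bandwidth reports the result as a
# (sub-diagonal count, super-diagonal count) pair, and issymmetric/ishermitian
# return plain booleans.
import numpy as np
from scipy.linalg import bandwidth, issymmetric

A = (np.diag(np.ones(5))
     + np.diag(np.ones(4), k=1)        # one super-diagonal
     + np.diag(np.ones(3), k=-2))      # two sub-diagonals deep
assert bandwidth(A) == (2, 1)
assert not issymmetric(A)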
+ assert ishermitian(z, atol=0., rtol=1e-12) + assert ishermitian(z, atol=1e-13, rtol=1e-12) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_decomp.py b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_decomp.py new file mode 100644 index 0000000000000000000000000000000000000000..8722b31468202a3d3f7c8a17acc922d3d0fa8015 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_decomp.py @@ -0,0 +1,2794 @@ +import itertools +import platform +import sys + +import numpy as np +from numpy.testing import (assert_equal, assert_almost_equal, + assert_array_almost_equal, assert_array_equal, + assert_, assert_allclose) + +import pytest +from pytest import raises as assert_raises + +from scipy.linalg import (eig, eigvals, lu, svd, svdvals, cholesky, qr, + schur, rsf2csf, lu_solve, lu_factor, solve, diagsvd, + hessenberg, rq, eig_banded, eigvals_banded, eigh, + eigvalsh, qr_multiply, qz, orth, ordqz, + subspace_angles, hadamard, eigvalsh_tridiagonal, + eigh_tridiagonal, null_space, cdf2rdf, LinAlgError) + +from scipy.linalg.lapack import (dgbtrf, dgbtrs, zgbtrf, zgbtrs, dsbev, + dsbevd, dsbevx, zhbevd, zhbevx) + +from scipy.linalg._misc import norm +from scipy.linalg._decomp_qz import _select_function +from scipy.stats import ortho_group + +from numpy import (array, diag, full, linalg, argsort, zeros, arange, + float32, complex64, ravel, sqrt, iscomplex, shape, sort, + sign, asarray, isfinite, ndarray, eye,) + +from scipy.linalg._testutils import assert_no_overwrite +from scipy.sparse._sputils import matrix + +from scipy._lib._testutils import check_free_memory +from scipy.linalg.blas import HAS_ILP64 +try: + from scipy.__config__ import CONFIG +except ImportError: + CONFIG = None + + +def _random_hermitian_matrix(n, posdef=False, dtype=float): + "Generate random sym/hermitian array of the given size n" + if dtype in COMPLEX_DTYPES: + A = np.random.rand(n, n) + np.random.rand(n, n)*1.0j + A = (A + A.conj().T)/2 + else: + A = np.random.rand(n, n) + A = (A + A.T)/2 + + if posdef: + A += sqrt(2*n)*np.eye(n) + + return A.astype(dtype) + + +REAL_DTYPES = [np.float32, np.float64] +COMPLEX_DTYPES = [np.complex64, np.complex128] +DTYPES = REAL_DTYPES + COMPLEX_DTYPES + + +# XXX: This function should not be defined here, but somewhere in +# scipy.linalg namespace +def symrand(dim_or_eigv, rng): + """Return a random symmetric (Hermitian) matrix. + + If 'dim_or_eigv' is an integer N, return a NxN matrix, with eigenvalues + uniformly distributed on (-1,1). + + If 'dim_or_eigv' is 1-D real array 'a', return a matrix whose + eigenvalues are 'a'. 
+ """ + if isinstance(dim_or_eigv, int): + dim = dim_or_eigv + d = rng.random(dim)*2 - 1 + elif (isinstance(dim_or_eigv, ndarray) and + len(dim_or_eigv.shape) == 1): + dim = dim_or_eigv.shape[0] + d = dim_or_eigv + else: + raise TypeError("input type not supported.") + + v = ortho_group.rvs(dim) + h = v.T.conj() @ diag(d) @ v + # to avoid roundoff errors, symmetrize the matrix (again) + h = 0.5*(h.T+h) + return h + + +class TestEigVals: + + def test_simple(self): + a = [[1, 2, 3], [1, 2, 3], [2, 5, 6]] + w = eigvals(a) + exact_w = [(9+sqrt(93))/2, 0, (9-sqrt(93))/2] + assert_array_almost_equal(w, exact_w) + + def test_simple_tr(self): + a = array([[1, 2, 3], [1, 2, 3], [2, 5, 6]], 'd').T + a = a.copy() + a = a.T + w = eigvals(a) + exact_w = [(9+sqrt(93))/2, 0, (9-sqrt(93))/2] + assert_array_almost_equal(w, exact_w) + + def test_simple_complex(self): + a = [[1, 2, 3], [1, 2, 3], [2, 5, 6+1j]] + w = eigvals(a) + exact_w = [(9+1j+sqrt(92+6j))/2, + 0, + (9+1j-sqrt(92+6j))/2] + assert_array_almost_equal(w, exact_w) + + def test_finite(self): + a = [[1, 2, 3], [1, 2, 3], [2, 5, 6]] + w = eigvals(a, check_finite=False) + exact_w = [(9+sqrt(93))/2, 0, (9-sqrt(93))/2] + assert_array_almost_equal(w, exact_w) + + +class TestEig: + + def test_simple(self): + a = array([[1, 2, 3], [1, 2, 3], [2, 5, 6]]) + w, v = eig(a) + exact_w = [(9+sqrt(93))/2, 0, (9-sqrt(93))/2] + v0 = array([1, 1, (1+sqrt(93)/3)/2]) + v1 = array([3., 0, -1]) + v2 = array([1, 1, (1-sqrt(93)/3)/2]) + v0 = v0 / norm(v0) + v1 = v1 / norm(v1) + v2 = v2 / norm(v2) + assert_array_almost_equal(w, exact_w) + assert_array_almost_equal(v0, v[:, 0]*sign(v[0, 0])) + assert_array_almost_equal(v1, v[:, 1]*sign(v[0, 1])) + assert_array_almost_equal(v2, v[:, 2]*sign(v[0, 2])) + for i in range(3): + assert_array_almost_equal(a @ v[:, i], w[i]*v[:, i]) + w, v = eig(a, left=1, right=0) + for i in range(3): + assert_array_almost_equal(a.T @ v[:, i], w[i]*v[:, i]) + + def test_simple_complex_eig(self): + a = array([[1, 2], [-2, 1]]) + w, vl, vr = eig(a, left=1, right=1) + assert_array_almost_equal(w, array([1+2j, 1-2j])) + for i in range(2): + assert_array_almost_equal(a @ vr[:, i], w[i]*vr[:, i]) + for i in range(2): + assert_array_almost_equal(a.conj().T @ vl[:, i], + w[i].conj()*vl[:, i]) + + def test_simple_complex(self): + a = array([[1, 2, 3], [1, 2, 3], [2, 5, 6+1j]]) + w, vl, vr = eig(a, left=1, right=1) + for i in range(3): + assert_array_almost_equal(a @ vr[:, i], w[i]*vr[:, i]) + for i in range(3): + assert_array_almost_equal(a.conj().T @ vl[:, i], + w[i].conj()*vl[:, i]) + + def test_gh_3054(self): + a = [[1]] + b = [[0]] + w, vr = eig(a, b, homogeneous_eigvals=True) + assert_allclose(w[1, 0], 0) + assert_(w[0, 0] != 0) + assert_allclose(vr, 1) + + w, vr = eig(a, b) + assert_equal(w, np.inf) + assert_allclose(vr, 1) + + def _check_gen_eig(self, A, B, atol_homog=1e-13, rtol_homog=1e-13): + if B is not None: + A, B = asarray(A), asarray(B) + B0 = B + else: + A = asarray(A) + B0 = B + B = np.eye(*A.shape) + msg = f"\n{A!r}\n{B!r}" + + # Eigenvalues in homogeneous coordinates + w, vr = eig(A, B0, homogeneous_eigvals=True) + wt = eigvals(A, B0, homogeneous_eigvals=True) + val1 = A @ vr * w[1, :] + val2 = B @ vr * w[0, :] + for i in range(val1.shape[1]): + assert_allclose(val1[:, i], val2[:, i], + rtol=rtol_homog, atol=atol_homog, err_msg=msg) + + if B0 is None: + assert_allclose(w[1, :], 1) + assert_allclose(wt[1, :], 1) + + perm = np.lexsort(w) + permt = np.lexsort(wt) + assert_allclose(w[:, perm], wt[:, permt], atol=1e-7, rtol=1e-7, + 
err_msg=msg) + + length = np.empty(len(vr)) + + for i in range(len(vr)): + length[i] = norm(vr[:, i]) + + assert_allclose(length, np.ones(length.size), err_msg=msg, + atol=1e-7, rtol=1e-7) + + # Convert homogeneous coordinates + beta_nonzero = (w[1, :] != 0) + wh = w[0, beta_nonzero] / w[1, beta_nonzero] + + # Eigenvalues in standard coordinates + w, vr = eig(A, B0) + wt = eigvals(A, B0) + val1 = A @ vr + val2 = B @ vr * w + res = val1 - val2 + for i in range(res.shape[1]): + if np.all(isfinite(res[:, i])): + assert_allclose(res[:, i], 0, + rtol=1e-13, atol=1e-13, err_msg=msg) + + # try to consistently order eigenvalues, including complex conjugate pairs + w_fin = w[isfinite(w)] + wt_fin = wt[isfinite(wt)] + + # prune noise in the real parts + w_fin = -1j * np.real_if_close(1j*w_fin, tol=1e-10) + wt_fin = -1j * np.real_if_close(1j*wt_fin, tol=1e-10) + + perm = argsort(w_fin) + permt = argsort(wt_fin) + + assert_allclose(w_fin[perm], wt_fin[permt], + atol=1e-7, rtol=1e-7, err_msg=msg) + + length = np.empty(len(vr)) + for i in range(len(vr)): + length[i] = norm(vr[:, i]) + assert_allclose(length, np.ones(length.size), err_msg=msg) + + # Compare homogeneous and nonhomogeneous versions + assert_allclose(sort(wh), sort(w[np.isfinite(w)])) + + def test_singular(self): + # Example taken from + # https://web.archive.org/web/20040903121217/http://www.cs.umu.se/research/nla/singular_pairs/guptri/matlab.html + A = array([[22, 34, 31, 31, 17], + [45, 45, 42, 19, 29], + [39, 47, 49, 26, 34], + [27, 31, 26, 21, 15], + [38, 44, 44, 24, 30]]) + B = array([[13, 26, 25, 17, 24], + [31, 46, 40, 26, 37], + [26, 40, 19, 25, 25], + [16, 25, 27, 14, 23], + [24, 35, 18, 21, 22]]) + + with np.errstate(all='ignore'): + self._check_gen_eig(A, B, atol_homog=5e-13) + + def test_falker(self): + # Test matrices giving some Nan generalized eigenvalues. 
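# Illustrative sketch of the homogeneous-eigenvalue convention used by
# _check_gen_eig: with homogeneous_eigvals=True the (alpha, beta) pairs come
# back as a 2 x n array and the ordinary eigenvalue is alpha / beta, which
# stays representable (beta == 0) when B is singular. Example data is made up.
import numpy as np
from scipy.linalg import eigvals

A = np.array([[2., 0.], [0., 3.]])
B = np.array([[1., 0.], [0., 0.]])      # singular B -> one infinite eigenvalue
w = eigvals(A, B, homogeneous_eigvals=True)
finite = w[1] != 0
assert np.allclose(w[0, finite] / w[1, finite], 2.0)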
+ M = diag(array([1, 0, 3])) + K = array(([2, -1, -1], [-1, 2, -1], [-1, -1, 2])) + D = array(([1, -1, 0], [-1, 1, 0], [0, 0, 0])) + Z = zeros((3, 3)) + I3 = eye(3) + A = np.block([[I3, Z], [Z, -K]]) + B = np.block([[Z, I3], [M, D]]) + + with np.errstate(all='ignore'): + self._check_gen_eig(A, B) + + def test_bad_geneig(self): + # Ticket #709 (strange return values from DGGEV) + + def matrices(omega): + c1 = -9 + omega**2 + c2 = 2*omega + A = [[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, c1, 0], + [0, 0, 0, c1]] + B = [[0, 0, 1, 0], + [0, 0, 0, 1], + [1, 0, 0, -c2], + [0, 1, c2, 0]] + return A, B + + # With a buggy LAPACK, this can fail for different omega on different + # machines -- so we need to test several values + with np.errstate(all='ignore'): + for k in range(100): + A, B = matrices(omega=k*5./100) + self._check_gen_eig(A, B) + + def test_make_eigvals(self): + # Step through all paths in _make_eigvals + # Real eigenvalues + rng = np.random.RandomState(1234) + A = symrand(3, rng) + self._check_gen_eig(A, None) + B = symrand(3, rng) + self._check_gen_eig(A, B) + # Complex eigenvalues + A = rng.random((3, 3)) + 1j*rng.random((3, 3)) + self._check_gen_eig(A, None) + B = rng.random((3, 3)) + 1j*rng.random((3, 3)) + self._check_gen_eig(A, B) + + def test_check_finite(self): + a = [[1, 2, 3], [1, 2, 3], [2, 5, 6]] + w, v = eig(a, check_finite=False) + exact_w = [(9+sqrt(93))/2, 0, (9-sqrt(93))/2] + v0 = array([1, 1, (1+sqrt(93)/3)/2]) + v1 = array([3., 0, -1]) + v2 = array([1, 1, (1-sqrt(93)/3)/2]) + v0 = v0 / norm(v0) + v1 = v1 / norm(v1) + v2 = v2 / norm(v2) + assert_array_almost_equal(w, exact_w) + assert_array_almost_equal(v0, v[:, 0]*sign(v[0, 0])) + assert_array_almost_equal(v1, v[:, 1]*sign(v[0, 1])) + assert_array_almost_equal(v2, v[:, 2]*sign(v[0, 2])) + for i in range(3): + assert_array_almost_equal(a @ v[:, i], w[i]*v[:, i]) + + def test_not_square_error(self): + """Check that passing a non-square array raises a ValueError.""" + A = np.arange(6).reshape(3, 2) + assert_raises(ValueError, eig, A) + + def test_shape_mismatch(self): + """Check that passing arrays of with different shapes + raises a ValueError.""" + A = eye(2) + B = np.arange(9.0).reshape(3, 3) + assert_raises(ValueError, eig, A, B) + assert_raises(ValueError, eig, B, A) + + def test_gh_11577(self): + # https://github.com/scipy/scipy/issues/11577 + # `A - lambda B` should have 4 and 8 among the eigenvalues, and this + # was apparently broken on some platforms + A = np.array([[12.0, 28.0, 76.0, 220.0], + [16.0, 32.0, 80.0, 224.0], + [24.0, 40.0, 88.0, 232.0], + [40.0, 56.0, 104.0, 248.0]], dtype='float64') + B = np.array([[2.0, 4.0, 10.0, 28.0], + [3.0, 5.0, 11.0, 29.0], + [5.0, 7.0, 13.0, 31.0], + [9.0, 11.0, 17.0, 35.0]], dtype='float64') + + D, V = eig(A, B) + + # The problem is ill-conditioned, and two other eigenvalues + # depend on ATLAS/OpenBLAS version, compiler version etc + # see gh-11577 for discussion + # + # NB: it is tempting to use `assert_allclose(D[:2], [4, 8])` instead but + # the ordering of eigenvalues also comes out different on different + # systems depending on who knows what. 
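# Illustrative sketch of why test_falker builds those block matrices: the
# pencil (A, B) with A = [[I, 0], [0, -K]] and B = [[0, I], [M, D]] is a
# linearization of the quadratic eigenproblem (lam**2*M + lam*D + K) @ x = 0,
# so every finite generalized eigenvalue lam makes that quadratic matrix
# numerically singular. The tolerance below is a loose, illustrative choice.
import numpy as np
from scipy.linalg import eigvals

M = np.diag([1., 0., 3.])
K = np.array([[2., -1., -1.], [-1., 2., -1.], [-1., -1., 2.]])
D = np.array([[1., -1., 0.], [-1., 1., 0.], [0., 0., 0.]])
I3, Z = np.eye(3), np.zeros((3, 3))
A = np.block([[I3, Z], [Z, -K]])
B = np.block([[Z, I3], [M, D]])

with np.errstate(all='ignore'):
    lams = eigvals(A, B)
for lam in lams[np.isfinite(lams)]:
    smin = np.linalg.svd(lam**2 * M + lam * D + K, compute_uv=False)[-1]
    assert smin < 1e-8 * max(1.0, abs(lam)**2)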
+ with np.testing.suppress_warnings() as sup: + # isclose chokes on inf/nan values + sup.filter(RuntimeWarning, "invalid value encountered in multiply") + assert np.isclose(D, 4.0, atol=1e-14).any() + assert np.isclose(D, 8.0, atol=1e-14).any() + + +class TestEigBanded: + def setup_method(self): + self.create_bandmat() + + def create_bandmat(self): + """Create the full matrix `self.fullmat` and + the corresponding band matrix `self.bandmat`.""" + N = 10 + self.KL = 2 # number of subdiagonals (below the diagonal) + self.KU = 2 # number of superdiagonals (above the diagonal) + + # symmetric band matrix + self.sym_mat = (diag(full(N, 1.0)) + + diag(full(N-1, -1.0), -1) + diag(full(N-1, -1.0), 1) + + diag(full(N-2, -2.0), -2) + diag(full(N-2, -2.0), 2)) + + # hermitian band matrix + self.herm_mat = (diag(full(N, -1.0)) + + 1j*diag(full(N-1, 1.0), -1) + - 1j*diag(full(N-1, 1.0), 1) + + diag(full(N-2, -2.0), -2) + + diag(full(N-2, -2.0), 2)) + + # general real band matrix + self.real_mat = (diag(full(N, 1.0)) + + diag(full(N-1, -1.0), -1) + diag(full(N-1, -3.0), 1) + + diag(full(N-2, 2.0), -2) + diag(full(N-2, -2.0), 2)) + + # general complex band matrix + self.comp_mat = (1j*diag(full(N, 1.0)) + + diag(full(N-1, -1.0), -1) + + 1j*diag(full(N-1, -3.0), 1) + + diag(full(N-2, 2.0), -2) + + diag(full(N-2, -2.0), 2)) + + # Eigenvalues and -vectors from linalg.eig + ew, ev = linalg.eig(self.sym_mat) + ew = ew.real + args = argsort(ew) + self.w_sym_lin = ew[args] + self.evec_sym_lin = ev[:, args] + + ew, ev = linalg.eig(self.herm_mat) + ew = ew.real + args = argsort(ew) + self.w_herm_lin = ew[args] + self.evec_herm_lin = ev[:, args] + + # Extract upper bands from symmetric and hermitian band matrices + # (for use in dsbevd, dsbevx, zhbevd, zhbevx + # and their single precision versions) + LDAB = self.KU + 1 + self.bandmat_sym = zeros((LDAB, N), dtype=float) + self.bandmat_herm = zeros((LDAB, N), dtype=complex) + for i in range(LDAB): + self.bandmat_sym[LDAB-i-1, i:N] = diag(self.sym_mat, i) + self.bandmat_herm[LDAB-i-1, i:N] = diag(self.herm_mat, i) + + # Extract bands from general real and complex band matrix + # (for use in dgbtrf, dgbtrs and their single precision versions) + LDAB = 2*self.KL + self.KU + 1 + self.bandmat_real = zeros((LDAB, N), dtype=float) + self.bandmat_real[2*self.KL, :] = diag(self.real_mat) # diagonal + for i in range(self.KL): + # superdiagonals + self.bandmat_real[2*self.KL-1-i, i+1:N] = diag(self.real_mat, i+1) + # subdiagonals + self.bandmat_real[2*self.KL+1+i, 0:N-1-i] = diag(self.real_mat, + -i-1) + + self.bandmat_comp = zeros((LDAB, N), dtype=complex) + self.bandmat_comp[2*self.KL, :] = diag(self.comp_mat) # diagonal + for i in range(self.KL): + # superdiagonals + self.bandmat_comp[2*self.KL-1-i, i+1:N] = diag(self.comp_mat, i+1) + # subdiagonals + self.bandmat_comp[2*self.KL+1+i, 0:N-1-i] = diag(self.comp_mat, + -i-1) + + # absolute value for linear equation system A*x = b + self.b = 1.0*arange(N) + self.bc = self.b * (1 + 1j) + + ##################################################################### + + def test_dsbev(self): + """Compare dsbev eigenvalues and eigenvectors with + the result of linalg.eig.""" + w, evec, info = dsbev(self.bandmat_sym, compute_v=1) + evec_ = evec[:, argsort(w)] + assert_array_almost_equal(sort(w), self.w_sym_lin) + assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin)) + + def test_dsbevd(self): + """Compare dsbevd eigenvalues and eigenvectors with + the result of linalg.eig.""" + w, evec, info = dsbevd(self.bandmat_sym, 
compute_v=1) + evec_ = evec[:, argsort(w)] + assert_array_almost_equal(sort(w), self.w_sym_lin) + assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin)) + + def test_dsbevx(self): + """Compare dsbevx eigenvalues and eigenvectors + with the result of linalg.eig.""" + N, N = shape(self.sym_mat) + # Achtung: Argumente 0.0,0.0,range? + w, evec, num, ifail, info = dsbevx(self.bandmat_sym, 0.0, 0.0, 1, N, + compute_v=1, range=2) + evec_ = evec[:, argsort(w)] + assert_array_almost_equal(sort(w), self.w_sym_lin) + assert_array_almost_equal(abs(evec_), abs(self.evec_sym_lin)) + + def test_zhbevd(self): + """Compare zhbevd eigenvalues and eigenvectors + with the result of linalg.eig.""" + w, evec, info = zhbevd(self.bandmat_herm, compute_v=1) + evec_ = evec[:, argsort(w)] + assert_array_almost_equal(sort(w), self.w_herm_lin) + assert_array_almost_equal(abs(evec_), abs(self.evec_herm_lin)) + + def test_zhbevx(self): + """Compare zhbevx eigenvalues and eigenvectors + with the result of linalg.eig.""" + N, N = shape(self.herm_mat) + # Achtung: Argumente 0.0,0.0,range? + w, evec, num, ifail, info = zhbevx(self.bandmat_herm, 0.0, 0.0, 1, N, + compute_v=1, range=2) + evec_ = evec[:, argsort(w)] + assert_array_almost_equal(sort(w), self.w_herm_lin) + assert_array_almost_equal(abs(evec_), abs(self.evec_herm_lin)) + + def test_eigvals_banded(self): + """Compare eigenvalues of eigvals_banded with those of linalg.eig.""" + w_sym = eigvals_banded(self.bandmat_sym) + w_sym = w_sym.real + assert_array_almost_equal(sort(w_sym), self.w_sym_lin) + + w_herm = eigvals_banded(self.bandmat_herm) + w_herm = w_herm.real + assert_array_almost_equal(sort(w_herm), self.w_herm_lin) + + # extracting eigenvalues with respect to an index range + ind1 = 2 + ind2 = np.longlong(6) + w_sym_ind = eigvals_banded(self.bandmat_sym, + select='i', select_range=(ind1, ind2)) + assert_array_almost_equal(sort(w_sym_ind), + self.w_sym_lin[ind1:ind2+1]) + w_herm_ind = eigvals_banded(self.bandmat_herm, + select='i', select_range=(ind1, ind2)) + assert_array_almost_equal(sort(w_herm_ind), + self.w_herm_lin[ind1:ind2+1]) + + # extracting eigenvalues with respect to a value range + v_lower = self.w_sym_lin[ind1] - 1.0e-5 + v_upper = self.w_sym_lin[ind2] + 1.0e-5 + w_sym_val = eigvals_banded(self.bandmat_sym, + select='v', select_range=(v_lower, v_upper)) + assert_array_almost_equal(sort(w_sym_val), + self.w_sym_lin[ind1:ind2+1]) + + v_lower = self.w_herm_lin[ind1] - 1.0e-5 + v_upper = self.w_herm_lin[ind2] + 1.0e-5 + w_herm_val = eigvals_banded(self.bandmat_herm, + select='v', + select_range=(v_lower, v_upper)) + assert_array_almost_equal(sort(w_herm_val), + self.w_herm_lin[ind1:ind2+1]) + + w_sym = eigvals_banded(self.bandmat_sym, check_finite=False) + w_sym = w_sym.real + assert_array_almost_equal(sort(w_sym), self.w_sym_lin) + + def test_eig_banded(self): + """Compare eigenvalues and eigenvectors of eig_banded + with those of linalg.eig. 
""" + w_sym, evec_sym = eig_banded(self.bandmat_sym) + evec_sym_ = evec_sym[:, argsort(w_sym.real)] + assert_array_almost_equal(sort(w_sym), self.w_sym_lin) + assert_array_almost_equal(abs(evec_sym_), abs(self.evec_sym_lin)) + + w_herm, evec_herm = eig_banded(self.bandmat_herm) + evec_herm_ = evec_herm[:, argsort(w_herm.real)] + assert_array_almost_equal(sort(w_herm), self.w_herm_lin) + assert_array_almost_equal(abs(evec_herm_), abs(self.evec_herm_lin)) + + # extracting eigenvalues with respect to an index range + ind1 = 2 + ind2 = 6 + w_sym_ind, evec_sym_ind = eig_banded(self.bandmat_sym, + select='i', + select_range=(ind1, ind2)) + assert_array_almost_equal(sort(w_sym_ind), + self.w_sym_lin[ind1:ind2+1]) + assert_array_almost_equal(abs(evec_sym_ind), + abs(self.evec_sym_lin[:, ind1:ind2+1])) + + w_herm_ind, evec_herm_ind = eig_banded(self.bandmat_herm, + select='i', + select_range=(ind1, ind2)) + assert_array_almost_equal(sort(w_herm_ind), + self.w_herm_lin[ind1:ind2+1]) + assert_array_almost_equal(abs(evec_herm_ind), + abs(self.evec_herm_lin[:, ind1:ind2+1])) + + # extracting eigenvalues with respect to a value range + v_lower = self.w_sym_lin[ind1] - 1.0e-5 + v_upper = self.w_sym_lin[ind2] + 1.0e-5 + w_sym_val, evec_sym_val = eig_banded(self.bandmat_sym, + select='v', + select_range=(v_lower, v_upper)) + assert_array_almost_equal(sort(w_sym_val), + self.w_sym_lin[ind1:ind2+1]) + assert_array_almost_equal(abs(evec_sym_val), + abs(self.evec_sym_lin[:, ind1:ind2+1])) + + v_lower = self.w_herm_lin[ind1] - 1.0e-5 + v_upper = self.w_herm_lin[ind2] + 1.0e-5 + w_herm_val, evec_herm_val = eig_banded(self.bandmat_herm, + select='v', + select_range=(v_lower, v_upper)) + assert_array_almost_equal(sort(w_herm_val), + self.w_herm_lin[ind1:ind2+1]) + assert_array_almost_equal(abs(evec_herm_val), + abs(self.evec_herm_lin[:, ind1:ind2+1])) + + w_sym, evec_sym = eig_banded(self.bandmat_sym, check_finite=False) + evec_sym_ = evec_sym[:, argsort(w_sym.real)] + assert_array_almost_equal(sort(w_sym), self.w_sym_lin) + assert_array_almost_equal(abs(evec_sym_), abs(self.evec_sym_lin)) + + def test_dgbtrf(self): + """Compare dgbtrf LU factorisation with the LU factorisation result + of linalg.lu.""" + M, N = shape(self.real_mat) + lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU) + + # extract matrix u from lu_symm_band + u = diag(lu_symm_band[2*self.KL, :]) + for i in range(self.KL + self.KU): + u += diag(lu_symm_band[2*self.KL-1-i, i+1:N], i+1) + + p_lin, l_lin, u_lin = lu(self.real_mat, permute_l=0) + assert_array_almost_equal(u, u_lin) + + def test_zgbtrf(self): + """Compare zgbtrf LU factorisation with the LU factorisation result + of linalg.lu.""" + M, N = shape(self.comp_mat) + lu_symm_band, ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU) + + # extract matrix u from lu_symm_band + u = diag(lu_symm_band[2*self.KL, :]) + for i in range(self.KL + self.KU): + u += diag(lu_symm_band[2*self.KL-1-i, i+1:N], i+1) + + p_lin, l_lin, u_lin = lu(self.comp_mat, permute_l=0) + assert_array_almost_equal(u, u_lin) + + def test_dgbtrs(self): + """Compare dgbtrs solutions for linear equation system A*x = b + with solutions of linalg.solve.""" + + lu_symm_band, ipiv, info = dgbtrf(self.bandmat_real, self.KL, self.KU) + y, info = dgbtrs(lu_symm_band, self.KL, self.KU, self.b, ipiv) + + y_lin = linalg.solve(self.real_mat, self.b) + assert_array_almost_equal(y, y_lin) + + def test_zgbtrs(self): + """Compare zgbtrs solutions for linear equation system A*x = b + with solutions of linalg.solve.""" 
+ + lu_symm_band, ipiv, info = zgbtrf(self.bandmat_comp, self.KL, self.KU) + y, info = zgbtrs(lu_symm_band, self.KL, self.KU, self.bc, ipiv) + + y_lin = linalg.solve(self.comp_mat, self.bc) + assert_array_almost_equal(y, y_lin) + + +class TestEigTridiagonal: + def setup_method(self): + self.create_trimat() + + def create_trimat(self): + """Create the full matrix `self.fullmat`, `self.d`, and `self.e`.""" + N = 10 + + # symmetric band matrix + self.d = full(N, 1.0) + self.e = full(N-1, -1.0) + self.full_mat = (diag(self.d) + diag(self.e, -1) + diag(self.e, 1)) + + ew, ev = linalg.eig(self.full_mat) + ew = ew.real + args = argsort(ew) + self.w = ew[args] + self.evec = ev[:, args] + + def test_degenerate(self): + """Test error conditions.""" + # Wrong sizes + assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e[:-1]) + # Must be real + assert_raises(TypeError, eigvalsh_tridiagonal, self.d, self.e * 1j) + # Bad driver + assert_raises(TypeError, eigvalsh_tridiagonal, self.d, self.e, + lapack_driver=1.) + assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e, + lapack_driver='foo') + # Bad bounds + assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e, + select='i', select_range=(0, -1)) + + def test_eigvalsh_tridiagonal(self): + """Compare eigenvalues of eigvalsh_tridiagonal with those of eig.""" + # can't use ?STERF with subselection + for driver in ('sterf', 'stev', 'stebz', 'stemr', 'auto'): + w = eigvalsh_tridiagonal(self.d, self.e, lapack_driver=driver) + assert_array_almost_equal(sort(w), self.w) + + for driver in ('sterf', 'stev'): + assert_raises(ValueError, eigvalsh_tridiagonal, self.d, self.e, + lapack_driver='stev', select='i', + select_range=(0, 1)) + for driver in ('stebz', 'stemr', 'auto'): + # extracting eigenvalues with respect to the full index range + w_ind = eigvalsh_tridiagonal( + self.d, self.e, select='i', select_range=(0, len(self.d)-1), + lapack_driver=driver) + assert_array_almost_equal(sort(w_ind), self.w) + + # extracting eigenvalues with respect to an index range + ind1 = 2 + ind2 = 6 + w_ind = eigvalsh_tridiagonal( + self.d, self.e, select='i', select_range=(ind1, ind2), + lapack_driver=driver) + assert_array_almost_equal(sort(w_ind), self.w[ind1:ind2+1]) + + # extracting eigenvalues with respect to a value range + v_lower = self.w[ind1] - 1.0e-5 + v_upper = self.w[ind2] + 1.0e-5 + w_val = eigvalsh_tridiagonal( + self.d, self.e, select='v', select_range=(v_lower, v_upper), + lapack_driver=driver) + assert_array_almost_equal(sort(w_val), self.w[ind1:ind2+1]) + + def test_eigh_tridiagonal(self): + """Compare eigenvalues and eigenvectors of eigh_tridiagonal + with those of eig. 
""" + # can't use ?STERF when eigenvectors are requested + assert_raises(ValueError, eigh_tridiagonal, self.d, self.e, + lapack_driver='sterf') + for driver in ('stebz', 'stev', 'stemr', 'auto'): + w, evec = eigh_tridiagonal(self.d, self.e, lapack_driver=driver) + evec_ = evec[:, argsort(w)] + assert_array_almost_equal(sort(w), self.w) + assert_array_almost_equal(abs(evec_), abs(self.evec)) + + assert_raises(ValueError, eigh_tridiagonal, self.d, self.e, + lapack_driver='stev', select='i', select_range=(0, 1)) + for driver in ('stebz', 'stemr', 'auto'): + # extracting eigenvalues with respect to an index range + ind1 = 0 + ind2 = len(self.d)-1 + w, evec = eigh_tridiagonal( + self.d, self.e, select='i', select_range=(ind1, ind2), + lapack_driver=driver) + assert_array_almost_equal(sort(w), self.w) + assert_array_almost_equal(abs(evec), abs(self.evec)) + ind1 = 2 + ind2 = 6 + w, evec = eigh_tridiagonal( + self.d, self.e, select='i', select_range=(ind1, ind2), + lapack_driver=driver) + assert_array_almost_equal(sort(w), self.w[ind1:ind2+1]) + assert_array_almost_equal(abs(evec), + abs(self.evec[:, ind1:ind2+1])) + + # extracting eigenvalues with respect to a value range + v_lower = self.w[ind1] - 1.0e-5 + v_upper = self.w[ind2] + 1.0e-5 + w, evec = eigh_tridiagonal( + self.d, self.e, select='v', select_range=(v_lower, v_upper), + lapack_driver=driver) + assert_array_almost_equal(sort(w), self.w[ind1:ind2+1]) + assert_array_almost_equal(abs(evec), + abs(self.evec[:, ind1:ind2+1])) + + def test_eigh_tridiagonal_1x1(self): + """See gh-20075""" + a = np.array([-2.0]) + b = np.array([]) + x = eigh_tridiagonal(a, b, eigvals_only=True) + assert x.ndim == 1 + assert_allclose(x, a) + x, V = eigh_tridiagonal(a, b, select="i", select_range=(0, 0)) + assert x.ndim == 1 + assert V.ndim == 2 + assert_allclose(x, a) + assert_allclose(V, array([[1.]])) + + x, V = eigh_tridiagonal(a, b, select="v", select_range=(-2, 0)) + assert x.size == 0 + assert x.shape == (0,) + assert V.shape == (1, 0) + + +class TestEigh: + def setup_class(self): + np.random.seed(1234) + + def test_wrong_inputs(self): + # Nonsquare a + assert_raises(ValueError, eigh, np.ones([1, 2])) + # Nonsquare b + assert_raises(ValueError, eigh, np.ones([2, 2]), np.ones([2, 1])) + # Incompatible a, b sizes + assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([2, 2])) + # Wrong type parameter for generalized problem + assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]), + type=4) + # Both value and index subsets requested + assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]), + subset_by_value=[1, 2], subset_by_index=[2, 4]) + with np.testing.suppress_warnings() as sup: + sup.filter(DeprecationWarning, "Keyword argument 'eigvals") + assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]), + subset_by_value=[1, 2], eigvals=[2, 4]) + # Invalid upper index spec + assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]), + subset_by_index=[0, 4]) + with np.testing.suppress_warnings() as sup: + sup.filter(DeprecationWarning, "Keyword argument 'eigvals") + assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]), + eigvals=[0, 4]) + # Invalid lower index + assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]), + subset_by_index=[-2, 2]) + with np.testing.suppress_warnings() as sup: + sup.filter(DeprecationWarning, "Keyword argument 'eigvals") + assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]), + eigvals=[-2, 2]) + # Invalid index spec #2 + assert_raises(ValueError, eigh, 
np.ones([3, 3]), np.ones([3, 3]), + subset_by_index=[2, 0]) + with np.testing.suppress_warnings() as sup: + sup.filter(DeprecationWarning, "Keyword argument 'eigvals") + assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]), + subset_by_index=[2, 0]) + # Invalid value spec + assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]), + subset_by_value=[2, 0]) + # Invalid driver name + assert_raises(ValueError, eigh, np.ones([2, 2]), driver='wrong') + # Generalized driver selection without b + assert_raises(ValueError, eigh, np.ones([3, 3]), None, driver='gvx') + # Standard driver with b + assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]), + driver='evr') + # Subset request from invalid driver + assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]), + driver='gvd', subset_by_index=[1, 2]) + assert_raises(ValueError, eigh, np.ones([3, 3]), np.ones([3, 3]), + driver='gvd', subset_by_index=[1, 2]) + + def test_nonpositive_b(self): + assert_raises(LinAlgError, eigh, np.ones([3, 3]), np.ones([3, 3])) + + # index based subsets are done in the legacy test_eigh() + def test_value_subsets(self): + for ind, dt in enumerate(DTYPES): + + a = _random_hermitian_matrix(20, dtype=dt) + w, v = eigh(a, subset_by_value=[-2, 2]) + assert_equal(v.shape[1], len(w)) + assert all((w > -2) & (w < 2)) + + b = _random_hermitian_matrix(20, posdef=True, dtype=dt) + w, v = eigh(a, b, subset_by_value=[-2, 2]) + assert_equal(v.shape[1], len(w)) + assert all((w > -2) & (w < 2)) + + def test_eigh_integer(self): + a = array([[1, 2], [2, 7]]) + b = array([[3, 1], [1, 5]]) + w, z = eigh(a) + w, z = eigh(a, b) + + def test_eigh_of_sparse(self): + # This tests the rejection of inputs that eigh cannot currently handle. + import scipy.sparse + a = scipy.sparse.identity(2).tocsc() + b = np.atleast_2d(a) + assert_raises(ValueError, eigh, a) + assert_raises(ValueError, eigh, b) + + @pytest.mark.parametrize('dtype_', DTYPES) + @pytest.mark.parametrize('driver', ("ev", "evd", "evr", "evx")) + def test_various_drivers_standard(self, driver, dtype_): + a = _random_hermitian_matrix(n=20, dtype=dtype_) + w, v = eigh(a, driver=driver) + assert_allclose(a @ v - (v * w), 0., + atol=1000*np.finfo(dtype_).eps, + rtol=0.) + + @pytest.mark.parametrize('type', (1, 2, 3)) + @pytest.mark.parametrize('driver', ("gv", "gvd", "gvx")) + def test_various_drivers_generalized(self, driver, type): + atol = np.spacing(5000.) + a = _random_hermitian_matrix(20) + b = _random_hermitian_matrix(20, posdef=True) + w, v = eigh(a=a, b=b, driver=driver, type=type) + if type == 1: + assert_allclose(a @ v - w*(b @ v), 0., atol=atol, rtol=0.) + elif type == 2: + assert_allclose(a @ b @ v - v * w, 0., atol=atol, rtol=0.) + else: + assert_allclose(b @ a @ v - v * w, 0., atol=atol, rtol=0.) 
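# Illustrative sketch of the subset keywords exercised by TestEigh: eigh can
# return only the eigenvalues whose indices or values fall in a requested
# window, and the eigenvector block shrinks to match. The diagonal example
# data is made up so the expected eigenvalues are obvious.
import numpy as np
from scipy.linalg import eigh

A = np.diag([1., 2., 3., 4., 5.])
w, v = eigh(A, subset_by_index=[1, 3])       # 2nd..4th smallest, inclusive
assert np.allclose(w, [2., 3., 4.]) and v.shape == (5, 3)

w, v = eigh(A, subset_by_value=[2.5, 10.])   # half-open interval (2.5, 10]
assert np.allclose(w, [3., 4., 5.])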
+ + def test_eigvalsh_new_args(self): + a = _random_hermitian_matrix(5) + w = eigvalsh(a, subset_by_index=[1, 2]) + assert_equal(len(w), 2) + + w2 = eigvalsh(a, subset_by_index=[1, 2]) + assert_equal(len(w2), 2) + assert_allclose(w, w2) + + b = np.diag([1, 1.2, 1.3, 1.5, 2]) + w3 = eigvalsh(b, subset_by_value=[1, 1.4]) + assert_equal(len(w3), 2) + assert_allclose(w3, np.array([1.2, 1.3])) + + @pytest.mark.parametrize("method", [eigh, eigvalsh]) + def test_deprecation_warnings(self, method): + with pytest.warns(DeprecationWarning, + match="Keyword argument 'turbo'"): + method(np.zeros((2, 2)), turbo=True) + with pytest.warns(DeprecationWarning, + match="Keyword argument 'eigvals'"): + method(np.zeros((2, 2)), eigvals=[0, 1]) + with pytest.deprecated_call(match="use keyword arguments"): + method(np.zeros((2,2)), np.eye(2, 2), True) + + def test_deprecation_results(self): + a = _random_hermitian_matrix(3) + b = _random_hermitian_matrix(3, posdef=True) + + # check turbo gives same result as driver='gvd' + with np.testing.suppress_warnings() as sup: + sup.filter(DeprecationWarning, "Keyword argument 'turbo'") + w_dep, v_dep = eigh(a, b, turbo=True) + w, v = eigh(a, b, driver='gvd') + assert_allclose(w_dep, w) + assert_allclose(v_dep, v) + + # check eigvals gives the same result as subset_by_index + with np.testing.suppress_warnings() as sup: + sup.filter(DeprecationWarning, "Keyword argument 'eigvals'") + w_dep, v_dep = eigh(a, eigvals=[0, 1]) + w, v = eigh(a, subset_by_index=[0, 1]) + assert_allclose(w_dep, w) + assert_allclose(v_dep, v) + + +class TestSVD_GESDD: + lapack_driver = 'gesdd' + + def test_degenerate(self): + assert_raises(TypeError, svd, [[1.]], lapack_driver=1.) + assert_raises(ValueError, svd, [[1.]], lapack_driver='foo') + + def test_simple(self): + a = [[1, 2, 3], [1, 20, 3], [2, 5, 6]] + for full_matrices in (True, False): + u, s, vh = svd(a, full_matrices=full_matrices, + lapack_driver=self.lapack_driver) + assert_array_almost_equal(u.T @ u, eye(3)) + assert_array_almost_equal(vh.T @ vh, eye(3)) + sigma = zeros((u.shape[0], vh.shape[0]), s.dtype.char) + for i in range(len(s)): + sigma[i, i] = s[i] + assert_array_almost_equal(u @ sigma @ vh, a) + + def test_simple_singular(self): + a = [[1, 2, 3], [1, 2, 3], [2, 5, 6]] + for full_matrices in (True, False): + u, s, vh = svd(a, full_matrices=full_matrices, + lapack_driver=self.lapack_driver) + assert_array_almost_equal(u.T @ u, eye(3)) + assert_array_almost_equal(vh.T @ vh, eye(3)) + sigma = zeros((u.shape[0], vh.shape[0]), s.dtype.char) + for i in range(len(s)): + sigma[i, i] = s[i] + assert_array_almost_equal(u @ sigma @ vh, a) + + def test_simple_underdet(self): + a = [[1, 2, 3], [4, 5, 6]] + for full_matrices in (True, False): + u, s, vh = svd(a, full_matrices=full_matrices, + lapack_driver=self.lapack_driver) + assert_array_almost_equal(u.T @ u, eye(u.shape[0])) + sigma = zeros((u.shape[0], vh.shape[0]), s.dtype.char) + for i in range(len(s)): + sigma[i, i] = s[i] + assert_array_almost_equal(u @ sigma @ vh, a) + + def test_simple_overdet(self): + a = [[1, 2], [4, 5], [3, 4]] + for full_matrices in (True, False): + u, s, vh = svd(a, full_matrices=full_matrices, + lapack_driver=self.lapack_driver) + assert_array_almost_equal(u.T @ u, eye(u.shape[1])) + assert_array_almost_equal(vh.T @ vh, eye(2)) + sigma = zeros((u.shape[1], vh.shape[0]), s.dtype.char) + for i in range(len(s)): + sigma[i, i] = s[i] + assert_array_almost_equal(u @ sigma @ vh, a) + + def test_random(self): + rng = np.random.RandomState(1234) + n = 20 + m = 15 
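# Illustrative sketch of the reconstruction pattern these SVD tests repeat:
# with full_matrices=False the factors are thin and A == U @ diag(s) @ Vh
# directly, with no need to pad sigma out to a rectangular matrix. Shapes
# below are made up.
import numpy as np
from scipy.linalg import svd

rng = np.random.default_rng(1234)
a = rng.random((5, 3))
u, s, vh = svd(a, full_matrices=False)
assert u.shape == (5, 3) and vh.shape == (3, 3)
assert np.allclose(u @ np.diag(s) @ vh, a)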
+ for i in range(3): + for a in [rng.random([n, m]), rng.random([m, n])]: + for full_matrices in (True, False): + u, s, vh = svd(a, full_matrices=full_matrices, + lapack_driver=self.lapack_driver) + assert_array_almost_equal(u.T @ u, eye(u.shape[1])) + assert_array_almost_equal(vh @ vh.T, eye(vh.shape[0])) + sigma = zeros((u.shape[1], vh.shape[0]), s.dtype.char) + for i in range(len(s)): + sigma[i, i] = s[i] + assert_array_almost_equal(u @ sigma @ vh, a) + + def test_simple_complex(self): + a = [[1, 2, 3], [1, 2j, 3], [2, 5, 6]] + for full_matrices in (True, False): + u, s, vh = svd(a, full_matrices=full_matrices, + lapack_driver=self.lapack_driver) + assert_array_almost_equal(u.conj().T @ u, eye(u.shape[1])) + assert_array_almost_equal(vh.conj().T @ vh, eye(vh.shape[0])) + sigma = zeros((u.shape[0], vh.shape[0]), s.dtype.char) + for i in range(len(s)): + sigma[i, i] = s[i] + assert_array_almost_equal(u @ sigma @ vh, a) + + def test_random_complex(self): + rng = np.random.RandomState(1234) + n = 20 + m = 15 + for i in range(3): + for full_matrices in (True, False): + for a in [rng.random([n, m]), rng.random([m, n])]: + a = a + 1j*rng.random(list(a.shape)) + u, s, vh = svd(a, full_matrices=full_matrices, + lapack_driver=self.lapack_driver) + assert_array_almost_equal(u.conj().T @ u, + eye(u.shape[1])) + # This fails when [m,n] + # assert_array_almost_equal(vh.conj().T @ vh, + # eye(len(vh),dtype=vh.dtype.char)) + sigma = zeros((u.shape[1], vh.shape[0]), s.dtype.char) + for i in range(len(s)): + sigma[i, i] = s[i] + assert_array_almost_equal(u @ sigma @ vh, a) + + def test_crash_1580(self): + rng = np.random.RandomState(1234) + sizes = [(13, 23), (30, 50), (60, 100)] + for sz in sizes: + for dt in [np.float32, np.float64, np.complex64, np.complex128]: + a = rng.rand(*sz).astype(dt) + # should not crash + svd(a, lapack_driver=self.lapack_driver) + + def test_check_finite(self): + a = [[1, 2, 3], [1, 20, 3], [2, 5, 6]] + u, s, vh = svd(a, check_finite=False, lapack_driver=self.lapack_driver) + assert_array_almost_equal(u.T @ u, eye(3)) + assert_array_almost_equal(vh.T @ vh, eye(3)) + sigma = zeros((u.shape[0], vh.shape[0]), s.dtype.char) + for i in range(len(s)): + sigma[i, i] = s[i] + assert_array_almost_equal(u @ sigma @ vh, a) + + def test_gh_5039(self): + # This is a smoke test for https://github.com/scipy/scipy/issues/5039 + # + # The following is reported to raise "ValueError: On entry to DGESDD + # parameter number 12 had an illegal value". + # `interp1d([1,2,3,4], [1,2,3,4], kind='cubic')` + # This is reported to only show up on LAPACK 3.0.3. 
+ # + # The matrix below is taken from the call to + # `B = _fitpack._bsplmat(order, xk)` in interpolate._find_smoothest + b = np.array( + [[0.16666667, 0.66666667, 0.16666667, 0., 0., 0.], + [0., 0.16666667, 0.66666667, 0.16666667, 0., 0.], + [0., 0., 0.16666667, 0.66666667, 0.16666667, 0.], + [0., 0., 0., 0.16666667, 0.66666667, 0.16666667]]) + svd(b, lapack_driver=self.lapack_driver) + + @pytest.mark.skipif(not HAS_ILP64, reason="64-bit LAPACK required") + @pytest.mark.slow + def test_large_matrix(self): + check_free_memory(free_mb=17000) + A = np.zeros([1, 2**31], dtype=np.float32) + A[0, -1] = 1 + u, s, vh = svd(A, full_matrices=False) + assert_allclose(s[0], 1.0) + assert_allclose(u[0, 0] * vh[0, -1], 1.0) + + +class TestSVD_GESVD(TestSVD_GESDD): + lapack_driver = 'gesvd' + + +def test_svd_gesdd_nofegfault(): + # svd(a) with {U,VT}.size > INT_MAX does not segfault + # cf https://github.com/scipy/scipy/issues/14001 + df=np.ones((4799, 53130), dtype=np.float64) + with assert_raises(ValueError): + svd(df) + + +class TestSVDVals: + + def test_empty(self): + for a in [[]], np.empty((2, 0)), np.ones((0, 3)): + s = svdvals(a) + assert_equal(s, np.empty(0)) + + def test_simple(self): + a = [[1, 2, 3], [1, 2, 3], [2, 5, 6]] + s = svdvals(a) + assert_(len(s) == 3) + assert_(s[0] >= s[1] >= s[2]) + + def test_simple_underdet(self): + a = [[1, 2, 3], [4, 5, 6]] + s = svdvals(a) + assert_(len(s) == 2) + assert_(s[0] >= s[1]) + + def test_simple_overdet(self): + a = [[1, 2], [4, 5], [3, 4]] + s = svdvals(a) + assert_(len(s) == 2) + assert_(s[0] >= s[1]) + + def test_simple_complex(self): + a = [[1, 2, 3], [1, 20, 3j], [2, 5, 6]] + s = svdvals(a) + assert_(len(s) == 3) + assert_(s[0] >= s[1] >= s[2]) + + def test_simple_underdet_complex(self): + a = [[1, 2, 3], [4, 5j, 6]] + s = svdvals(a) + assert_(len(s) == 2) + assert_(s[0] >= s[1]) + + def test_simple_overdet_complex(self): + a = [[1, 2], [4, 5], [3j, 4]] + s = svdvals(a) + assert_(len(s) == 2) + assert_(s[0] >= s[1]) + + def test_check_finite(self): + a = [[1, 2, 3], [1, 2, 3], [2, 5, 6]] + s = svdvals(a, check_finite=False) + assert_(len(s) == 3) + assert_(s[0] >= s[1] >= s[2]) + + @pytest.mark.slow + def test_crash_2609(self): + np.random.seed(1234) + a = np.random.rand(1500, 2800) + # Shouldn't crash: + svdvals(a) + + +class TestDiagSVD: + + def test_simple(self): + assert_array_almost_equal(diagsvd([1, 0, 0], 3, 3), + [[1, 0, 0], [0, 0, 0], [0, 0, 0]]) + + +class TestQR: + def test_simple(self): + a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]] + q, r = qr(a) + assert_array_almost_equal(q.T @ q, eye(3)) + assert_array_almost_equal(q @ r, a) + + def test_simple_left(self): + a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]] + q, r = qr(a) + c = [1, 2, 3] + qc, r2 = qr_multiply(a, c, "left") + assert_array_almost_equal(q @ c, qc) + assert_array_almost_equal(r, r2) + qc, r2 = qr_multiply(a, eye(3), "left") + assert_array_almost_equal(q, qc) + + def test_simple_right(self): + a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]] + q, r = qr(a) + c = [1, 2, 3] + qc, r2 = qr_multiply(a, c) + assert_array_almost_equal(c @ q, qc) + assert_array_almost_equal(r, r2) + qc, r = qr_multiply(a, eye(3)) + assert_array_almost_equal(q, qc) + + def test_simple_pivoting(self): + a = np.asarray([[8, 2, 3], [2, 9, 3], [5, 3, 6]]) + q, r, p = qr(a, pivoting=True) + d = abs(diag(r)) + assert_(np.all(d[1:] <= d[:-1])) + assert_array_almost_equal(q.T @ q, eye(3)) + assert_array_almost_equal(q @ r, a[:, p]) + q2, r2 = qr(a[:, p]) + assert_array_almost_equal(q, q2) + assert_array_almost_equal(r, r2) + + 
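+ # Editor's note: a minimal standalone sketch of the invariant the pivoting
+ # tests in this class verify: with pivoting=True, qr returns a column
+ # permutation p such that a[:, p] == q @ r, and |diag(r)| is non-increasing.
+ # Uses only the public scipy.linalg.qr API; the (tall) matrix and the
+ # _demo/_d names are arbitrary editor choices.
+ import numpy as np
+ from scipy.linalg import qr
+ a_demo = np.array([[8., 2., 3.], [2., 9., 3.], [5., 3., 6.], [1., 4., 7.]])
+ q_d, r_d, p_d = qr(a_demo, pivoting=True)
+ np.testing.assert_allclose(q_d @ r_d, a_demo[:, p_d], atol=1e-12)
+ assert np.all(np.diff(np.abs(np.diag(r_d))) <= 0)   # pivoting orders |diag(r)|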
def test_simple_left_pivoting(self): + a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]] + q, r, jpvt = qr(a, pivoting=True) + c = [1, 2, 3] + qc, r, jpvt = qr_multiply(a, c, "left", True) + assert_array_almost_equal(q @ c, qc) + + def test_simple_right_pivoting(self): + a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]] + q, r, jpvt = qr(a, pivoting=True) + c = [1, 2, 3] + qc, r, jpvt = qr_multiply(a, c, pivoting=True) + assert_array_almost_equal(c @ q, qc) + + def test_simple_trap(self): + a = [[8, 2, 3], [2, 9, 3]] + q, r = qr(a) + assert_array_almost_equal(q.T @ q, eye(2)) + assert_array_almost_equal(q @ r, a) + + def test_simple_trap_pivoting(self): + a = np.asarray([[8, 2, 3], [2, 9, 3]]) + q, r, p = qr(a, pivoting=True) + d = abs(diag(r)) + assert_(np.all(d[1:] <= d[:-1])) + assert_array_almost_equal(q.T @ q, eye(2)) + assert_array_almost_equal(q @ r, a[:, p]) + q2, r2 = qr(a[:, p]) + assert_array_almost_equal(q, q2) + assert_array_almost_equal(r, r2) + + def test_simple_tall(self): + # full version + a = [[8, 2], [2, 9], [5, 3]] + q, r = qr(a) + assert_array_almost_equal(q.T @ q, eye(3)) + assert_array_almost_equal(q @ r, a) + + def test_simple_tall_pivoting(self): + # full version pivoting + a = np.asarray([[8, 2], [2, 9], [5, 3]]) + q, r, p = qr(a, pivoting=True) + d = abs(diag(r)) + assert_(np.all(d[1:] <= d[:-1])) + assert_array_almost_equal(q.T @ q, eye(3)) + assert_array_almost_equal(q @ r, a[:, p]) + q2, r2 = qr(a[:, p]) + assert_array_almost_equal(q, q2) + assert_array_almost_equal(r, r2) + + def test_simple_tall_e(self): + # economy version + a = [[8, 2], [2, 9], [5, 3]] + q, r = qr(a, mode='economic') + assert_array_almost_equal(q.T @ q, eye(2)) + assert_array_almost_equal(q @ r, a) + assert_equal(q.shape, (3, 2)) + assert_equal(r.shape, (2, 2)) + + def test_simple_tall_e_pivoting(self): + # economy version pivoting + a = np.asarray([[8, 2], [2, 9], [5, 3]]) + q, r, p = qr(a, pivoting=True, mode='economic') + d = abs(diag(r)) + assert_(np.all(d[1:] <= d[:-1])) + assert_array_almost_equal(q.T @ q, eye(2)) + assert_array_almost_equal(q @ r, a[:, p]) + q2, r2 = qr(a[:, p], mode='economic') + assert_array_almost_equal(q, q2) + assert_array_almost_equal(r, r2) + + def test_simple_tall_left(self): + a = [[8, 2], [2, 9], [5, 3]] + q, r = qr(a, mode="economic") + c = [1, 2] + qc, r2 = qr_multiply(a, c, "left") + assert_array_almost_equal(q @ c, qc) + assert_array_almost_equal(r, r2) + c = array([1, 2, 0]) + qc, r2 = qr_multiply(a, c, "left", overwrite_c=True) + assert_array_almost_equal(q @ c[:2], qc) + qc, r = qr_multiply(a, eye(2), "left") + assert_array_almost_equal(qc, q) + + def test_simple_tall_left_pivoting(self): + a = [[8, 2], [2, 9], [5, 3]] + q, r, jpvt = qr(a, mode="economic", pivoting=True) + c = [1, 2] + qc, r, kpvt = qr_multiply(a, c, "left", True) + assert_array_equal(jpvt, kpvt) + assert_array_almost_equal(q @ c, qc) + qc, r, jpvt = qr_multiply(a, eye(2), "left", True) + assert_array_almost_equal(qc, q) + + def test_simple_tall_right(self): + a = [[8, 2], [2, 9], [5, 3]] + q, r = qr(a, mode="economic") + c = [1, 2, 3] + cq, r2 = qr_multiply(a, c) + assert_array_almost_equal(c @ q, cq) + assert_array_almost_equal(r, r2) + cq, r = qr_multiply(a, eye(3)) + assert_array_almost_equal(cq, q) + + def test_simple_tall_right_pivoting(self): + a = [[8, 2], [2, 9], [5, 3]] + q, r, jpvt = qr(a, pivoting=True, mode="economic") + c = [1, 2, 3] + cq, r, jpvt = qr_multiply(a, c, pivoting=True) + assert_array_almost_equal(c @ q, cq) + cq, r, jpvt = qr_multiply(a, eye(3), pivoting=True) + 
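+ # Editor's note (illustrative, standalone): qr_multiply applies Q from the
+ # QR factorization of `a` to `c` without forming Q explicitly -- mode="left"
+ # gives Q @ c, the default mode="right" gives c @ Q -- so passing an identity
+ # matrix recovers Q itself, which is what the next assertion checks.
+ # A minimal sketch with arbitrary values, public scipy.linalg APIs only:
+ import numpy as np
+ from scipy.linalg import qr, qr_multiply
+ a_demo = np.array([[8., 2.], [2., 9.], [5., 3.]])
+ q_d, r_d = qr(a_demo, mode='economic')
+ qc_d, _ = qr_multiply(a_demo, np.array([1., 2.]), mode='left')   # Q @ c
+ cq_d, _ = qr_multiply(a_demo, np.array([1., 2., 3.]))            # c @ Q (default mode)
+ np.testing.assert_allclose(qc_d, q_d @ np.array([1., 2.]), atol=1e-12)
+ np.testing.assert_allclose(cq_d, np.array([1., 2., 3.]) @ q_d, atol=1e-12)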
assert_array_almost_equal(cq, q) + + def test_simple_fat(self): + # full version + a = [[8, 2, 5], [2, 9, 3]] + q, r = qr(a) + assert_array_almost_equal(q.T @ q, eye(2)) + assert_array_almost_equal(q @ r, a) + assert_equal(q.shape, (2, 2)) + assert_equal(r.shape, (2, 3)) + + def test_simple_fat_pivoting(self): + # full version pivoting + a = np.asarray([[8, 2, 5], [2, 9, 3]]) + q, r, p = qr(a, pivoting=True) + d = abs(diag(r)) + assert_(np.all(d[1:] <= d[:-1])) + assert_array_almost_equal(q.T @ q, eye(2)) + assert_array_almost_equal(q @ r, a[:, p]) + assert_equal(q.shape, (2, 2)) + assert_equal(r.shape, (2, 3)) + q2, r2 = qr(a[:, p]) + assert_array_almost_equal(q, q2) + assert_array_almost_equal(r, r2) + + def test_simple_fat_e(self): + # economy version + a = [[8, 2, 3], [2, 9, 5]] + q, r = qr(a, mode='economic') + assert_array_almost_equal(q.T @ q, eye(2)) + assert_array_almost_equal(q @ r, a) + assert_equal(q.shape, (2, 2)) + assert_equal(r.shape, (2, 3)) + + def test_simple_fat_e_pivoting(self): + # economy version pivoting + a = np.asarray([[8, 2, 3], [2, 9, 5]]) + q, r, p = qr(a, pivoting=True, mode='economic') + d = abs(diag(r)) + assert_(np.all(d[1:] <= d[:-1])) + assert_array_almost_equal(q.T @ q, eye(2)) + assert_array_almost_equal(q @ r, a[:, p]) + assert_equal(q.shape, (2, 2)) + assert_equal(r.shape, (2, 3)) + q2, r2 = qr(a[:, p], mode='economic') + assert_array_almost_equal(q, q2) + assert_array_almost_equal(r, r2) + + def test_simple_fat_left(self): + a = [[8, 2, 3], [2, 9, 5]] + q, r = qr(a, mode="economic") + c = [1, 2] + qc, r2 = qr_multiply(a, c, "left") + assert_array_almost_equal(q @ c, qc) + assert_array_almost_equal(r, r2) + qc, r = qr_multiply(a, eye(2), "left") + assert_array_almost_equal(qc, q) + + def test_simple_fat_left_pivoting(self): + a = [[8, 2, 3], [2, 9, 5]] + q, r, jpvt = qr(a, mode="economic", pivoting=True) + c = [1, 2] + qc, r, jpvt = qr_multiply(a, c, "left", True) + assert_array_almost_equal(q @ c, qc) + qc, r, jpvt = qr_multiply(a, eye(2), "left", True) + assert_array_almost_equal(qc, q) + + def test_simple_fat_right(self): + a = [[8, 2, 3], [2, 9, 5]] + q, r = qr(a, mode="economic") + c = [1, 2] + cq, r2 = qr_multiply(a, c) + assert_array_almost_equal(c @ q, cq) + assert_array_almost_equal(r, r2) + cq, r = qr_multiply(a, eye(2)) + assert_array_almost_equal(cq, q) + + def test_simple_fat_right_pivoting(self): + a = [[8, 2, 3], [2, 9, 5]] + q, r, jpvt = qr(a, pivoting=True, mode="economic") + c = [1, 2] + cq, r, jpvt = qr_multiply(a, c, pivoting=True) + assert_array_almost_equal(c @ q, cq) + cq, r, jpvt = qr_multiply(a, eye(2), pivoting=True) + assert_array_almost_equal(cq, q) + + def test_simple_complex(self): + a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]] + q, r = qr(a) + assert_array_almost_equal(q.conj().T @ q, eye(3)) + assert_array_almost_equal(q @ r, a) + + def test_simple_complex_left(self): + a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]] + q, r = qr(a) + c = [1, 2, 3+4j] + qc, r = qr_multiply(a, c, "left") + assert_array_almost_equal(q @ c, qc) + qc, r = qr_multiply(a, eye(3), "left") + assert_array_almost_equal(q, qc) + + def test_simple_complex_right(self): + a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]] + q, r = qr(a) + c = [1, 2, 3+4j] + qc, r = qr_multiply(a, c) + assert_array_almost_equal(c @ q, qc) + qc, r = qr_multiply(a, eye(3)) + assert_array_almost_equal(q, qc) + + def test_simple_tall_complex_left(self): + a = [[8, 2+3j], [2, 9], [5+7j, 3]] + q, r = qr(a, mode="economic") + c = [1, 2+2j] + qc, r2 = qr_multiply(a, c, "left") + 
assert_array_almost_equal(q @ c, qc) + assert_array_almost_equal(r, r2) + c = array([1, 2, 0]) + qc, r2 = qr_multiply(a, c, "left", overwrite_c=True) + assert_array_almost_equal(q @ c[:2], qc) + qc, r = qr_multiply(a, eye(2), "left") + assert_array_almost_equal(qc, q) + + def test_simple_complex_left_conjugate(self): + a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]] + q, r = qr(a) + c = [1, 2, 3+4j] + qc, r = qr_multiply(a, c, "left", conjugate=True) + assert_array_almost_equal(q.conj() @ c, qc) + + def test_simple_complex_tall_left_conjugate(self): + a = [[3, 3+4j], [5, 2+2j], [3, 2]] + q, r = qr(a, mode='economic') + c = [1, 3+4j] + qc, r = qr_multiply(a, c, "left", conjugate=True) + assert_array_almost_equal(q.conj() @ c, qc) + + def test_simple_complex_right_conjugate(self): + a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]] + q, r = qr(a) + c = np.array([1, 2, 3+4j]) + qc, r = qr_multiply(a, c, conjugate=True) + assert_array_almost_equal(c @ q.conj(), qc) + + def test_simple_complex_pivoting(self): + a = array([[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]]) + q, r, p = qr(a, pivoting=True) + d = abs(diag(r)) + assert_(np.all(d[1:] <= d[:-1])) + assert_array_almost_equal(q.conj().T @ q, eye(3)) + assert_array_almost_equal(q @ r, a[:, p]) + q2, r2 = qr(a[:, p]) + assert_array_almost_equal(q, q2) + assert_array_almost_equal(r, r2) + + def test_simple_complex_left_pivoting(self): + a = array([[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]]) + q, r, jpvt = qr(a, pivoting=True) + c = [1, 2, 3+4j] + qc, r, jpvt = qr_multiply(a, c, "left", True) + assert_array_almost_equal(q @ c, qc) + + def test_simple_complex_right_pivoting(self): + a = array([[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]]) + q, r, jpvt = qr(a, pivoting=True) + c = [1, 2, 3+4j] + qc, r, jpvt = qr_multiply(a, c, pivoting=True) + assert_array_almost_equal(c @ q, qc) + + def test_random(self): + rng = np.random.RandomState(1234) + n = 20 + for k in range(2): + a = rng.random([n, n]) + q, r = qr(a) + assert_array_almost_equal(q.T @ q, eye(n)) + assert_array_almost_equal(q @ r, a) + + def test_random_left(self): + rng = np.random.RandomState(1234) + n = 20 + for k in range(2): + a = rng.random([n, n]) + q, r = qr(a) + c = rng.random([n]) + qc, r = qr_multiply(a, c, "left") + assert_array_almost_equal(q @ c, qc) + qc, r = qr_multiply(a, eye(n), "left") + assert_array_almost_equal(q, qc) + + def test_random_right(self): + rng = np.random.RandomState(1234) + n = 20 + for k in range(2): + a = rng.random([n, n]) + q, r = qr(a) + c = rng.random([n]) + cq, r = qr_multiply(a, c) + assert_array_almost_equal(c @ q, cq) + cq, r = qr_multiply(a, eye(n)) + assert_array_almost_equal(q, cq) + + def test_random_pivoting(self): + rng = np.random.RandomState(1234) + n = 20 + for k in range(2): + a = rng.random([n, n]) + q, r, p = qr(a, pivoting=True) + d = abs(diag(r)) + assert_(np.all(d[1:] <= d[:-1])) + assert_array_almost_equal(q.T @ q, eye(n)) + assert_array_almost_equal(q @ r, a[:, p]) + q2, r2 = qr(a[:, p]) + assert_array_almost_equal(q, q2) + assert_array_almost_equal(r, r2) + + def test_random_tall(self): + rng = np.random.RandomState(1234) + # full version + m = 200 + n = 100 + for k in range(2): + a = rng.random([m, n]) + q, r = qr(a) + assert_array_almost_equal(q.T @ q, eye(m)) + assert_array_almost_equal(q @ r, a) + + def test_random_tall_left(self): + rng = np.random.RandomState(1234) + # full version + m = 200 + n = 100 + for k in range(2): + a = rng.random([m, n]) + q, r = qr(a, mode="economic") + c = rng.random([n]) + qc, r = qr_multiply(a, c, "left") + 
assert_array_almost_equal(q @ c, qc) + qc, r = qr_multiply(a, eye(n), "left") + assert_array_almost_equal(qc, q) + + def test_random_tall_right(self): + rng = np.random.RandomState(1234) + # full version + m = 200 + n = 100 + for k in range(2): + a = rng.random([m, n]) + q, r = qr(a, mode="economic") + c = rng.random([m]) + cq, r = qr_multiply(a, c) + assert_array_almost_equal(c @ q, cq) + cq, r = qr_multiply(a, eye(m)) + assert_array_almost_equal(cq, q) + + def test_random_tall_pivoting(self): + rng = np.random.RandomState(1234) + # full version pivoting + m = 200 + n = 100 + for k in range(2): + a = rng.random([m, n]) + q, r, p = qr(a, pivoting=True) + d = abs(diag(r)) + assert_(np.all(d[1:] <= d[:-1])) + assert_array_almost_equal(q.T @ q, eye(m)) + assert_array_almost_equal(q @ r, a[:, p]) + q2, r2 = qr(a[:, p]) + assert_array_almost_equal(q, q2) + assert_array_almost_equal(r, r2) + + def test_random_tall_e(self): + rng = np.random.RandomState(1234) + # economy version + m = 200 + n = 100 + for k in range(2): + a = rng.random([m, n]) + q, r = qr(a, mode='economic') + assert_array_almost_equal(q.T @ q, eye(n)) + assert_array_almost_equal(q @ r, a) + assert_equal(q.shape, (m, n)) + assert_equal(r.shape, (n, n)) + + def test_random_tall_e_pivoting(self): + rng = np.random.RandomState(1234) + # economy version pivoting + m = 200 + n = 100 + for k in range(2): + a = rng.random([m, n]) + q, r, p = qr(a, pivoting=True, mode='economic') + d = abs(diag(r)) + assert_(np.all(d[1:] <= d[:-1])) + assert_array_almost_equal(q.T @ q, eye(n)) + assert_array_almost_equal(q @ r, a[:, p]) + assert_equal(q.shape, (m, n)) + assert_equal(r.shape, (n, n)) + q2, r2 = qr(a[:, p], mode='economic') + assert_array_almost_equal(q, q2) + assert_array_almost_equal(r, r2) + + def test_random_trap(self): + rng = np.random.RandomState(1234) + m = 100 + n = 200 + for k in range(2): + a = rng.random([m, n]) + q, r = qr(a) + assert_array_almost_equal(q.T @ q, eye(m)) + assert_array_almost_equal(q @ r, a) + + def test_random_trap_pivoting(self): + rng = np.random.RandomState(1234) + m = 100 + n = 200 + for k in range(2): + a = rng.random([m, n]) + q, r, p = qr(a, pivoting=True) + d = abs(diag(r)) + assert_(np.all(d[1:] <= d[:-1])) + assert_array_almost_equal(q.T @ q, eye(m)) + assert_array_almost_equal(q @ r, a[:, p]) + q2, r2 = qr(a[:, p]) + assert_array_almost_equal(q, q2) + assert_array_almost_equal(r, r2) + + def test_random_complex(self): + rng = np.random.RandomState(1234) + n = 20 + for k in range(2): + a = rng.random([n, n]) + 1j*rng.random([n, n]) + q, r = qr(a) + assert_array_almost_equal(q.conj().T @ q, eye(n)) + assert_array_almost_equal(q @ r, a) + + def test_random_complex_left(self): + rng = np.random.RandomState(1234) + n = 20 + for k in range(2): + a = rng.random([n, n]) + 1j*rng.random([n, n]) + q, r = qr(a) + c = rng.random([n]) + 1j*rng.random([n]) + qc, r = qr_multiply(a, c, "left") + assert_array_almost_equal(q @ c, qc) + qc, r = qr_multiply(a, eye(n), "left") + assert_array_almost_equal(q, qc) + + def test_random_complex_right(self): + rng = np.random.RandomState(1234) + n = 20 + for k in range(2): + a = rng.random([n, n]) + 1j*rng.random([n, n]) + q, r = qr(a) + c = rng.random([n]) + 1j*rng.random([n]) + cq, r = qr_multiply(a, c) + assert_array_almost_equal(c @ q, cq) + cq, r = qr_multiply(a, eye(n)) + assert_array_almost_equal(q, cq) + + def test_random_complex_pivoting(self): + rng = np.random.RandomState(1234) + n = 20 + for k in range(2): + a = rng.random([n, n]) + 1j*rng.random([n, n]) + q, r, p 
= qr(a, pivoting=True) + d = abs(diag(r)) + assert_(np.all(d[1:] <= d[:-1])) + assert_array_almost_equal(q.conj().T @ q, eye(n)) + assert_array_almost_equal(q @ r, a[:, p]) + q2, r2 = qr(a[:, p]) + assert_array_almost_equal(q, q2) + assert_array_almost_equal(r, r2) + + def test_check_finite(self): + a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]] + q, r = qr(a, check_finite=False) + assert_array_almost_equal(q.T @ q, eye(3)) + assert_array_almost_equal(q @ r, a) + + def test_lwork(self): + a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]] + # Get comparison values + q, r = qr(a, lwork=None) + + # Test against minimum valid lwork + q2, r2 = qr(a, lwork=3) + assert_array_almost_equal(q2, q) + assert_array_almost_equal(r2, r) + + # Test against larger lwork + q3, r3 = qr(a, lwork=10) + assert_array_almost_equal(q3, q) + assert_array_almost_equal(r3, r) + + # Test against explicit lwork=-1 + q4, r4 = qr(a, lwork=-1) + assert_array_almost_equal(q4, q) + assert_array_almost_equal(r4, r) + + # Test against invalid lwork + assert_raises(Exception, qr, (a,), {'lwork': 0}) + assert_raises(Exception, qr, (a,), {'lwork': 2}) + + +class TestRQ: + def test_simple(self): + a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]] + r, q = rq(a) + assert_array_almost_equal(q @ q.T, eye(3)) + assert_array_almost_equal(r @ q, a) + + def test_r(self): + a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]] + r, q = rq(a) + r2 = rq(a, mode='r') + assert_array_almost_equal(r, r2) + + def test_random(self): + rng = np.random.RandomState(1234) + n = 20 + for k in range(2): + a = rng.random([n, n]) + r, q = rq(a) + assert_array_almost_equal(q @ q.T, eye(n)) + assert_array_almost_equal(r @ q, a) + + def test_simple_trap(self): + a = [[8, 2, 3], [2, 9, 3]] + r, q = rq(a) + assert_array_almost_equal(q.T @ q, eye(3)) + assert_array_almost_equal(r @ q, a) + + def test_simple_tall(self): + a = [[8, 2], [2, 9], [5, 3]] + r, q = rq(a) + assert_array_almost_equal(q.T @ q, eye(2)) + assert_array_almost_equal(r @ q, a) + + def test_simple_fat(self): + a = [[8, 2, 5], [2, 9, 3]] + r, q = rq(a) + assert_array_almost_equal(q @ q.T, eye(3)) + assert_array_almost_equal(r @ q, a) + + def test_simple_complex(self): + a = [[3, 3+4j, 5], [5, 2, 2+7j], [3, 2, 7]] + r, q = rq(a) + assert_array_almost_equal(q @ q.conj().T, eye(3)) + assert_array_almost_equal(r @ q, a) + + def test_random_tall(self): + rng = np.random.RandomState(1234) + m = 200 + n = 100 + for k in range(2): + a = rng.random([m, n]) + r, q = rq(a) + assert_array_almost_equal(q @ q.T, eye(n)) + assert_array_almost_equal(r @ q, a) + + def test_random_trap(self): + rng = np.random.RandomState(1234) + m = 100 + n = 200 + for k in range(2): + a = rng.random([m, n]) + r, q = rq(a) + assert_array_almost_equal(q @ q.T, eye(n)) + assert_array_almost_equal(r @ q, a) + + def test_random_trap_economic(self): + rng = np.random.RandomState(1234) + m = 100 + n = 200 + for k in range(2): + a = rng.random([m, n]) + r, q = rq(a, mode='economic') + assert_array_almost_equal(q @ q.T, eye(m)) + assert_array_almost_equal(r @ q, a) + assert_equal(q.shape, (m, n)) + assert_equal(r.shape, (m, m)) + + def test_random_complex(self): + rng = np.random.RandomState(1234) + n = 20 + for k in range(2): + a = rng.random([n, n]) + 1j*rng.random([n, n]) + r, q = rq(a) + assert_array_almost_equal(q @ q.conj().T, eye(n)) + assert_array_almost_equal(r @ q, a) + + def test_random_complex_economic(self): + rng = np.random.RandomState(1234) + m = 100 + n = 200 + for k in range(2): + a = rng.random([m, n]) + 1j*rng.random([m, n]) + r, q = rq(a, mode='economic') + 
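+ # Editor's note: standalone sketch of the shapes asserted below. For a wide
+ # matrix (m <= n), rq(..., mode='economic') returns r of shape (m, m) and q
+ # of shape (m, n) with orthonormal rows (q @ q.conj().T == I), so a == r @ q.
+ # A small real-valued example; values and the _demo/_d names are arbitrary.
+ import numpy as np
+ from scipy.linalg import rq
+ a_demo = np.random.RandomState(0).random((3, 5))
+ r_d, q_d = rq(a_demo, mode='economic')
+ assert r_d.shape == (3, 3) and q_d.shape == (3, 5)
+ np.testing.assert_allclose(r_d @ q_d, a_demo, atol=1e-12)
+ np.testing.assert_allclose(q_d @ q_d.T, np.eye(3), atol=1e-12)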
assert_array_almost_equal(q @ q.conj().T, eye(m)) + assert_array_almost_equal(r @ q, a) + assert_equal(q.shape, (m, n)) + assert_equal(r.shape, (m, m)) + + def test_check_finite(self): + a = [[8, 2, 3], [2, 9, 3], [5, 3, 6]] + r, q = rq(a, check_finite=False) + assert_array_almost_equal(q @ q.T, eye(3)) + assert_array_almost_equal(r @ q, a) + + +class TestSchur: + + def check_schur(self, a, t, u, rtol, atol): + # Check that the Schur decomposition is correct. + assert_allclose(u @ t @ u.conj().T, a, rtol=rtol, atol=atol, + err_msg="Schur decomposition does not match 'a'") + # The expected value of u @ u.H - I is all zeros, so test + # with absolute tolerance only. + assert_allclose(u @ u.conj().T - np.eye(len(u)), 0, rtol=0, atol=atol, + err_msg="u is not unitary") + + def test_simple(self): + a = [[8, 12, 3], [2, 9, 3], [10, 3, 6]] + t, z = schur(a) + self.check_schur(a, t, z, rtol=1e-14, atol=5e-15) + tc, zc = schur(a, 'complex') + assert_(np.any(ravel(iscomplex(zc))) and np.any(ravel(iscomplex(tc)))) + self.check_schur(a, tc, zc, rtol=1e-14, atol=5e-15) + tc2, zc2 = rsf2csf(tc, zc) + self.check_schur(a, tc2, zc2, rtol=1e-14, atol=5e-15) + + @pytest.mark.parametrize( + 'sort, expected_diag', + [('lhp', [-np.sqrt(2), -0.5, np.sqrt(2), 0.5]), + ('rhp', [np.sqrt(2), 0.5, -np.sqrt(2), -0.5]), + ('iuc', [-0.5, 0.5, np.sqrt(2), -np.sqrt(2)]), + ('ouc', [np.sqrt(2), -np.sqrt(2), -0.5, 0.5]), + (lambda x: x >= 0.0, [np.sqrt(2), 0.5, -np.sqrt(2), -0.5])] + ) + def test_sort(self, sort, expected_diag): + # The exact eigenvalues of this matrix are + # -sqrt(2), sqrt(2), -1/2, 1/2. + a = [[4., 3., 1., -1.], + [-4.5, -3.5, -1., 1.], + [9., 6., -4., 4.5], + [6., 4., -3., 3.5]] + t, u, sdim = schur(a, sort=sort) + self.check_schur(a, t, u, rtol=1e-14, atol=5e-15) + assert_allclose(np.diag(t), expected_diag, rtol=1e-12) + assert_equal(2, sdim) + + def test_sort_errors(self): + a = [[4., 3., 1., -1.], + [-4.5, -3.5, -1., 1.], + [9., 6., -4., 4.5], + [6., 4., -3., 3.5]] + assert_raises(ValueError, schur, a, sort='unsupported') + assert_raises(ValueError, schur, a, sort=1) + + def test_check_finite(self): + a = [[8, 12, 3], [2, 9, 3], [10, 3, 6]] + t, z = schur(a, check_finite=False) + assert_array_almost_equal(z @ t @ z.conj().T, a) + + +class TestHessenberg: + + def test_simple(self): + a = [[-149, -50, -154], + [537, 180, 546], + [-27, -9, -25]] + h1 = [[-149.0000, 42.2037, -156.3165], + [-537.6783, 152.5511, -554.9272], + [0, 0.0728, 2.4489]] + h, q = hessenberg(a, calc_q=1) + assert_array_almost_equal(q.T @ a @ q, h) + assert_array_almost_equal(h, h1, decimal=4) + + def test_simple_complex(self): + a = [[-149, -50, -154], + [537, 180j, 546], + [-27j, -9, -25]] + h, q = hessenberg(a, calc_q=1) + assert_array_almost_equal(q.conj().T @ a @ q, h) + + def test_simple2(self): + a = [[1, 2, 3, 4, 5, 6, 7], + [0, 2, 3, 4, 6, 7, 2], + [0, 2, 2, 3, 0, 3, 2], + [0, 0, 2, 8, 0, 0, 2], + [0, 3, 1, 2, 0, 1, 2], + [0, 1, 2, 3, 0, 1, 0], + [0, 0, 0, 0, 0, 1, 2]] + h, q = hessenberg(a, calc_q=1) + assert_array_almost_equal(q.T @ a @ q, h) + + def test_simple3(self): + a = np.eye(3) + a[-1, 0] = 2 + h, q = hessenberg(a, calc_q=1) + assert_array_almost_equal(q.T @ a @ q, h) + + def test_random(self): + rng = np.random.RandomState(1234) + n = 20 + for k in range(2): + a = rng.random([n, n]) + h, q = hessenberg(a, calc_q=1) + assert_array_almost_equal(q.T @ a @ q, h) + + def test_random_complex(self): + rng = np.random.RandomState(1234) + n = 20 + for k in range(2): + a = rng.random([n, n]) + 1j*rng.random([n, n]) + h, q 
= hessenberg(a, calc_q=1) + assert_array_almost_equal(q.conj().T @ a @ q, h) + + def test_check_finite(self): + a = [[-149, -50, -154], + [537, 180, 546], + [-27, -9, -25]] + h1 = [[-149.0000, 42.2037, -156.3165], + [-537.6783, 152.5511, -554.9272], + [0, 0.0728, 2.4489]] + h, q = hessenberg(a, calc_q=1, check_finite=False) + assert_array_almost_equal(q.T @ a @ q, h) + assert_array_almost_equal(h, h1, decimal=4) + + def test_2x2(self): + a = [[2, 1], [7, 12]] + + h, q = hessenberg(a, calc_q=1) + assert_array_almost_equal(q, np.eye(2)) + assert_array_almost_equal(h, a) + + b = [[2-7j, 1+2j], [7+3j, 12-2j]] + h2, q2 = hessenberg(b, calc_q=1) + assert_array_almost_equal(q2, np.eye(2)) + assert_array_almost_equal(h2, b) + + +blas_provider = blas_version = None +if CONFIG is not None: + blas_provider = CONFIG['Build Dependencies']['blas']['name'] + blas_version = CONFIG['Build Dependencies']['blas']['version'] + + +class TestQZ: + @pytest.mark.xfail( + sys.platform == 'darwin' and + blas_provider == 'openblas' and + blas_version < "0.3.21.dev", + reason="gges[float32] broken for OpenBLAS on macOS, see gh-16949" + ) + def test_qz_single(self): + rng = np.random.RandomState(12345) + n = 5 + A = rng.random([n, n]).astype(float32) + B = rng.random([n, n]).astype(float32) + AA, BB, Q, Z = qz(A, B) + assert_array_almost_equal(Q @ AA @ Z.T, A, decimal=5) + assert_array_almost_equal(Q @ BB @ Z.T, B, decimal=5) + assert_array_almost_equal(Q @ Q.T, eye(n), decimal=5) + assert_array_almost_equal(Z @ Z.T, eye(n), decimal=5) + assert_(np.all(diag(BB) >= 0)) + + def test_qz_double(self): + rng = np.random.RandomState(12345) + n = 5 + A = rng.random([n, n]) + B = rng.random([n, n]) + AA, BB, Q, Z = qz(A, B) + assert_array_almost_equal(Q @ AA @ Z.T, A) + assert_array_almost_equal(Q @ BB @ Z.T, B) + assert_array_almost_equal(Q @ Q.T, eye(n)) + assert_array_almost_equal(Z @ Z.T, eye(n)) + assert_(np.all(diag(BB) >= 0)) + + def test_qz_complex(self): + rng = np.random.RandomState(12345) + n = 5 + A = rng.random([n, n]) + 1j*rng.random([n, n]) + B = rng.random([n, n]) + 1j*rng.random([n, n]) + AA, BB, Q, Z = qz(A, B) + assert_array_almost_equal(Q @ AA @ Z.conj().T, A) + assert_array_almost_equal(Q @ BB @ Z.conj().T, B) + assert_array_almost_equal(Q @ Q.conj().T, eye(n)) + assert_array_almost_equal(Z @ Z.conj().T, eye(n)) + assert_(np.all(diag(BB) >= 0)) + assert_(np.all(diag(BB).imag == 0)) + + def test_qz_complex64(self): + rng = np.random.RandomState(12345) + n = 5 + A = (rng.random([n, n]) + 1j*rng.random([n, n])).astype(complex64) + B = (rng.random([n, n]) + 1j*rng.random([n, n])).astype(complex64) + AA, BB, Q, Z = qz(A, B) + assert_array_almost_equal(Q @ AA @ Z.conj().T, A, decimal=5) + assert_array_almost_equal(Q @ BB @ Z.conj().T, B, decimal=5) + assert_array_almost_equal(Q @ Q.conj().T, eye(n), decimal=5) + assert_array_almost_equal(Z @ Z.conj().T, eye(n), decimal=5) + assert_(np.all(diag(BB) >= 0)) + assert_(np.all(diag(BB).imag == 0)) + + def test_qz_double_complex(self): + rng = np.random.RandomState(12345) + n = 5 + A = rng.random([n, n]) + B = rng.random([n, n]) + AA, BB, Q, Z = qz(A, B, output='complex') + aa = Q @ AA @ Z.conj().T + assert_array_almost_equal(aa.real, A) + assert_array_almost_equal(aa.imag, 0) + bb = Q @ BB @ Z.conj().T + assert_array_almost_equal(bb.real, B) + assert_array_almost_equal(bb.imag, 0) + assert_array_almost_equal(Q @ Q.conj().T, eye(n)) + assert_array_almost_equal(Z @ Z.conj().T, eye(n)) + assert_(np.all(diag(BB) >= 0)) + + def test_qz_double_sort(self): + # from 
https://www.nag.com/lapack-ex/node119.html + # NOTE: These matrices may be ill-conditioned and lead to a + # seg fault on certain python versions when compiled with + # sse2 or sse3 older ATLAS/LAPACK binaries for windows + # A = np.array([[3.9, 12.5, -34.5, -0.5], + # [ 4.3, 21.5, -47.5, 7.5], + # [ 4.3, 21.5, -43.5, 3.5], + # [ 4.4, 26.0, -46.0, 6.0 ]]) + + # B = np.array([[ 1.0, 2.0, -3.0, 1.0], + # [1.0, 3.0, -5.0, 4.0], + # [1.0, 3.0, -4.0, 3.0], + # [1.0, 3.0, -4.0, 4.0]]) + A = np.array([[3.9, 12.5, -34.5, 2.5], + [4.3, 21.5, -47.5, 7.5], + [4.3, 1.5, -43.5, 3.5], + [4.4, 6.0, -46.0, 6.0]]) + + B = np.array([[1.0, 1.0, -3.0, 1.0], + [1.0, 3.0, -5.0, 4.4], + [1.0, 2.0, -4.0, 1.0], + [1.2, 3.0, -4.0, 4.0]]) + + assert_raises(ValueError, qz, A, B, sort=lambda ar, ai, beta: ai == 0) + if False: + AA, BB, Q, Z, sdim = qz(A, B, sort=lambda ar, ai, beta: ai == 0) + # assert_(sdim == 2) + assert_(sdim == 4) + assert_array_almost_equal(Q @ AA @ Z.T, A) + assert_array_almost_equal(Q @ BB @ Z.T, B) + + # test absolute values bc the sign is ambiguous and + # might be platform dependent + assert_array_almost_equal(np.abs(AA), np.abs(np.array( + [[35.7864, -80.9061, -12.0629, -9.498], + [0., 2.7638, -2.3505, 7.3256], + [0., 0., 0.6258, -0.0398], + [0., 0., 0., -12.8217]])), 4) + assert_array_almost_equal(np.abs(BB), np.abs(np.array( + [[4.5324, -8.7878, 3.2357, -3.5526], + [0., 1.4314, -2.1894, 0.9709], + [0., 0., 1.3126, -0.3468], + [0., 0., 0., 0.559]])), 4) + assert_array_almost_equal(np.abs(Q), np.abs(np.array( + [[-0.4193, -0.605, -0.1894, -0.6498], + [-0.5495, 0.6987, 0.2654, -0.3734], + [-0.4973, -0.3682, 0.6194, 0.4832], + [-0.5243, 0.1008, -0.7142, 0.4526]])), 4) + assert_array_almost_equal(np.abs(Z), np.abs(np.array( + [[-0.9471, -0.2971, -0.1217, 0.0055], + [-0.0367, 0.1209, 0.0358, 0.9913], + [0.3171, -0.9041, -0.2547, 0.1312], + [0.0346, 0.2824, -0.9587, 0.0014]])), 4) + + # test absolute values bc the sign is ambiguous and might be platform + # dependent + # assert_array_almost_equal(abs(AA), abs(np.array([ + # [3.8009, -69.4505, 50.3135, -43.2884], + # [0.0000, 9.2033, -0.2001, 5.9881], + # [0.0000, 0.0000, 1.4279, 4.4453], + # [0.0000, 0.0000, 0.9019, -1.1962]])), 4) + # assert_array_almost_equal(abs(BB), abs(np.array([ + # [1.9005, -10.2285, 0.8658, -5.2134], + # [0.0000, 2.3008, 0.7915, 0.4262], + # [0.0000, 0.0000, 0.8101, 0.0000], + # [0.0000, 0.0000, 0.0000, -0.2823]])), 4) + # assert_array_almost_equal(abs(Q), abs(np.array([ + # [0.4642, 0.7886, 0.2915, -0.2786], + # [0.5002, -0.5986, 0.5638, -0.2713], + # [0.5002, 0.0154, -0.0107, 0.8657], + # [0.5331, -0.1395, -0.7727, -0.3151]])), 4) + # assert_array_almost_equal(dot(Q,Q.T), eye(4)) + # assert_array_almost_equal(abs(Z), abs(np.array([ + # [0.9961, -0.0014, 0.0887, -0.0026], + # [0.0057, -0.0404, -0.0938, -0.9948], + # [0.0626, 0.7194, -0.6908, 0.0363], + # [0.0626, -0.6934, -0.7114, 0.0956]])), 4) + # assert_array_almost_equal(dot(Z,Z.T), eye(4)) + + # def test_qz_complex_sort(self): + # cA = np.array([ + # [-21.10+22.50*1j, 53.50+-50.50*1j, -34.50+127.50*1j, 7.50+ 0.50*1j], + # [-0.46+ -7.78*1j, -3.50+-37.50*1j, -15.50+ 58.50*1j,-10.50+ -1.50*1j], + # [ 4.30+ -5.50*1j, 39.70+-17.10*1j, -68.50+ 12.50*1j, -7.50+ -3.50*1j], + # [ 5.50+ 4.40*1j, 14.40+ 43.30*1j, -32.50+-46.00*1j,-19.00+-32.50*1j]]) + + # cB = np.array([ + # [1.00+ -5.00*1j, 1.60+ 1.20*1j,-3.00+ 0.00*1j, 0.00+ -1.00*1j], + # [0.80+ -0.60*1j, 3.00+ -5.00*1j,-4.00+ 3.00*1j,-2.40+ -3.20*1j], + # [1.00+ 0.00*1j, 2.40+ 1.80*1j,-4.00+ -5.00*1j, 0.00+ -3.00*1j], + # 
[0.00+ 1.00*1j,-1.80+ 2.40*1j, 0.00+ -4.00*1j, 4.00+ -5.00*1j]]) + + # AAS,BBS,QS,ZS,sdim = qz(cA,cB,sort='lhp') + + # eigenvalues = diag(AAS)/diag(BBS) + # assert_(np.all(np.real(eigenvalues[:sdim] < 0))) + # assert_(np.all(np.real(eigenvalues[sdim:] > 0))) + + def test_check_finite(self): + rng = np.random.RandomState(12345) + n = 5 + A = rng.random([n, n]) + B = rng.random([n, n]) + AA, BB, Q, Z = qz(A, B, check_finite=False) + assert_array_almost_equal(Q @ AA @ Z.T, A) + assert_array_almost_equal(Q @ BB @ Z.T, B) + assert_array_almost_equal(Q @ Q.T, eye(n)) + assert_array_almost_equal(Z @ Z.T, eye(n)) + assert_(np.all(diag(BB) >= 0)) + + +class TestOrdQZ: + @classmethod + def setup_class(cls): + # https://www.nag.com/lapack-ex/node119.html + A1 = np.array([[-21.10 - 22.50j, 53.5 - 50.5j, -34.5 + 127.5j, + 7.5 + 0.5j], + [-0.46 - 7.78j, -3.5 - 37.5j, -15.5 + 58.5j, + -10.5 - 1.5j], + [4.30 - 5.50j, 39.7 - 17.1j, -68.5 + 12.5j, + -7.5 - 3.5j], + [5.50 + 4.40j, 14.4 + 43.3j, -32.5 - 46.0j, + -19.0 - 32.5j]]) + + B1 = np.array([[1.0 - 5.0j, 1.6 + 1.2j, -3 + 0j, 0.0 - 1.0j], + [0.8 - 0.6j, .0 - 5.0j, -4 + 3j, -2.4 - 3.2j], + [1.0 + 0.0j, 2.4 + 1.8j, -4 - 5j, 0.0 - 3.0j], + [0.0 + 1.0j, -1.8 + 2.4j, 0 - 4j, 4.0 - 5.0j]]) + + # https://www.nag.com/numeric/fl/nagdoc_fl23/xhtml/F08/f08yuf.xml + A2 = np.array([[3.9, 12.5, -34.5, -0.5], + [4.3, 21.5, -47.5, 7.5], + [4.3, 21.5, -43.5, 3.5], + [4.4, 26.0, -46.0, 6.0]]) + + B2 = np.array([[1, 2, -3, 1], + [1, 3, -5, 4], + [1, 3, -4, 3], + [1, 3, -4, 4]]) + + # example with the eigenvalues + # -0.33891648, 1.61217396+0.74013521j, 1.61217396-0.74013521j, + # 0.61244091 + # thus featuring: + # * one complex conjugate eigenvalue pair, + # * one eigenvalue in the lhp + # * 2 eigenvalues in the unit circle + # * 2 non-real eigenvalues + A3 = np.array([[5., 1., 3., 3.], + [4., 4., 2., 7.], + [7., 4., 1., 3.], + [0., 4., 8., 7.]]) + B3 = np.array([[8., 10., 6., 10.], + [7., 7., 2., 9.], + [9., 1., 6., 6.], + [5., 1., 4., 7.]]) + + # example with infinite eigenvalues + A4 = np.eye(2) + B4 = np.diag([0, 1]) + + # example with (alpha, beta) = (0, 0) + A5 = np.diag([1, 0]) + + cls.A = [A1, A2, A3, A4, A5] + cls.B = [B1, B2, B3, B4, A5] + + def qz_decomp(self, sort): + with np.errstate(all='raise'): + ret = [ordqz(Ai, Bi, sort=sort) for Ai, Bi in zip(self.A, self.B)] + return tuple(ret) + + def check(self, A, B, sort, AA, BB, alpha, beta, Q, Z): + Id = np.eye(*A.shape) + # make sure Q and Z are orthogonal + assert_array_almost_equal(Q @ Q.T.conj(), Id) + assert_array_almost_equal(Z @ Z.T.conj(), Id) + # check factorization + assert_array_almost_equal(Q @ AA, A @ Z) + assert_array_almost_equal(Q @ BB, B @ Z) + # check shape of AA and BB + assert_array_equal(np.tril(AA, -2), np.zeros(AA.shape)) + assert_array_equal(np.tril(BB, -1), np.zeros(BB.shape)) + # check eigenvalues + for i in range(A.shape[0]): + # does the current diagonal element belong to a 2-by-2 block + # that was already checked? 
+ if i > 0 and A[i, i - 1] != 0: + continue + # take care of 2-by-2 blocks + if i < AA.shape[0] - 1 and AA[i + 1, i] != 0: + evals, _ = eig(AA[i:i + 2, i:i + 2], BB[i:i + 2, i:i + 2]) + # make sure the pair of complex conjugate eigenvalues + # is ordered consistently (positive imaginary part first) + if evals[0].imag < 0: + evals = evals[[1, 0]] + tmp = alpha[i:i + 2]/beta[i:i + 2] + if tmp[0].imag < 0: + tmp = tmp[[1, 0]] + assert_array_almost_equal(evals, tmp) + else: + if alpha[i] == 0 and beta[i] == 0: + assert_equal(AA[i, i], 0) + assert_equal(BB[i, i], 0) + elif beta[i] == 0: + assert_equal(BB[i, i], 0) + else: + assert_almost_equal(AA[i, i]/BB[i, i], alpha[i]/beta[i]) + sortfun = _select_function(sort) + lastsort = True + for i in range(A.shape[0]): + cursort = sortfun(np.array([alpha[i]]), np.array([beta[i]])) + # once the sorting criterion was not matched all subsequent + # eigenvalues also shouldn't match + if not lastsort: + assert not cursort + lastsort = cursort + + def check_all(self, sort): + ret = self.qz_decomp(sort) + + for reti, Ai, Bi in zip(ret, self.A, self.B): + self.check(Ai, Bi, sort, *reti) + + def test_lhp(self): + self.check_all('lhp') + + def test_rhp(self): + self.check_all('rhp') + + def test_iuc(self): + self.check_all('iuc') + + def test_ouc(self): + self.check_all('ouc') + + def test_ref(self): + # real eigenvalues first (top-left corner) + def sort(x, y): + out = np.empty_like(x, dtype=bool) + nonzero = (y != 0) + out[~nonzero] = False + out[nonzero] = (x[nonzero]/y[nonzero]).imag == 0 + return out + + self.check_all(sort) + + def test_cef(self): + # complex eigenvalues first (top-left corner) + def sort(x, y): + out = np.empty_like(x, dtype=bool) + nonzero = (y != 0) + out[~nonzero] = False + out[nonzero] = (x[nonzero]/y[nonzero]).imag != 0 + return out + + self.check_all(sort) + + def test_diff_input_types(self): + ret = ordqz(self.A[1], self.B[2], sort='lhp') + self.check(self.A[1], self.B[2], 'lhp', *ret) + + ret = ordqz(self.B[2], self.A[1], sort='lhp') + self.check(self.B[2], self.A[1], 'lhp', *ret) + + def test_sort_explicit(self): + # Test order of the eigenvalues in the 2 x 2 case where we can + # explicitly compute the solution + A1 = np.eye(2) + B1 = np.diag([-2, 0.5]) + expected1 = [('lhp', [-0.5, 2]), + ('rhp', [2, -0.5]), + ('iuc', [-0.5, 2]), + ('ouc', [2, -0.5])] + A2 = np.eye(2) + B2 = np.diag([-2 + 1j, 0.5 + 0.5j]) + expected2 = [('lhp', [1/(-2 + 1j), 1/(0.5 + 0.5j)]), + ('rhp', [1/(0.5 + 0.5j), 1/(-2 + 1j)]), + ('iuc', [1/(-2 + 1j), 1/(0.5 + 0.5j)]), + ('ouc', [1/(0.5 + 0.5j), 1/(-2 + 1j)])] + # 'lhp' is ambiguous so don't test it + A3 = np.eye(2) + B3 = np.diag([2, 0]) + expected3 = [('rhp', [0.5, np.inf]), + ('iuc', [0.5, np.inf]), + ('ouc', [np.inf, 0.5])] + # 'rhp' is ambiguous so don't test it + A4 = np.eye(2) + B4 = np.diag([-2, 0]) + expected4 = [('lhp', [-0.5, np.inf]), + ('iuc', [-0.5, np.inf]), + ('ouc', [np.inf, -0.5])] + A5 = np.diag([0, 1]) + B5 = np.diag([0, 0.5]) + # 'lhp' and 'iuc' are ambiguous so don't test them + expected5 = [('rhp', [2, np.nan]), + ('ouc', [2, np.nan])] + + A = [A1, A2, A3, A4, A5] + B = [B1, B2, B3, B4, B5] + expected = [expected1, expected2, expected3, expected4, expected5] + for Ai, Bi, expectedi in zip(A, B, expected): + for sortstr, expected_eigvals in expectedi: + _, _, alpha, beta, _, _ = ordqz(Ai, Bi, sort=sortstr) + azero = (alpha == 0) + bzero = (beta == 0) + x = np.empty_like(alpha) + x[azero & bzero] = np.nan + x[~azero & bzero] = np.inf + x[~bzero] = alpha[~bzero]/beta[~bzero] + 
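+ # Editor's note (illustrative, standalone): the generalized eigenvalues of
+ # (A, B) are alpha/beta; beta == 0 marks an infinite eigenvalue and
+ # alpha == beta == 0 an indeterminate one, which is why the masking above
+ # avoids dividing by zero. Minimal sketch on the B = diag([2, 0]) case,
+ # matching expected3 above ('ouc' puts the infinite eigenvalue first):
+ import numpy as np
+ from scipy.linalg import ordqz
+ _, _, al, be, _, _ = ordqz(np.eye(2), np.diag([2., 0.]), sort='ouc')
+ lam = np.empty_like(al)
+ lam[be == 0] = np.inf
+ lam[be != 0] = al[be != 0] / be[be != 0]
+ assert np.isinf(lam[0]) and np.isclose(lam[1], 0.5)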
assert_allclose(expected_eigvals, x) + + +class TestOrdQZWorkspaceSize: + def test_decompose(self): + rng = np.random.RandomState(12345) + N = 202 + # raises error if lwork parameter to dtrsen is too small + for ddtype in [np.float32, np.float64]: + A = rng.random((N, N)).astype(ddtype) + B = rng.random((N, N)).astype(ddtype) + # sort = lambda ar, ai, b: ar**2 + ai**2 < b**2 + _ = ordqz(A, B, sort=lambda alpha, beta: alpha < beta, + output='real') + + for ddtype in [np.complex128, np.complex64]: + A = rng.random((N, N)).astype(ddtype) + B = rng.random((N, N)).astype(ddtype) + _ = ordqz(A, B, sort=lambda alpha, beta: alpha < beta, + output='complex') + + @pytest.mark.slow + def test_decompose_ouc(self): + rng = np.random.RandomState(12345) + N = 202 + # segfaults if lwork parameter to dtrsen is too small + for ddtype in [np.float32, np.float64, np.complex128, np.complex64]: + A = rng.random((N, N)).astype(ddtype) + B = rng.random((N, N)).astype(ddtype) + S, T, alpha, beta, U, V = ordqz(A, B, sort='ouc') + + +class TestDatacopied: + + def test_datacopied(self): + from scipy.linalg._decomp import _datacopied + + M = matrix([[0, 1], [2, 3]]) + A = asarray(M) + L = M.tolist() + M2 = M.copy() + + class Fake1: + def __array__(self, dtype=None, copy=None): + return A + + class Fake2: + __array_interface__ = A.__array_interface__ + + F1 = Fake1() + F2 = Fake2() + + for item, status in [(M, False), (A, False), (L, True), + (M2, False), (F1, False), (F2, False)]: + arr = asarray(item) + assert_equal(_datacopied(arr, item), status, + err_msg=repr(item)) + + +def test_aligned_mem_float(): + """Check linalg works with non-aligned memory (float32)""" + # Allocate 402 bytes of memory (allocated on boundary) + a = arange(402, dtype=np.uint8) + + # Create an array with boundary offset 4 + z = np.frombuffer(a.data, offset=2, count=100, dtype=float32) + z.shape = 10, 10 + + eig(z, overwrite_a=True) + eig(z.T, overwrite_a=True) + + +@pytest.mark.skipif(platform.machine() == 'ppc64le', + reason="crashes on ppc64le") +def test_aligned_mem(): + """Check linalg works with non-aligned memory (float64)""" + # Allocate 804 bytes of memory (allocated on boundary) + a = arange(804, dtype=np.uint8) + + # Create an array with boundary offset 4 + z = np.frombuffer(a.data, offset=4, count=100, dtype=float) + z.shape = 10, 10 + + eig(z, overwrite_a=True) + eig(z.T, overwrite_a=True) + + +def test_aligned_mem_complex(): + """Check that complex objects don't need to be completely aligned""" + # Allocate 1608 bytes of memory (allocated on boundary) + a = zeros(1608, dtype=np.uint8) + + # Create an array with boundary offset 8 + z = np.frombuffer(a.data, offset=8, count=100, dtype=complex) + z.shape = 10, 10 + + eig(z, overwrite_a=True) + # This does not need special handling + eig(z.T, overwrite_a=True) + + +def check_lapack_misaligned(func, args, kwargs): + args = list(args) + for i in range(len(args)): + a = args[:] + if isinstance(a[i], np.ndarray): + # Try misaligning a[i] + aa = np.zeros(a[i].size*a[i].dtype.itemsize+8, dtype=np.uint8) + aa = np.frombuffer(aa.data, offset=4, count=a[i].size, + dtype=a[i].dtype) + aa.shape = a[i].shape + aa[...] 
= a[i] + a[i] = aa + func(*a, **kwargs) + if len(a[i].shape) > 1: + a[i] = a[i].T + func(*a, **kwargs) + + +@pytest.mark.xfail(run=False, + reason="Ticket #1152, triggers a segfault in rare cases.") +def test_lapack_misaligned(): + M = np.eye(10, dtype=float) + R = np.arange(100) + R.shape = 10, 10 + S = np.arange(20000, dtype=np.uint8) + S = np.frombuffer(S.data, offset=4, count=100, dtype=float) + S.shape = 10, 10 + b = np.ones(10) + LU, piv = lu_factor(S) + for (func, args, kwargs) in [ + (eig, (S,), dict(overwrite_a=True)), # crash + (eigvals, (S,), dict(overwrite_a=True)), # no crash + (lu, (S,), dict(overwrite_a=True)), # no crash + (lu_factor, (S,), dict(overwrite_a=True)), # no crash + (lu_solve, ((LU, piv), b), dict(overwrite_b=True)), + (solve, (S, b), dict(overwrite_a=True, overwrite_b=True)), + (svd, (M,), dict(overwrite_a=True)), # no crash + (svd, (R,), dict(overwrite_a=True)), # no crash + (svd, (S,), dict(overwrite_a=True)), # crash + (svdvals, (S,), dict()), # no crash + (svdvals, (S,), dict(overwrite_a=True)), # crash + (cholesky, (M,), dict(overwrite_a=True)), # no crash + (qr, (S,), dict(overwrite_a=True)), # crash + (rq, (S,), dict(overwrite_a=True)), # crash + (hessenberg, (S,), dict(overwrite_a=True)), # crash + (schur, (S,), dict(overwrite_a=True)), # crash + ]: + check_lapack_misaligned(func, args, kwargs) +# not properly tested +# cholesky, rsf2csf, lu_solve, solve, eig_banded, eigvals_banded, eigh, diagsvd + + +class TestOverwrite: + def test_eig(self): + assert_no_overwrite(eig, [(3, 3)]) + assert_no_overwrite(eig, [(3, 3), (3, 3)]) + + def test_eigh(self): + assert_no_overwrite(eigh, [(3, 3)]) + assert_no_overwrite(eigh, [(3, 3), (3, 3)]) + + def test_eig_banded(self): + assert_no_overwrite(eig_banded, [(3, 2)]) + + def test_eigvals(self): + assert_no_overwrite(eigvals, [(3, 3)]) + + def test_eigvalsh(self): + assert_no_overwrite(eigvalsh, [(3, 3)]) + + def test_eigvals_banded(self): + assert_no_overwrite(eigvals_banded, [(3, 2)]) + + def test_hessenberg(self): + assert_no_overwrite(hessenberg, [(3, 3)]) + + def test_lu_factor(self): + assert_no_overwrite(lu_factor, [(3, 3)]) + + def test_lu_solve(self): + x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 8]]) + xlu = lu_factor(x) + assert_no_overwrite(lambda b: lu_solve(xlu, b), [(3,)]) + + def test_lu(self): + assert_no_overwrite(lu, [(3, 3)]) + + def test_qr(self): + assert_no_overwrite(qr, [(3, 3)]) + + def test_rq(self): + assert_no_overwrite(rq, [(3, 3)]) + + def test_schur(self): + assert_no_overwrite(schur, [(3, 3)]) + + def test_schur_complex(self): + assert_no_overwrite(lambda a: schur(a, 'complex'), [(3, 3)], + dtypes=[np.float32, np.float64]) + + def test_svd(self): + assert_no_overwrite(svd, [(3, 3)]) + assert_no_overwrite(lambda a: svd(a, lapack_driver='gesvd'), [(3, 3)]) + + def test_svdvals(self): + assert_no_overwrite(svdvals, [(3, 3)]) + + +def _check_orth(n, dtype, skip_big=False): + X = np.ones((n, 2), dtype=float).astype(dtype) + + eps = np.finfo(dtype).eps + tol = 1000 * eps + + Y = orth(X) + assert_equal(Y.shape, (n, 1)) + assert_allclose(Y, Y.mean(), atol=tol) + + Y = orth(X.T) + assert_equal(Y.shape, (2, 1)) + assert_allclose(Y, Y.mean(), atol=tol) + + if n > 5 and not skip_big: + np.random.seed(1) + X = np.random.rand(n, 5) @ np.random.rand(5, n) + X = X + 1e-4 * np.random.rand(n, 1) @ np.random.rand(1, n) + X = X.astype(dtype) + + Y = orth(X, rcond=1e-3) + assert_equal(Y.shape, (n, 5)) + + Y = orth(X, rcond=1e-6) + assert_equal(Y.shape, (n, 5 + 1)) + + +@pytest.mark.slow 
+@pytest.mark.skipif(np.dtype(np.intp).itemsize < 8, + reason="test only on 64-bit, else too slow") +def test_orth_memory_efficiency(): + # Pick n so that 16*n bytes is reasonable but 8*n*n bytes is unreasonable. + # Keep in mind that @pytest.mark.slow tests are likely to be running + # under configurations that support 4Gb+ memory for tests related to + # 32 bit overflow. + n = 10*1000*1000 + try: + _check_orth(n, np.float64, skip_big=True) + except MemoryError as e: + raise AssertionError( + 'memory error perhaps caused by orth regression' + ) from e + + +def test_orth(): + dtypes = [np.float32, np.float64, np.complex64, np.complex128] + sizes = [1, 2, 3, 10, 100] + for dt, n in itertools.product(dtypes, sizes): + _check_orth(n, dt) + + +def test_null_space(): + np.random.seed(1) + + dtypes = [np.float32, np.float64, np.complex64, np.complex128] + sizes = [1, 2, 3, 10, 100] + + for dt, n in itertools.product(dtypes, sizes): + X = np.ones((2, n), dtype=dt) + + eps = np.finfo(dt).eps + tol = 1000 * eps + + Y = null_space(X) + assert_equal(Y.shape, (n, n-1)) + assert_allclose(X @ Y, 0, atol=tol) + + Y = null_space(X.T) + assert_equal(Y.shape, (2, 1)) + assert_allclose(X.T @ Y, 0, atol=tol) + + X = np.random.randn(1 + n//2, n) + Y = null_space(X) + assert_equal(Y.shape, (n, n - 1 - n//2)) + assert_allclose(X @ Y, 0, atol=tol) + + if n > 5: + np.random.seed(1) + X = np.random.rand(n, 5) @ np.random.rand(5, n) + X = X + 1e-4 * np.random.rand(n, 1) @ np.random.rand(1, n) + X = X.astype(dt) + + Y = null_space(X, rcond=1e-3) + assert_equal(Y.shape, (n, n - 5)) + + Y = null_space(X, rcond=1e-6) + assert_equal(Y.shape, (n, n - 6)) + + +def test_subspace_angles(): + H = hadamard(8, float) + A = H[:, :3] + B = H[:, 3:] + assert_allclose(subspace_angles(A, B), [np.pi / 2.] * 3, atol=1e-14) + assert_allclose(subspace_angles(B, A), [np.pi / 2.] 
* 3, atol=1e-14) + for x in (A, B): + assert_allclose(subspace_angles(x, x), np.zeros(x.shape[1]), + atol=1e-14) + # From MATLAB function "subspace", which effectively only returns the + # last value that we calculate + x = np.array( + [[0.537667139546100, 0.318765239858981, 3.578396939725760, 0.725404224946106], # noqa: E501 + [1.833885014595086, -1.307688296305273, 2.769437029884877, -0.063054873189656], # noqa: E501 + [-2.258846861003648, -0.433592022305684, -1.349886940156521, 0.714742903826096], # noqa: E501 + [0.862173320368121, 0.342624466538650, 3.034923466331855, -0.204966058299775]]) # noqa: E501 + expected = 1.481454682101605 + assert_allclose(subspace_angles(x[:, :2], x[:, 2:])[0], expected, + rtol=1e-12) + assert_allclose(subspace_angles(x[:, 2:], x[:, :2])[0], expected, + rtol=1e-12) + expected = 0.746361174247302 + assert_allclose(subspace_angles(x[:, :2], x[:, [2]]), expected, rtol=1e-12) + assert_allclose(subspace_angles(x[:, [2]], x[:, :2]), expected, rtol=1e-12) + expected = 0.487163718534313 + assert_allclose(subspace_angles(x[:, :3], x[:, [3]]), expected, rtol=1e-12) + assert_allclose(subspace_angles(x[:, [3]], x[:, :3]), expected, rtol=1e-12) + expected = 0.328950515907756 + assert_allclose(subspace_angles(x[:, :2], x[:, 1:]), [expected, 0], + atol=1e-12) + # Degenerate conditions + assert_raises(ValueError, subspace_angles, x[0], x) + assert_raises(ValueError, subspace_angles, x, x[0]) + assert_raises(ValueError, subspace_angles, x[:-1], x) + + # Test branch if mask.any is True: + A = np.array([[1, 0, 0], + [0, 1, 0], + [0, 0, 1], + [0, 0, 0], + [0, 0, 0]]) + B = np.array([[1, 0, 0], + [0, 1, 0], + [0, 0, 0], + [0, 0, 0], + [0, 0, 1]]) + expected = np.array([np.pi/2, 0, 0]) + assert_allclose(subspace_angles(A, B), expected, rtol=1e-12) + + # Complex + # second column in "b" does not affect result, just there so that + # b can have more cols than a, and vice-versa (both conditional code paths) + a = [[1 + 1j], [0]] + b = [[1 - 1j, 0], [0, 1]] + assert_allclose(subspace_angles(a, b), 0., atol=1e-14) + assert_allclose(subspace_angles(b, a), 0., atol=1e-14) + + +class TestCDF2RDF: + + def matmul(self, a, b): + return np.einsum('...ij,...jk->...ik', a, b) + + def assert_eig_valid(self, w, v, x): + assert_array_almost_equal( + self.matmul(v, w), + self.matmul(x, v) + ) + + def test_single_array0x0real(self): + # eig doesn't support 0x0 in old versions of numpy + X = np.empty((0, 0)) + w, v = np.empty(0), np.empty((0, 0)) + wr, vr = cdf2rdf(w, v) + self.assert_eig_valid(wr, vr, X) + + def test_single_array2x2_real(self): + X = np.array([[1, 2], [3, -1]]) + w, v = np.linalg.eig(X) + wr, vr = cdf2rdf(w, v) + self.assert_eig_valid(wr, vr, X) + + def test_single_array2x2_complex(self): + X = np.array([[1, 2], [-2, 1]]) + w, v = np.linalg.eig(X) + wr, vr = cdf2rdf(w, v) + self.assert_eig_valid(wr, vr, X) + + def test_single_array3x3_real(self): + X = np.array([[1, 2, 3], [1, 2, 3], [2, 5, 6]]) + w, v = np.linalg.eig(X) + wr, vr = cdf2rdf(w, v) + self.assert_eig_valid(wr, vr, X) + + def test_single_array3x3_complex(self): + X = np.array([[1, 2, 3], [0, 4, 5], [0, -5, 4]]) + w, v = np.linalg.eig(X) + wr, vr = cdf2rdf(w, v) + self.assert_eig_valid(wr, vr, X) + + def test_random_1d_stacked_arrays(self): + # cannot test M == 0 due to bug in old numpy + for M in range(1, 7): + np.random.seed(999999999) + X = np.random.rand(100, M, M) + w, v = np.linalg.eig(X) + wr, vr = cdf2rdf(w, v) + self.assert_eig_valid(wr, vr, X) + + def test_random_2d_stacked_arrays(self): + # cannot test M == 0 
due to bug in old numpy + for M in range(1, 7): + X = np.random.rand(10, 10, M, M) + w, v = np.linalg.eig(X) + wr, vr = cdf2rdf(w, v) + self.assert_eig_valid(wr, vr, X) + + def test_low_dimensionality_error(self): + w, v = np.empty(()), np.array((2,)) + assert_raises(ValueError, cdf2rdf, w, v) + + def test_not_square_error(self): + # Check that passing a non-square array raises a ValueError. + w, v = np.arange(3), np.arange(6).reshape(3, 2) + assert_raises(ValueError, cdf2rdf, w, v) + + def test_swapped_v_w_error(self): + # Check that exchanging places of w and v raises ValueError. + X = np.array([[1, 2, 3], [0, 4, 5], [0, -5, 4]]) + w, v = np.linalg.eig(X) + assert_raises(ValueError, cdf2rdf, v, w) + + def test_non_associated_error(self): + # Check that passing non-associated eigenvectors raises a ValueError. + w, v = np.arange(3), np.arange(16).reshape(4, 4) + assert_raises(ValueError, cdf2rdf, w, v) + + def test_not_conjugate_pairs(self): + # Check that passing non-conjugate pairs raises a ValueError. + X = np.array([[1, 2, 3], [1, 2, 3], [2, 5, 6+1j]]) + w, v = np.linalg.eig(X) + assert_raises(ValueError, cdf2rdf, w, v) + + # different arrays in the stack, so not conjugate + X = np.array([ + [[1, 2, 3], [1, 2, 3], [2, 5, 6+1j]], + [[1, 2, 3], [1, 2, 3], [2, 5, 6-1j]], + ]) + w, v = np.linalg.eig(X) + assert_raises(ValueError, cdf2rdf, w, v) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_decomp_cholesky.py b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_decomp_cholesky.py new file mode 100644 index 0000000000000000000000000000000000000000..9354bf93a967954f8d563d43edcb8f909c747be2 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_decomp_cholesky.py @@ -0,0 +1,219 @@ +import pytest +from numpy.testing import assert_array_almost_equal, assert_array_equal +from pytest import raises as assert_raises + +import numpy as np +from numpy import array, transpose, dot, conjugate, zeros_like, empty +from numpy.random import random +from scipy.linalg import cholesky, cholesky_banded, cho_solve_banded, \ + cho_factor, cho_solve + +from scipy.linalg._testutils import assert_no_overwrite + + +class TestCholesky: + + def test_simple(self): + a = [[8, 2, 3], [2, 9, 3], [3, 3, 6]] + c = cholesky(a) + assert_array_almost_equal(dot(transpose(c), c), a) + c = transpose(c) + a = dot(c, transpose(c)) + assert_array_almost_equal(cholesky(a, lower=1), c) + + def test_check_finite(self): + a = [[8, 2, 3], [2, 9, 3], [3, 3, 6]] + c = cholesky(a, check_finite=False) + assert_array_almost_equal(dot(transpose(c), c), a) + c = transpose(c) + a = dot(c, transpose(c)) + assert_array_almost_equal(cholesky(a, lower=1, check_finite=False), c) + + def test_simple_complex(self): + m = array([[3+1j, 3+4j, 5], [0, 2+2j, 2+7j], [0, 0, 7+4j]]) + a = dot(transpose(conjugate(m)), m) + c = cholesky(a) + a1 = dot(transpose(conjugate(c)), c) + assert_array_almost_equal(a, a1) + c = transpose(c) + a = dot(c, transpose(conjugate(c))) + assert_array_almost_equal(cholesky(a, lower=1), c) + + def test_random(self): + n = 20 + for k in range(2): + m = random([n, n]) + for i in range(n): + m[i, i] = 20*(.1+m[i, i]) + a = dot(transpose(m), m) + c = cholesky(a) + a1 = dot(transpose(c), c) + assert_array_almost_equal(a, a1) + c = transpose(c) + a = dot(c, transpose(c)) + assert_array_almost_equal(cholesky(a, lower=1), c) + + def test_random_complex(self): + n = 20 + for k in range(2): + m = random([n, n])+1j*random([n, n]) + for i in range(n): + m[i, i] = 
20*(.1+abs(m[i, i])) + a = dot(transpose(conjugate(m)), m) + c = cholesky(a) + a1 = dot(transpose(conjugate(c)), c) + assert_array_almost_equal(a, a1) + c = transpose(c) + a = dot(c, transpose(conjugate(c))) + assert_array_almost_equal(cholesky(a, lower=1), c) + + @pytest.mark.xslow + def test_int_overflow(self): + # regression test for + # https://github.com/scipy/scipy/issues/17436 + # the problem was an int overflow in zeroing out + # the unused triangular part + n = 47_000 + x = np.eye(n, dtype=np.float64, order='F') + x[:4, :4] = np.array([[4, -2, 3, -1], + [-2, 4, -3, 1], + [3, -3, 5, 0], + [-1, 1, 0, 5]]) + + cholesky(x, check_finite=False, overwrite_a=True) # should not segfault + + +class TestCholeskyBanded: + """Tests for cholesky_banded() and cho_solve_banded.""" + + def test_check_finite(self): + # Symmetric positive definite banded matrix `a` + a = array([[4.0, 1.0, 0.0, 0.0], + [1.0, 4.0, 0.5, 0.0], + [0.0, 0.5, 4.0, 0.2], + [0.0, 0.0, 0.2, 4.0]]) + # Banded storage form of `a`. + ab = array([[-1.0, 1.0, 0.5, 0.2], + [4.0, 4.0, 4.0, 4.0]]) + c = cholesky_banded(ab, lower=False, check_finite=False) + ufac = zeros_like(a) + ufac[list(range(4)), list(range(4))] = c[-1] + ufac[(0, 1, 2), (1, 2, 3)] = c[0, 1:] + assert_array_almost_equal(a, dot(ufac.T, ufac)) + + b = array([0.0, 0.5, 4.2, 4.2]) + x = cho_solve_banded((c, False), b, check_finite=False) + assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0]) + + def test_upper_real(self): + # Symmetric positive definite banded matrix `a` + a = array([[4.0, 1.0, 0.0, 0.0], + [1.0, 4.0, 0.5, 0.0], + [0.0, 0.5, 4.0, 0.2], + [0.0, 0.0, 0.2, 4.0]]) + # Banded storage form of `a`. + ab = array([[-1.0, 1.0, 0.5, 0.2], + [4.0, 4.0, 4.0, 4.0]]) + c = cholesky_banded(ab, lower=False) + ufac = zeros_like(a) + ufac[list(range(4)), list(range(4))] = c[-1] + ufac[(0, 1, 2), (1, 2, 3)] = c[0, 1:] + assert_array_almost_equal(a, dot(ufac.T, ufac)) + + b = array([0.0, 0.5, 4.2, 4.2]) + x = cho_solve_banded((c, False), b) + assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0]) + + def test_upper_complex(self): + # Hermitian positive definite banded matrix `a` + a = array([[4.0, 1.0, 0.0, 0.0], + [1.0, 4.0, 0.5, 0.0], + [0.0, 0.5, 4.0, -0.2j], + [0.0, 0.0, 0.2j, 4.0]]) + # Banded storage form of `a`. + ab = array([[-1.0, 1.0, 0.5, -0.2j], + [4.0, 4.0, 4.0, 4.0]]) + c = cholesky_banded(ab, lower=False) + ufac = zeros_like(a) + ufac[list(range(4)), list(range(4))] = c[-1] + ufac[(0, 1, 2), (1, 2, 3)] = c[0, 1:] + assert_array_almost_equal(a, dot(ufac.conj().T, ufac)) + + b = array([0.0, 0.5, 4.0-0.2j, 0.2j + 4.0]) + x = cho_solve_banded((c, False), b) + assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0]) + + def test_lower_real(self): + # Symmetric positive definite banded matrix `a` + a = array([[4.0, 1.0, 0.0, 0.0], + [1.0, 4.0, 0.5, 0.0], + [0.0, 0.5, 4.0, 0.2], + [0.0, 0.0, 0.2, 4.0]]) + # Banded storage form of `a`. + ab = array([[4.0, 4.0, 4.0, 4.0], + [1.0, 0.5, 0.2, -1.0]]) + c = cholesky_banded(ab, lower=True) + lfac = zeros_like(a) + lfac[list(range(4)), list(range(4))] = c[0] + lfac[(1, 2, 3), (0, 1, 2)] = c[1, :3] + assert_array_almost_equal(a, dot(lfac, lfac.T)) + + b = array([0.0, 0.5, 4.2, 4.2]) + x = cho_solve_banded((c, True), b) + assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0]) + + def test_lower_complex(self): + # Hermitian positive definite banded matrix `a` + a = array([[4.0, 1.0, 0.0, 0.0], + [1.0, 4.0, 0.5, 0.0], + [0.0, 0.5, 4.0, -0.2j], + [0.0, 0.0, 0.2j, 4.0]]) + # Banded storage form of `a`. 
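+ # Editor's note: in the lower banded storage used here, row 0 holds the main
+ # diagonal and row 1 the first sub-diagonal; the trailing entry of row 1 is
+ # padding that the LAPACK routine never reads (hence the -1.0 below).
+ # A standalone real-valued round trip with the matrix from test_lower_real
+ # above; the _demo names are the editor's, public scipy.linalg APIs only:
+ import numpy as np
+ from scipy.linalg import cholesky_banded, cho_solve_banded
+ ab_demo = np.array([[4.0, 4.0, 4.0, 4.0],
+                     [1.0, 0.5, 0.2, -1.0]])
+ cb_demo = cholesky_banded(ab_demo, lower=True)
+ xb_demo = cho_solve_banded((cb_demo, True), np.array([0.0, 0.5, 4.2, 4.2]))
+ np.testing.assert_allclose(xb_demo, [0.0, 0.0, 1.0, 1.0], atol=1e-12)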
+ ab = array([[4.0, 4.0, 4.0, 4.0], + [1.0, 0.5, 0.2j, -1.0]]) + c = cholesky_banded(ab, lower=True) + lfac = zeros_like(a) + lfac[list(range(4)), list(range(4))] = c[0] + lfac[(1, 2, 3), (0, 1, 2)] = c[1, :3] + assert_array_almost_equal(a, dot(lfac, lfac.conj().T)) + + b = array([0.0, 0.5j, 3.8j, 3.8]) + x = cho_solve_banded((c, True), b) + assert_array_almost_equal(x, [0.0, 0.0, 1.0j, 1.0]) + + +class TestOverwrite: + def test_cholesky(self): + assert_no_overwrite(cholesky, [(3, 3)]) + + def test_cho_factor(self): + assert_no_overwrite(cho_factor, [(3, 3)]) + + def test_cho_solve(self): + x = array([[2, -1, 0], [-1, 2, -1], [0, -1, 2]]) + xcho = cho_factor(x) + assert_no_overwrite(lambda b: cho_solve(xcho, b), [(3,)]) + + def test_cholesky_banded(self): + assert_no_overwrite(cholesky_banded, [(2, 3)]) + + def test_cho_solve_banded(self): + x = array([[0, -1, -1], [2, 2, 2]]) + xcho = cholesky_banded(x) + assert_no_overwrite(lambda b: cho_solve_banded((xcho, False), b), + [(3,)]) + + +class TestEmptyArray: + def test_cho_factor_empty_square(self): + a = empty((0, 0)) + b = array([]) + c = array([[]]) + d = [] + e = [[]] + + x, _ = cho_factor(a) + assert_array_equal(x, a) + + for x in ([b, c, d, e]): + assert_raises(ValueError, cho_factor, x) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_decomp_cossin.py b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_decomp_cossin.py new file mode 100644 index 0000000000000000000000000000000000000000..3302eaa5bfe618a8d980cd292c97858ed1e1ef0e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_decomp_cossin.py @@ -0,0 +1,157 @@ +import pytest +import numpy as np +from numpy.random import default_rng +from numpy.testing import assert_allclose + +from scipy.linalg.lapack import _compute_lwork +from scipy.stats import ortho_group, unitary_group +from scipy.linalg import cossin, get_lapack_funcs + +REAL_DTYPES = (np.float32, np.float64) +COMPLEX_DTYPES = (np.complex64, np.complex128) +DTYPES = REAL_DTYPES + COMPLEX_DTYPES + + +@pytest.mark.parametrize('dtype_', DTYPES) +@pytest.mark.parametrize('m, p, q', + [ + (2, 1, 1), + (3, 2, 1), + (3, 1, 2), + (4, 2, 2), + (4, 1, 2), + (40, 12, 20), + (40, 30, 1), + (40, 1, 30), + (100, 50, 1), + (100, 50, 50), + ]) +@pytest.mark.parametrize('swap_sign', [True, False]) +def test_cossin(dtype_, m, p, q, swap_sign): + rng = default_rng(1708093570726217) + if dtype_ in COMPLEX_DTYPES: + x = np.array(unitary_group.rvs(m, random_state=rng), dtype=dtype_) + else: + x = np.array(ortho_group.rvs(m, random_state=rng), dtype=dtype_) + + u, cs, vh = cossin(x, p, q, + swap_sign=swap_sign) + assert_allclose(x, u @ cs @ vh, rtol=0., atol=m*1e3*np.finfo(dtype_).eps) + assert u.dtype == dtype_ + # Test for float32 or float 64 + assert cs.dtype == np.real(u).dtype + assert vh.dtype == dtype_ + + u, cs, vh = cossin([x[:p, :q], x[:p, q:], x[p:, :q], x[p:, q:]], + swap_sign=swap_sign) + assert_allclose(x, u @ cs @ vh, rtol=0., atol=m*1e3*np.finfo(dtype_).eps) + assert u.dtype == dtype_ + assert cs.dtype == np.real(u).dtype + assert vh.dtype == dtype_ + + _, cs2, vh2 = cossin(x, p, q, + compute_u=False, + swap_sign=swap_sign) + assert_allclose(cs, cs2, rtol=0., atol=10*np.finfo(dtype_).eps) + assert_allclose(vh, vh2, rtol=0., atol=10*np.finfo(dtype_).eps) + + u2, cs2, _ = cossin(x, p, q, + compute_vh=False, + swap_sign=swap_sign) + assert_allclose(u, u2, rtol=0., atol=10*np.finfo(dtype_).eps) + assert_allclose(cs, cs2, rtol=0., atol=10*np.finfo(dtype_).eps) + 
+ _, cs2, _ = cossin(x, p, q, + compute_u=False, + compute_vh=False, + swap_sign=swap_sign) + assert_allclose(cs, cs2, rtol=0., atol=10*np.finfo(dtype_).eps) + + +def test_cossin_mixed_types(): + rng = default_rng(1708093736390459) + x = np.array(ortho_group.rvs(4, random_state=rng), dtype=np.float64) + u, cs, vh = cossin([x[:2, :2], + np.array(x[:2, 2:], dtype=np.complex128), + x[2:, :2], + x[2:, 2:]]) + + assert u.dtype == np.complex128 + assert cs.dtype == np.float64 + assert vh.dtype == np.complex128 + assert_allclose(x, u @ cs @ vh, rtol=0., + atol=1e4 * np.finfo(np.complex128).eps) + + +def test_cossin_error_incorrect_subblocks(): + with pytest.raises(ValueError, match="be due to missing p, q arguments."): + cossin(([1, 2], [3, 4, 5], [6, 7], [8, 9, 10])) + + +def test_cossin_error_empty_subblocks(): + with pytest.raises(ValueError, match="x11.*empty"): + cossin(([], [], [], [])) + with pytest.raises(ValueError, match="x12.*empty"): + cossin(([1, 2], [], [6, 7], [8, 9, 10])) + with pytest.raises(ValueError, match="x21.*empty"): + cossin(([1, 2], [3, 4, 5], [], [8, 9, 10])) + with pytest.raises(ValueError, match="x22.*empty"): + cossin(([1, 2], [3, 4, 5], [2], [])) + + +def test_cossin_error_missing_partitioning(): + with pytest.raises(ValueError, match=".*exactly four arrays.* got 2"): + cossin(unitary_group.rvs(2)) + + with pytest.raises(ValueError, match=".*might be due to missing p, q"): + cossin(unitary_group.rvs(4)) + + +def test_cossin_error_non_iterable(): + with pytest.raises(ValueError, match="containing the subblocks of X"): + cossin(12j) + + +def test_cossin_error_non_square(): + with pytest.raises(ValueError, match="only supports square"): + cossin(np.array([[1, 2]]), 1, 1) + + +def test_cossin_error_partitioning(): + x = np.array(ortho_group.rvs(4), dtype=np.float64) + with pytest.raises(ValueError, match="invalid p=0.*0= n: + assert_allclose(u.conj().T.dot(u), np.eye(n), atol=1e-15) + else: + assert_allclose(u.dot(u.conj().T), np.eye(m), atol=1e-15) + # p is Hermitian positive semidefinite. + assert_allclose(p.conj().T, p) + evals = eigh(p, eigvals_only=True) + nonzero_evals = evals[abs(evals) > 1e-14] + assert_((nonzero_evals >= 0).all()) + + u, p = polar(a, side='left') + assert_equal(u.shape, (m, n)) + assert_equal(p.shape, (m, m)) + # a = pu + assert_allclose(p.dot(u), a, atol=product_atol) + if m >= n: + assert_allclose(u.conj().T.dot(u), np.eye(n), atol=1e-15) + else: + assert_allclose(u.dot(u.conj().T), np.eye(m), atol=1e-15) + # p is Hermitian positive semidefinite. 
+ assert_allclose(p.conj().T, p) + evals = eigh(p, eigvals_only=True) + nonzero_evals = evals[abs(evals) > 1e-14] + assert_((nonzero_evals >= 0).all()) + + +def test_precomputed_cases(): + for a, side, expected_u, expected_p in precomputed_cases: + check_precomputed_polar(a, side, expected_u, expected_p) + + +def test_verify_cases(): + for a in verify_cases: + verify_polar(a) + diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_decomp_update.py b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_decomp_update.py new file mode 100644 index 0000000000000000000000000000000000000000..2222c25ae6aa8f46ca35aa276e4fc4e85b4e7100 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_decomp_update.py @@ -0,0 +1,1700 @@ +import itertools + +import numpy as np +from numpy.testing import assert_, assert_allclose, assert_equal +from pytest import raises as assert_raises +from scipy import linalg +import scipy.linalg._decomp_update as _decomp_update +from scipy.linalg._decomp_update import qr_delete, qr_update, qr_insert + +def assert_unitary(a, rtol=None, atol=None, assert_sqr=True): + if rtol is None: + rtol = 10.0 ** -(np.finfo(a.dtype).precision-2) + if atol is None: + atol = 10*np.finfo(a.dtype).eps + + if assert_sqr: + assert_(a.shape[0] == a.shape[1], 'unitary matrices must be square') + aTa = np.dot(a.T.conj(), a) + assert_allclose(aTa, np.eye(a.shape[1]), rtol=rtol, atol=atol) + +def assert_upper_tri(a, rtol=None, atol=None): + if rtol is None: + rtol = 10.0 ** -(np.finfo(a.dtype).precision-2) + if atol is None: + atol = 2*np.finfo(a.dtype).eps + mask = np.tri(a.shape[0], a.shape[1], -1, np.bool_) + assert_allclose(a[mask], 0.0, rtol=rtol, atol=atol) + +def check_qr(q, r, a, rtol, atol, assert_sqr=True): + assert_unitary(q, rtol, atol, assert_sqr) + assert_upper_tri(r, rtol, atol) + assert_allclose(q.dot(r), a, rtol=rtol, atol=atol) + +def make_strided(arrs): + strides = [(3, 7), (2, 2), (3, 4), (4, 2), (5, 4), (2, 3), (2, 1), (4, 5)] + kmax = len(strides) + k = 0 + ret = [] + for a in arrs: + if a.ndim == 1: + s = strides[k % kmax] + k += 1 + base = np.zeros(s[0]*a.shape[0]+s[1], a.dtype) + view = base[s[1]::s[0]] + view[...] = a + elif a.ndim == 2: + s = strides[k % kmax] + t = strides[(k+1) % kmax] + k += 2 + base = np.zeros((s[0]*a.shape[0]+s[1], t[0]*a.shape[1]+t[1]), + a.dtype) + view = base[s[1]::s[0], t[1]::t[0]] + view[...] = a + else: + raise ValueError('make_strided only works for ndim = 1 or' + ' 2 arrays') + ret.append(view) + return ret + +def negate_strides(arrs): + ret = [] + for a in arrs: + b = np.zeros_like(a) + if b.ndim == 2: + b = b[::-1, ::-1] + elif b.ndim == 1: + b = b[::-1] + else: + raise ValueError('negate_strides only works for ndim = 1 or' + ' 2 arrays') + b[...] = a + ret.append(b) + return ret + +def nonitemsize_strides(arrs): + out = [] + for a in arrs: + a_dtype = a.dtype + b = np.zeros(a.shape, [('a', a_dtype), ('junk', 'S1')]) + c = b.getfield(a_dtype) + c[...] 
= a + out.append(c) + return out + + +def make_nonnative(arrs): + return [a.astype(a.dtype.newbyteorder()) for a in arrs] + + +class BaseQRdeltas: + def setup_method(self): + self.rtol = 10.0 ** -(np.finfo(self.dtype).precision-2) + self.atol = 10 * np.finfo(self.dtype).eps + + def generate(self, type, mode='full'): + np.random.seed(29382) + shape = {'sqr': (8, 8), 'tall': (12, 7), 'fat': (7, 12), + 'Mx1': (8, 1), '1xN': (1, 8), '1x1': (1, 1)}[type] + a = np.random.random(shape) + if np.iscomplexobj(self.dtype.type(1)): + b = np.random.random(shape) + a = a + 1j * b + a = a.astype(self.dtype) + q, r = linalg.qr(a, mode=mode) + return a, q, r + +class BaseQRdelete(BaseQRdeltas): + def test_sqr_1_row(self): + a, q, r = self.generate('sqr') + for row in range(r.shape[0]): + q1, r1 = qr_delete(q, r, row, overwrite_qr=False) + a1 = np.delete(a, row, 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_sqr_p_row(self): + a, q, r = self.generate('sqr') + for ndel in range(2, 6): + for row in range(a.shape[0]-ndel): + q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False) + a1 = np.delete(a, slice(row, row+ndel), 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_sqr_1_col(self): + a, q, r = self.generate('sqr') + for col in range(r.shape[1]): + q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False) + a1 = np.delete(a, col, 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_sqr_p_col(self): + a, q, r = self.generate('sqr') + for ndel in range(2, 6): + for col in range(r.shape[1]-ndel): + q1, r1 = qr_delete(q, r, col, ndel, which='col', + overwrite_qr=False) + a1 = np.delete(a, slice(col, col+ndel), 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_tall_1_row(self): + a, q, r = self.generate('tall') + for row in range(r.shape[0]): + q1, r1 = qr_delete(q, r, row, overwrite_qr=False) + a1 = np.delete(a, row, 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_tall_p_row(self): + a, q, r = self.generate('tall') + for ndel in range(2, 6): + for row in range(a.shape[0]-ndel): + q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False) + a1 = np.delete(a, slice(row, row+ndel), 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_tall_1_col(self): + a, q, r = self.generate('tall') + for col in range(r.shape[1]): + q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False) + a1 = np.delete(a, col, 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_tall_p_col(self): + a, q, r = self.generate('tall') + for ndel in range(2, 6): + for col in range(r.shape[1]-ndel): + q1, r1 = qr_delete(q, r, col, ndel, which='col', + overwrite_qr=False) + a1 = np.delete(a, slice(col, col+ndel), 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_fat_1_row(self): + a, q, r = self.generate('fat') + for row in range(r.shape[0]): + q1, r1 = qr_delete(q, r, row, overwrite_qr=False) + a1 = np.delete(a, row, 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_fat_p_row(self): + a, q, r = self.generate('fat') + for ndel in range(2, 6): + for row in range(a.shape[0]-ndel): + q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False) + a1 = np.delete(a, slice(row, row+ndel), 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_fat_1_col(self): + a, q, r = self.generate('fat') + for col in range(r.shape[1]): + q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False) + a1 = np.delete(a, col, 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_fat_p_col(self): + a, q, r = self.generate('fat') + for ndel in range(2, 6): + for 
col in range(r.shape[1]-ndel): + q1, r1 = qr_delete(q, r, col, ndel, which='col', + overwrite_qr=False) + a1 = np.delete(a, slice(col, col+ndel), 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_economic_1_row(self): + # this test always starts and ends with an economic decomp. + a, q, r = self.generate('tall', 'economic') + for row in range(r.shape[0]): + q1, r1 = qr_delete(q, r, row, overwrite_qr=False) + a1 = np.delete(a, row, 0) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + # for economic row deletes + # eco - prow = eco + # eco - prow = sqr + # eco - prow = fat + def base_economic_p_row_xxx(self, ndel): + a, q, r = self.generate('tall', 'economic') + for row in range(a.shape[0]-ndel): + q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False) + a1 = np.delete(a, slice(row, row+ndel), 0) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_economic_p_row_economic(self): + # (12, 7) - (3, 7) = (9,7) --> stays economic + self.base_economic_p_row_xxx(3) + + def test_economic_p_row_sqr(self): + # (12, 7) - (5, 7) = (7, 7) --> becomes square + self.base_economic_p_row_xxx(5) + + def test_economic_p_row_fat(self): + # (12, 7) - (7,7) = (5, 7) --> becomes fat + self.base_economic_p_row_xxx(7) + + def test_economic_1_col(self): + a, q, r = self.generate('tall', 'economic') + for col in range(r.shape[1]): + q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False) + a1 = np.delete(a, col, 1) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_economic_p_col(self): + a, q, r = self.generate('tall', 'economic') + for ndel in range(2, 6): + for col in range(r.shape[1]-ndel): + q1, r1 = qr_delete(q, r, col, ndel, which='col', + overwrite_qr=False) + a1 = np.delete(a, slice(col, col+ndel), 1) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_Mx1_1_row(self): + a, q, r = self.generate('Mx1') + for row in range(r.shape[0]): + q1, r1 = qr_delete(q, r, row, overwrite_qr=False) + a1 = np.delete(a, row, 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_Mx1_p_row(self): + a, q, r = self.generate('Mx1') + for ndel in range(2, 6): + for row in range(a.shape[0]-ndel): + q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False) + a1 = np.delete(a, slice(row, row+ndel), 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_1xN_1_col(self): + a, q, r = self.generate('1xN') + for col in range(r.shape[1]): + q1, r1 = qr_delete(q, r, col, which='col', overwrite_qr=False) + a1 = np.delete(a, col, 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_1xN_p_col(self): + a, q, r = self.generate('1xN') + for ndel in range(2, 6): + for col in range(r.shape[1]-ndel): + q1, r1 = qr_delete(q, r, col, ndel, which='col', + overwrite_qr=False) + a1 = np.delete(a, slice(col, col+ndel), 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_Mx1_economic_1_row(self): + a, q, r = self.generate('Mx1', 'economic') + for row in range(r.shape[0]): + q1, r1 = qr_delete(q, r, row, overwrite_qr=False) + a1 = np.delete(a, row, 0) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_Mx1_economic_p_row(self): + a, q, r = self.generate('Mx1', 'economic') + for ndel in range(2, 6): + for row in range(a.shape[0]-ndel): + q1, r1 = qr_delete(q, r, row, ndel, overwrite_qr=False) + a1 = np.delete(a, slice(row, row+ndel), 0) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_delete_last_1_row(self): + # full and eco are the same for 1xN + a, q, r = self.generate('1xN') + q1, r1 = qr_delete(q, r, 0, 1, 'row') + assert_equal(q1, 
np.ndarray(shape=(0, 0), dtype=q.dtype)) + assert_equal(r1, np.ndarray(shape=(0, r.shape[1]), dtype=r.dtype)) + + def test_delete_last_p_row(self): + a, q, r = self.generate('tall', 'full') + q1, r1 = qr_delete(q, r, 0, a.shape[0], 'row') + assert_equal(q1, np.ndarray(shape=(0, 0), dtype=q.dtype)) + assert_equal(r1, np.ndarray(shape=(0, r.shape[1]), dtype=r.dtype)) + + a, q, r = self.generate('tall', 'economic') + q1, r1 = qr_delete(q, r, 0, a.shape[0], 'row') + assert_equal(q1, np.ndarray(shape=(0, 0), dtype=q.dtype)) + assert_equal(r1, np.ndarray(shape=(0, r.shape[1]), dtype=r.dtype)) + + def test_delete_last_1_col(self): + a, q, r = self.generate('Mx1', 'economic') + q1, r1 = qr_delete(q, r, 0, 1, 'col') + assert_equal(q1, np.ndarray(shape=(q.shape[0], 0), dtype=q.dtype)) + assert_equal(r1, np.ndarray(shape=(0, 0), dtype=r.dtype)) + + a, q, r = self.generate('Mx1', 'full') + q1, r1 = qr_delete(q, r, 0, 1, 'col') + assert_unitary(q1) + assert_(q1.dtype == q.dtype) + assert_(q1.shape == q.shape) + assert_equal(r1, np.ndarray(shape=(r.shape[0], 0), dtype=r.dtype)) + + def test_delete_last_p_col(self): + a, q, r = self.generate('tall', 'full') + q1, r1 = qr_delete(q, r, 0, a.shape[1], 'col') + assert_unitary(q1) + assert_(q1.dtype == q.dtype) + assert_(q1.shape == q.shape) + assert_equal(r1, np.ndarray(shape=(r.shape[0], 0), dtype=r.dtype)) + + a, q, r = self.generate('tall', 'economic') + q1, r1 = qr_delete(q, r, 0, a.shape[1], 'col') + assert_equal(q1, np.ndarray(shape=(q.shape[0], 0), dtype=q.dtype)) + assert_equal(r1, np.ndarray(shape=(0, 0), dtype=r.dtype)) + + def test_delete_1x1_row_col(self): + a, q, r = self.generate('1x1') + q1, r1 = qr_delete(q, r, 0, 1, 'row') + assert_equal(q1, np.ndarray(shape=(0, 0), dtype=q.dtype)) + assert_equal(r1, np.ndarray(shape=(0, r.shape[1]), dtype=r.dtype)) + + a, q, r = self.generate('1x1') + q1, r1 = qr_delete(q, r, 0, 1, 'col') + assert_unitary(q1) + assert_(q1.dtype == q.dtype) + assert_(q1.shape == q.shape) + assert_equal(r1, np.ndarray(shape=(r.shape[0], 0), dtype=r.dtype)) + + # all full qr, row deletes and single column deletes should be able to + # handle any non negative strides. (only row and column vector + # operations are used.) p column delete require fortran ordered + # Q and R and will make a copy as necessary. Economic qr row deletes + # require a contiguous q. + + def base_non_simple_strides(self, adjust_strides, ks, p, which, + overwriteable): + if which == 'row': + qind = (slice(p,None), slice(p,None)) + rind = (slice(p,None), slice(None)) + else: + qind = (slice(None), slice(None)) + rind = (slice(None), slice(None,-p)) + + for type, k in itertools.product(['sqr', 'tall', 'fat'], ks): + a, q0, r0, = self.generate(type) + qs, rs = adjust_strides((q0, r0)) + if p == 1: + a1 = np.delete(a, k, 0 if which == 'row' else 1) + else: + s = slice(k,k+p) + if k < 0: + s = slice(k, k + p + + (a.shape[0] if which == 'row' else a.shape[1])) + a1 = np.delete(a, s, 0 if which == 'row' else 1) + + # for each variable, q, r we try with it strided and + # overwrite=False. Then we try with overwrite=True, and make + # sure that q and r are still overwritten. 
+ + q = q0.copy('F') + r = r0.copy('F') + q1, r1 = qr_delete(qs, r, k, p, which, False) + check_qr(q1, r1, a1, self.rtol, self.atol) + q1o, r1o = qr_delete(qs, r, k, p, which, True) + check_qr(q1o, r1o, a1, self.rtol, self.atol) + if overwriteable: + assert_allclose(q1o, qs[qind], rtol=self.rtol, atol=self.atol) + assert_allclose(r1o, r[rind], rtol=self.rtol, atol=self.atol) + + q = q0.copy('F') + r = r0.copy('F') + q2, r2 = qr_delete(q, rs, k, p, which, False) + check_qr(q2, r2, a1, self.rtol, self.atol) + q2o, r2o = qr_delete(q, rs, k, p, which, True) + check_qr(q2o, r2o, a1, self.rtol, self.atol) + if overwriteable: + assert_allclose(q2o, q[qind], rtol=self.rtol, atol=self.atol) + assert_allclose(r2o, rs[rind], rtol=self.rtol, atol=self.atol) + + q = q0.copy('F') + r = r0.copy('F') + # since some of these were consumed above + qs, rs = adjust_strides((q, r)) + q3, r3 = qr_delete(qs, rs, k, p, which, False) + check_qr(q3, r3, a1, self.rtol, self.atol) + q3o, r3o = qr_delete(qs, rs, k, p, which, True) + check_qr(q3o, r3o, a1, self.rtol, self.atol) + if overwriteable: + assert_allclose(q2o, qs[qind], rtol=self.rtol, atol=self.atol) + assert_allclose(r3o, rs[rind], rtol=self.rtol, atol=self.atol) + + def test_non_unit_strides_1_row(self): + self.base_non_simple_strides(make_strided, [0], 1, 'row', True) + + def test_non_unit_strides_p_row(self): + self.base_non_simple_strides(make_strided, [0], 3, 'row', True) + + def test_non_unit_strides_1_col(self): + self.base_non_simple_strides(make_strided, [0], 1, 'col', True) + + def test_non_unit_strides_p_col(self): + self.base_non_simple_strides(make_strided, [0], 3, 'col', False) + + def test_neg_strides_1_row(self): + self.base_non_simple_strides(negate_strides, [0], 1, 'row', False) + + def test_neg_strides_p_row(self): + self.base_non_simple_strides(negate_strides, [0], 3, 'row', False) + + def test_neg_strides_1_col(self): + self.base_non_simple_strides(negate_strides, [0], 1, 'col', False) + + def test_neg_strides_p_col(self): + self.base_non_simple_strides(negate_strides, [0], 3, 'col', False) + + def test_non_itemize_strides_1_row(self): + self.base_non_simple_strides(nonitemsize_strides, [0], 1, 'row', False) + + def test_non_itemize_strides_p_row(self): + self.base_non_simple_strides(nonitemsize_strides, [0], 3, 'row', False) + + def test_non_itemize_strides_1_col(self): + self.base_non_simple_strides(nonitemsize_strides, [0], 1, 'col', False) + + def test_non_itemize_strides_p_col(self): + self.base_non_simple_strides(nonitemsize_strides, [0], 3, 'col', False) + + def test_non_native_byte_order_1_row(self): + self.base_non_simple_strides(make_nonnative, [0], 1, 'row', False) + + def test_non_native_byte_order_p_row(self): + self.base_non_simple_strides(make_nonnative, [0], 3, 'row', False) + + def test_non_native_byte_order_1_col(self): + self.base_non_simple_strides(make_nonnative, [0], 1, 'col', False) + + def test_non_native_byte_order_p_col(self): + self.base_non_simple_strides(make_nonnative, [0], 3, 'col', False) + + def test_neg_k(self): + a, q, r = self.generate('sqr') + for k, p, w in itertools.product([-3, -7], [1, 3], ['row', 'col']): + q1, r1 = qr_delete(q, r, k, p, w, overwrite_qr=False) + if w == 'row': + a1 = np.delete(a, slice(k+a.shape[0], k+p+a.shape[0]), 0) + else: + a1 = np.delete(a, slice(k+a.shape[0], k+p+a.shape[1]), 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def base_overwrite_qr(self, which, p, test_C, test_F, mode='full'): + assert_sqr = True if mode == 'full' else False + if which == 'row': + qind = 
(slice(p,None), slice(p,None)) + rind = (slice(p,None), slice(None)) + else: + qind = (slice(None), slice(None)) + rind = (slice(None), slice(None,-p)) + a, q0, r0 = self.generate('sqr', mode) + if p == 1: + a1 = np.delete(a, 3, 0 if which == 'row' else 1) + else: + a1 = np.delete(a, slice(3, 3+p), 0 if which == 'row' else 1) + + # don't overwrite + q = q0.copy('F') + r = r0.copy('F') + q1, r1 = qr_delete(q, r, 3, p, which, False) + check_qr(q1, r1, a1, self.rtol, self.atol, assert_sqr) + check_qr(q, r, a, self.rtol, self.atol, assert_sqr) + + if test_F: + q = q0.copy('F') + r = r0.copy('F') + q2, r2 = qr_delete(q, r, 3, p, which, True) + check_qr(q2, r2, a1, self.rtol, self.atol, assert_sqr) + # verify the overwriting + assert_allclose(q2, q[qind], rtol=self.rtol, atol=self.atol) + assert_allclose(r2, r[rind], rtol=self.rtol, atol=self.atol) + + if test_C: + q = q0.copy('C') + r = r0.copy('C') + q3, r3 = qr_delete(q, r, 3, p, which, True) + check_qr(q3, r3, a1, self.rtol, self.atol, assert_sqr) + assert_allclose(q3, q[qind], rtol=self.rtol, atol=self.atol) + assert_allclose(r3, r[rind], rtol=self.rtol, atol=self.atol) + + def test_overwrite_qr_1_row(self): + # any positively strided q and r. + self.base_overwrite_qr('row', 1, True, True) + + def test_overwrite_economic_qr_1_row(self): + # Any contiguous q and positively strided r. + self.base_overwrite_qr('row', 1, True, True, 'economic') + + def test_overwrite_qr_1_col(self): + # any positively strided q and r. + # full and eco share code paths + self.base_overwrite_qr('col', 1, True, True) + + def test_overwrite_qr_p_row(self): + # any positively strided q and r. + self.base_overwrite_qr('row', 3, True, True) + + def test_overwrite_economic_qr_p_row(self): + # any contiguous q and positively strided r + self.base_overwrite_qr('row', 3, True, True, 'economic') + + def test_overwrite_qr_p_col(self): + # only F ordered q and r can be overwritten for cols + # full and eco share code paths + self.base_overwrite_qr('col', 3, False, True) + + def test_bad_which(self): + a, q, r = self.generate('sqr') + assert_raises(ValueError, qr_delete, q, r, 0, which='foo') + + def test_bad_k(self): + a, q, r = self.generate('tall') + assert_raises(ValueError, qr_delete, q, r, q.shape[0], 1) + assert_raises(ValueError, qr_delete, q, r, -q.shape[0]-1, 1) + assert_raises(ValueError, qr_delete, q, r, r.shape[0], 1, 'col') + assert_raises(ValueError, qr_delete, q, r, -r.shape[0]-1, 1, 'col') + + def test_bad_p(self): + a, q, r = self.generate('tall') + # p must be positive + assert_raises(ValueError, qr_delete, q, r, 0, -1) + assert_raises(ValueError, qr_delete, q, r, 0, -1, 'col') + + # and nonzero + assert_raises(ValueError, qr_delete, q, r, 0, 0) + assert_raises(ValueError, qr_delete, q, r, 0, 0, 'col') + + # must have at least k+p rows or cols, depending. 
+ assert_raises(ValueError, qr_delete, q, r, 3, q.shape[0]-2) + assert_raises(ValueError, qr_delete, q, r, 3, r.shape[1]-2, 'col') + + def test_empty_q(self): + a, q, r = self.generate('tall') + # same code path for 'row' and 'col' + assert_raises(ValueError, qr_delete, np.array([]), r, 0, 1) + + def test_empty_r(self): + a, q, r = self.generate('tall') + # same code path for 'row' and 'col' + assert_raises(ValueError, qr_delete, q, np.array([]), 0, 1) + + def test_mismatched_q_and_r(self): + a, q, r = self.generate('tall') + r = r[1:] + assert_raises(ValueError, qr_delete, q, r, 0, 1) + + def test_unsupported_dtypes(self): + dts = ['int8', 'int16', 'int32', 'int64', + 'uint8', 'uint16', 'uint32', 'uint64', + 'float16', 'longdouble', 'clongdouble', + 'bool'] + a, q0, r0 = self.generate('tall') + for dtype in dts: + q = q0.real.astype(dtype) + with np.errstate(invalid="ignore"): + r = r0.real.astype(dtype) + assert_raises(ValueError, qr_delete, q, r0, 0, 1, 'row') + assert_raises(ValueError, qr_delete, q, r0, 0, 2, 'row') + assert_raises(ValueError, qr_delete, q, r0, 0, 1, 'col') + assert_raises(ValueError, qr_delete, q, r0, 0, 2, 'col') + + assert_raises(ValueError, qr_delete, q0, r, 0, 1, 'row') + assert_raises(ValueError, qr_delete, q0, r, 0, 2, 'row') + assert_raises(ValueError, qr_delete, q0, r, 0, 1, 'col') + assert_raises(ValueError, qr_delete, q0, r, 0, 2, 'col') + + def test_check_finite(self): + a0, q0, r0 = self.generate('tall') + + q = q0.copy('F') + q[1,1] = np.nan + assert_raises(ValueError, qr_delete, q, r0, 0, 1, 'row') + assert_raises(ValueError, qr_delete, q, r0, 0, 3, 'row') + assert_raises(ValueError, qr_delete, q, r0, 0, 1, 'col') + assert_raises(ValueError, qr_delete, q, r0, 0, 3, 'col') + + r = r0.copy('F') + r[1,1] = np.nan + assert_raises(ValueError, qr_delete, q0, r, 0, 1, 'row') + assert_raises(ValueError, qr_delete, q0, r, 0, 3, 'row') + assert_raises(ValueError, qr_delete, q0, r, 0, 1, 'col') + assert_raises(ValueError, qr_delete, q0, r, 0, 3, 'col') + + def test_qr_scalar(self): + a, q, r = self.generate('1x1') + assert_raises(ValueError, qr_delete, q[0, 0], r, 0, 1, 'row') + assert_raises(ValueError, qr_delete, q, r[0, 0], 0, 1, 'row') + assert_raises(ValueError, qr_delete, q[0, 0], r, 0, 1, 'col') + assert_raises(ValueError, qr_delete, q, r[0, 0], 0, 1, 'col') + +class TestQRdelete_f(BaseQRdelete): + dtype = np.dtype('f') + +class TestQRdelete_F(BaseQRdelete): + dtype = np.dtype('F') + +class TestQRdelete_d(BaseQRdelete): + dtype = np.dtype('d') + +class TestQRdelete_D(BaseQRdelete): + dtype = np.dtype('D') + +class BaseQRinsert(BaseQRdeltas): + def generate(self, type, mode='full', which='row', p=1): + a, q, r = super().generate(type, mode) + + assert_(p > 0) + + # super call set the seed... 
+ if which == 'row': + if p == 1: + u = np.random.random(a.shape[1]) + else: + u = np.random.random((p, a.shape[1])) + elif which == 'col': + if p == 1: + u = np.random.random(a.shape[0]) + else: + u = np.random.random((a.shape[0], p)) + else: + ValueError('which should be either "row" or "col"') + + if np.iscomplexobj(self.dtype.type(1)): + b = np.random.random(u.shape) + u = u + 1j * b + + u = u.astype(self.dtype) + return a, q, r, u + + def test_sqr_1_row(self): + a, q, r, u = self.generate('sqr', which='row') + for row in range(r.shape[0] + 1): + q1, r1 = qr_insert(q, r, u, row) + a1 = np.insert(a, row, u, 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_sqr_p_row(self): + # sqr + rows --> fat always + a, q, r, u = self.generate('sqr', which='row', p=3) + for row in range(r.shape[0] + 1): + q1, r1 = qr_insert(q, r, u, row) + a1 = np.insert(a, np.full(3, row, np.intp), u, 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_sqr_1_col(self): + a, q, r, u = self.generate('sqr', which='col') + for col in range(r.shape[1] + 1): + q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) + a1 = np.insert(a, col, u, 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_sqr_p_col(self): + # sqr + cols --> fat always + a, q, r, u = self.generate('sqr', which='col', p=3) + for col in range(r.shape[1] + 1): + q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) + a1 = np.insert(a, np.full(3, col, np.intp), u, 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_tall_1_row(self): + a, q, r, u = self.generate('tall', which='row') + for row in range(r.shape[0] + 1): + q1, r1 = qr_insert(q, r, u, row) + a1 = np.insert(a, row, u, 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_tall_p_row(self): + # tall + rows --> tall always + a, q, r, u = self.generate('tall', which='row', p=3) + for row in range(r.shape[0] + 1): + q1, r1 = qr_insert(q, r, u, row) + a1 = np.insert(a, np.full(3, row, np.intp), u, 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_tall_1_col(self): + a, q, r, u = self.generate('tall', which='col') + for col in range(r.shape[1] + 1): + q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) + a1 = np.insert(a, col, u, 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + # for column adds to tall matrices there are three cases to test + # tall + pcol --> tall + # tall + pcol --> sqr + # tall + pcol --> fat + def base_tall_p_col_xxx(self, p): + a, q, r, u = self.generate('tall', which='col', p=p) + for col in range(r.shape[1] + 1): + q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) + a1 = np.insert(a, np.full(p, col, np.intp), u, 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_tall_p_col_tall(self): + # 12x7 + 12x3 = 12x10 --> stays tall + self.base_tall_p_col_xxx(3) + + def test_tall_p_col_sqr(self): + # 12x7 + 12x5 = 12x12 --> becomes sqr + self.base_tall_p_col_xxx(5) + + def test_tall_p_col_fat(self): + # 12x7 + 12x7 = 12x14 --> becomes fat + self.base_tall_p_col_xxx(7) + + def test_fat_1_row(self): + a, q, r, u = self.generate('fat', which='row') + for row in range(r.shape[0] + 1): + q1, r1 = qr_insert(q, r, u, row) + a1 = np.insert(a, row, u, 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + # for row adds to fat matrices there are three cases to test + # fat + prow --> fat + # fat + prow --> sqr + # fat + prow --> tall + def base_fat_p_row_xxx(self, p): + a, q, r, u = self.generate('fat', which='row', p=p) + for row in range(r.shape[0] + 1): + q1, r1 = qr_insert(q, r, u, row) + a1 = 
np.insert(a, np.full(p, row, np.intp), u, 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_fat_p_row_fat(self): + # 7x12 + 3x12 = 10x12 --> stays fat + self.base_fat_p_row_xxx(3) + + def test_fat_p_row_sqr(self): + # 7x12 + 5x12 = 12x12 --> becomes sqr + self.base_fat_p_row_xxx(5) + + def test_fat_p_row_tall(self): + # 7x12 + 7x12 = 14x12 --> becomes tall + self.base_fat_p_row_xxx(7) + + def test_fat_1_col(self): + a, q, r, u = self.generate('fat', which='col') + for col in range(r.shape[1] + 1): + q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) + a1 = np.insert(a, col, u, 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_fat_p_col(self): + # fat + cols --> fat always + a, q, r, u = self.generate('fat', which='col', p=3) + for col in range(r.shape[1] + 1): + q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) + a1 = np.insert(a, np.full(3, col, np.intp), u, 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_economic_1_row(self): + a, q, r, u = self.generate('tall', 'economic', 'row') + for row in range(r.shape[0] + 1): + q1, r1 = qr_insert(q, r, u, row, overwrite_qru=False) + a1 = np.insert(a, row, u, 0) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_economic_p_row(self): + # tall + rows --> tall always + a, q, r, u = self.generate('tall', 'economic', 'row', 3) + for row in range(r.shape[0] + 1): + q1, r1 = qr_insert(q, r, u, row, overwrite_qru=False) + a1 = np.insert(a, np.full(3, row, np.intp), u, 0) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_economic_1_col(self): + a, q, r, u = self.generate('tall', 'economic', which='col') + for col in range(r.shape[1] + 1): + q1, r1 = qr_insert(q, r, u.copy(), col, 'col', overwrite_qru=False) + a1 = np.insert(a, col, u, 1) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_economic_1_col_bad_update(self): + # When the column to be added lies in the span of Q, the update is + # not meaningful. This is detected, and a LinAlgError is issued. 
+ q = np.eye(5, 3, dtype=self.dtype) + r = np.eye(3, dtype=self.dtype) + u = np.array([1, 0, 0, 0, 0], self.dtype) + assert_raises(linalg.LinAlgError, qr_insert, q, r, u, 0, 'col') + + # for column adds to economic matrices there are three cases to test + # eco + pcol --> eco + # eco + pcol --> sqr + # eco + pcol --> fat + def base_economic_p_col_xxx(self, p): + a, q, r, u = self.generate('tall', 'economic', which='col', p=p) + for col in range(r.shape[1] + 1): + q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) + a1 = np.insert(a, np.full(p, col, np.intp), u, 1) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_economic_p_col_eco(self): + # 12x7 + 12x3 = 12x10 --> stays eco + self.base_economic_p_col_xxx(3) + + def test_economic_p_col_sqr(self): + # 12x7 + 12x5 = 12x12 --> becomes sqr + self.base_economic_p_col_xxx(5) + + def test_economic_p_col_fat(self): + # 12x7 + 12x7 = 12x14 --> becomes fat + self.base_economic_p_col_xxx(7) + + def test_Mx1_1_row(self): + a, q, r, u = self.generate('Mx1', which='row') + for row in range(r.shape[0] + 1): + q1, r1 = qr_insert(q, r, u, row) + a1 = np.insert(a, row, u, 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_Mx1_p_row(self): + a, q, r, u = self.generate('Mx1', which='row', p=3) + for row in range(r.shape[0] + 1): + q1, r1 = qr_insert(q, r, u, row) + a1 = np.insert(a, np.full(3, row, np.intp), u, 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_Mx1_1_col(self): + a, q, r, u = self.generate('Mx1', which='col') + for col in range(r.shape[1] + 1): + q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) + a1 = np.insert(a, col, u, 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_Mx1_p_col(self): + a, q, r, u = self.generate('Mx1', which='col', p=3) + for col in range(r.shape[1] + 1): + q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) + a1 = np.insert(a, np.full(3, col, np.intp), u, 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_Mx1_economic_1_row(self): + a, q, r, u = self.generate('Mx1', 'economic', 'row') + for row in range(r.shape[0] + 1): + q1, r1 = qr_insert(q, r, u, row) + a1 = np.insert(a, row, u, 0) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_Mx1_economic_p_row(self): + a, q, r, u = self.generate('Mx1', 'economic', 'row', 3) + for row in range(r.shape[0] + 1): + q1, r1 = qr_insert(q, r, u, row) + a1 = np.insert(a, np.full(3, row, np.intp), u, 0) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_Mx1_economic_1_col(self): + a, q, r, u = self.generate('Mx1', 'economic', 'col') + for col in range(r.shape[1] + 1): + q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) + a1 = np.insert(a, col, u, 1) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_Mx1_economic_p_col(self): + a, q, r, u = self.generate('Mx1', 'economic', 'col', 3) + for col in range(r.shape[1] + 1): + q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) + a1 = np.insert(a, np.full(3, col, np.intp), u, 1) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_1xN_1_row(self): + a, q, r, u = self.generate('1xN', which='row') + for row in range(r.shape[0] + 1): + q1, r1 = qr_insert(q, r, u, row) + a1 = np.insert(a, row, u, 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_1xN_p_row(self): + a, q, r, u = self.generate('1xN', which='row', p=3) + for row in range(r.shape[0] + 1): + q1, r1 = qr_insert(q, r, u, row) + a1 = np.insert(a, np.full(3, row, np.intp), u, 0) + check_qr(q1, r1, a1, self.rtol, 
self.atol) + + def test_1xN_1_col(self): + a, q, r, u = self.generate('1xN', which='col') + for col in range(r.shape[1] + 1): + q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) + a1 = np.insert(a, col, u, 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_1xN_p_col(self): + a, q, r, u = self.generate('1xN', which='col', p=3) + for col in range(r.shape[1] + 1): + q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) + a1 = np.insert(a, np.full(3, col, np.intp), u, 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_1x1_1_row(self): + a, q, r, u = self.generate('1x1', which='row') + for row in range(r.shape[0] + 1): + q1, r1 = qr_insert(q, r, u, row) + a1 = np.insert(a, row, u, 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_1x1_p_row(self): + a, q, r, u = self.generate('1x1', which='row', p=3) + for row in range(r.shape[0] + 1): + q1, r1 = qr_insert(q, r, u, row) + a1 = np.insert(a, np.full(3, row, np.intp), u, 0) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_1x1_1_col(self): + a, q, r, u = self.generate('1x1', which='col') + for col in range(r.shape[1] + 1): + q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) + a1 = np.insert(a, col, u, 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_1x1_p_col(self): + a, q, r, u = self.generate('1x1', which='col', p=3) + for col in range(r.shape[1] + 1): + q1, r1 = qr_insert(q, r, u, col, 'col', overwrite_qru=False) + a1 = np.insert(a, np.full(3, col, np.intp), u, 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_1x1_1_scalar(self): + a, q, r, u = self.generate('1x1', which='row') + assert_raises(ValueError, qr_insert, q[0, 0], r, u, 0, 'row') + assert_raises(ValueError, qr_insert, q, r[0, 0], u, 0, 'row') + assert_raises(ValueError, qr_insert, q, r, u[0], 0, 'row') + + assert_raises(ValueError, qr_insert, q[0, 0], r, u, 0, 'col') + assert_raises(ValueError, qr_insert, q, r[0, 0], u, 0, 'col') + assert_raises(ValueError, qr_insert, q, r, u[0], 0, 'col') + + def base_non_simple_strides(self, adjust_strides, k, p, which): + for type in ['sqr', 'tall', 'fat']: + a, q0, r0, u0 = self.generate(type, which=which, p=p) + qs, rs, us = adjust_strides((q0, r0, u0)) + if p == 1: + ai = np.insert(a, k, u0, 0 if which == 'row' else 1) + else: + ai = np.insert(a, np.full(p, k, np.intp), + u0 if which == 'row' else u0, + 0 if which == 'row' else 1) + + # for each variable, q, r, u we try with it strided and + # overwrite=False. Then we try with overwrite=True. Nothing + # is checked to see if it can be overwritten, since only + # F ordered Q can be overwritten when adding columns. 
+ + q = q0.copy('F') + r = r0.copy('F') + u = u0.copy('F') + q1, r1 = qr_insert(qs, r, u, k, which, overwrite_qru=False) + check_qr(q1, r1, ai, self.rtol, self.atol) + q1o, r1o = qr_insert(qs, r, u, k, which, overwrite_qru=True) + check_qr(q1o, r1o, ai, self.rtol, self.atol) + + q = q0.copy('F') + r = r0.copy('F') + u = u0.copy('F') + q2, r2 = qr_insert(q, rs, u, k, which, overwrite_qru=False) + check_qr(q2, r2, ai, self.rtol, self.atol) + q2o, r2o = qr_insert(q, rs, u, k, which, overwrite_qru=True) + check_qr(q2o, r2o, ai, self.rtol, self.atol) + + q = q0.copy('F') + r = r0.copy('F') + u = u0.copy('F') + q3, r3 = qr_insert(q, r, us, k, which, overwrite_qru=False) + check_qr(q3, r3, ai, self.rtol, self.atol) + q3o, r3o = qr_insert(q, r, us, k, which, overwrite_qru=True) + check_qr(q3o, r3o, ai, self.rtol, self.atol) + + q = q0.copy('F') + r = r0.copy('F') + u = u0.copy('F') + # since some of these were consumed above + qs, rs, us = adjust_strides((q, r, u)) + q5, r5 = qr_insert(qs, rs, us, k, which, overwrite_qru=False) + check_qr(q5, r5, ai, self.rtol, self.atol) + q5o, r5o = qr_insert(qs, rs, us, k, which, overwrite_qru=True) + check_qr(q5o, r5o, ai, self.rtol, self.atol) + + def test_non_unit_strides_1_row(self): + self.base_non_simple_strides(make_strided, 0, 1, 'row') + + def test_non_unit_strides_p_row(self): + self.base_non_simple_strides(make_strided, 0, 3, 'row') + + def test_non_unit_strides_1_col(self): + self.base_non_simple_strides(make_strided, 0, 1, 'col') + + def test_non_unit_strides_p_col(self): + self.base_non_simple_strides(make_strided, 0, 3, 'col') + + def test_neg_strides_1_row(self): + self.base_non_simple_strides(negate_strides, 0, 1, 'row') + + def test_neg_strides_p_row(self): + self.base_non_simple_strides(negate_strides, 0, 3, 'row') + + def test_neg_strides_1_col(self): + self.base_non_simple_strides(negate_strides, 0, 1, 'col') + + def test_neg_strides_p_col(self): + self.base_non_simple_strides(negate_strides, 0, 3, 'col') + + def test_non_itemsize_strides_1_row(self): + self.base_non_simple_strides(nonitemsize_strides, 0, 1, 'row') + + def test_non_itemsize_strides_p_row(self): + self.base_non_simple_strides(nonitemsize_strides, 0, 3, 'row') + + def test_non_itemsize_strides_1_col(self): + self.base_non_simple_strides(nonitemsize_strides, 0, 1, 'col') + + def test_non_itemsize_strides_p_col(self): + self.base_non_simple_strides(nonitemsize_strides, 0, 3, 'col') + + def test_non_native_byte_order_1_row(self): + self.base_non_simple_strides(make_nonnative, 0, 1, 'row') + + def test_non_native_byte_order_p_row(self): + self.base_non_simple_strides(make_nonnative, 0, 3, 'row') + + def test_non_native_byte_order_1_col(self): + self.base_non_simple_strides(make_nonnative, 0, 1, 'col') + + def test_non_native_byte_order_p_col(self): + self.base_non_simple_strides(make_nonnative, 0, 3, 'col') + + def test_overwrite_qu_rank_1(self): + # when inserting rows, the size of both Q and R change, so only + # column inserts can overwrite q. Only complex column inserts + # with C ordered Q overwrite u. 
Any contiguous Q is overwritten + # when inserting 1 column + a, q0, r, u, = self.generate('sqr', which='col', p=1) + q = q0.copy('C') + u0 = u.copy() + # don't overwrite + q1, r1 = qr_insert(q, r, u, 0, 'col', overwrite_qru=False) + a1 = np.insert(a, 0, u0, 1) + check_qr(q1, r1, a1, self.rtol, self.atol) + check_qr(q, r, a, self.rtol, self.atol) + + # try overwriting + q2, r2 = qr_insert(q, r, u, 0, 'col', overwrite_qru=True) + check_qr(q2, r2, a1, self.rtol, self.atol) + # verify the overwriting + assert_allclose(q2, q, rtol=self.rtol, atol=self.atol) + assert_allclose(u, u0.conj(), self.rtol, self.atol) + + # now try with a fortran ordered Q + qF = q0.copy('F') + u1 = u0.copy() + q3, r3 = qr_insert(qF, r, u1, 0, 'col', overwrite_qru=False) + check_qr(q3, r3, a1, self.rtol, self.atol) + check_qr(qF, r, a, self.rtol, self.atol) + + # try overwriting + q4, r4 = qr_insert(qF, r, u1, 0, 'col', overwrite_qru=True) + check_qr(q4, r4, a1, self.rtol, self.atol) + assert_allclose(q4, qF, rtol=self.rtol, atol=self.atol) + + def test_overwrite_qu_rank_p(self): + # when inserting rows, the size of both Q and R change, so only + # column inserts can potentially overwrite Q. In practice, only + # F ordered Q are overwritten with a rank p update. + a, q0, r, u, = self.generate('sqr', which='col', p=3) + q = q0.copy('F') + a1 = np.insert(a, np.zeros(3, np.intp), u, 1) + + # don't overwrite + q1, r1 = qr_insert(q, r, u, 0, 'col', overwrite_qru=False) + check_qr(q1, r1, a1, self.rtol, self.atol) + check_qr(q, r, a, self.rtol, self.atol) + + # try overwriting + q2, r2 = qr_insert(q, r, u, 0, 'col', overwrite_qru=True) + check_qr(q2, r2, a1, self.rtol, self.atol) + assert_allclose(q2, q, rtol=self.rtol, atol=self.atol) + + def test_empty_inputs(self): + a, q, r, u = self.generate('sqr', which='row') + assert_raises(ValueError, qr_insert, np.array([]), r, u, 0, 'row') + assert_raises(ValueError, qr_insert, q, np.array([]), u, 0, 'row') + assert_raises(ValueError, qr_insert, q, r, np.array([]), 0, 'row') + assert_raises(ValueError, qr_insert, np.array([]), r, u, 0, 'col') + assert_raises(ValueError, qr_insert, q, np.array([]), u, 0, 'col') + assert_raises(ValueError, qr_insert, q, r, np.array([]), 0, 'col') + + def test_mismatched_shapes(self): + a, q, r, u = self.generate('tall', which='row') + assert_raises(ValueError, qr_insert, q, r[1:], u, 0, 'row') + assert_raises(ValueError, qr_insert, q[:-2], r, u, 0, 'row') + assert_raises(ValueError, qr_insert, q, r, u[1:], 0, 'row') + assert_raises(ValueError, qr_insert, q, r[1:], u, 0, 'col') + assert_raises(ValueError, qr_insert, q[:-2], r, u, 0, 'col') + assert_raises(ValueError, qr_insert, q, r, u[1:], 0, 'col') + + def test_unsupported_dtypes(self): + dts = ['int8', 'int16', 'int32', 'int64', + 'uint8', 'uint16', 'uint32', 'uint64', + 'float16', 'longdouble', 'clongdouble', + 'bool'] + a, q0, r0, u0 = self.generate('sqr', which='row') + for dtype in dts: + q = q0.real.astype(dtype) + with np.errstate(invalid="ignore"): + r = r0.real.astype(dtype) + u = u0.real.astype(dtype) + assert_raises(ValueError, qr_insert, q, r0, u0, 0, 'row') + assert_raises(ValueError, qr_insert, q, r0, u0, 0, 'col') + assert_raises(ValueError, qr_insert, q0, r, u0, 0, 'row') + assert_raises(ValueError, qr_insert, q0, r, u0, 0, 'col') + assert_raises(ValueError, qr_insert, q0, r0, u, 0, 'row') + assert_raises(ValueError, qr_insert, q0, r0, u, 0, 'col') + + def test_check_finite(self): + a0, q0, r0, u0 = self.generate('sqr', which='row', p=3) + + q = q0.copy('F') + q[1,1] = np.nan + 
assert_raises(ValueError, qr_insert, q, r0, u0[:,0], 0, 'row') + assert_raises(ValueError, qr_insert, q, r0, u0, 0, 'row') + assert_raises(ValueError, qr_insert, q, r0, u0[:,0], 0, 'col') + assert_raises(ValueError, qr_insert, q, r0, u0, 0, 'col') + + r = r0.copy('F') + r[1,1] = np.nan + assert_raises(ValueError, qr_insert, q0, r, u0[:,0], 0, 'row') + assert_raises(ValueError, qr_insert, q0, r, u0, 0, 'row') + assert_raises(ValueError, qr_insert, q0, r, u0[:,0], 0, 'col') + assert_raises(ValueError, qr_insert, q0, r, u0, 0, 'col') + + u = u0.copy('F') + u[0,0] = np.nan + assert_raises(ValueError, qr_insert, q0, r0, u[:,0], 0, 'row') + assert_raises(ValueError, qr_insert, q0, r0, u, 0, 'row') + assert_raises(ValueError, qr_insert, q0, r0, u[:,0], 0, 'col') + assert_raises(ValueError, qr_insert, q0, r0, u, 0, 'col') + +class TestQRinsert_f(BaseQRinsert): + dtype = np.dtype('f') + +class TestQRinsert_F(BaseQRinsert): + dtype = np.dtype('F') + +class TestQRinsert_d(BaseQRinsert): + dtype = np.dtype('d') + +class TestQRinsert_D(BaseQRinsert): + dtype = np.dtype('D') + +class BaseQRupdate(BaseQRdeltas): + def generate(self, type, mode='full', p=1): + a, q, r = super().generate(type, mode) + + # super call set the seed... + if p == 1: + u = np.random.random(q.shape[0]) + v = np.random.random(r.shape[1]) + else: + u = np.random.random((q.shape[0], p)) + v = np.random.random((r.shape[1], p)) + + if np.iscomplexobj(self.dtype.type(1)): + b = np.random.random(u.shape) + u = u + 1j * b + + c = np.random.random(v.shape) + v = v + 1j * c + + u = u.astype(self.dtype) + v = v.astype(self.dtype) + return a, q, r, u, v + + def test_sqr_rank_1(self): + a, q, r, u, v = self.generate('sqr') + q1, r1 = qr_update(q, r, u, v, False) + a1 = a + np.outer(u, v.conj()) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_sqr_rank_p(self): + # test ndim = 2, rank 1 updates here too + for p in [1, 2, 3, 5]: + a, q, r, u, v = self.generate('sqr', p=p) + if p == 1: + u = u.reshape(u.size, 1) + v = v.reshape(v.size, 1) + q1, r1 = qr_update(q, r, u, v, False) + a1 = a + np.dot(u, v.T.conj()) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_tall_rank_1(self): + a, q, r, u, v = self.generate('tall') + q1, r1 = qr_update(q, r, u, v, False) + a1 = a + np.outer(u, v.conj()) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_tall_rank_p(self): + for p in [1, 2, 3, 5]: + a, q, r, u, v = self.generate('tall', p=p) + if p == 1: + u = u.reshape(u.size, 1) + v = v.reshape(v.size, 1) + q1, r1 = qr_update(q, r, u, v, False) + a1 = a + np.dot(u, v.T.conj()) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_fat_rank_1(self): + a, q, r, u, v = self.generate('fat') + q1, r1 = qr_update(q, r, u, v, False) + a1 = a + np.outer(u, v.conj()) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_fat_rank_p(self): + for p in [1, 2, 3, 5]: + a, q, r, u, v = self.generate('fat', p=p) + if p == 1: + u = u.reshape(u.size, 1) + v = v.reshape(v.size, 1) + q1, r1 = qr_update(q, r, u, v, False) + a1 = a + np.dot(u, v.T.conj()) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_economic_rank_1(self): + a, q, r, u, v = self.generate('tall', 'economic') + q1, r1 = qr_update(q, r, u, v, False) + a1 = a + np.outer(u, v.conj()) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_economic_rank_p(self): + for p in [1, 2, 3, 5]: + a, q, r, u, v = self.generate('tall', 'economic', p) + if p == 1: + u = u.reshape(u.size, 1) + v = v.reshape(v.size, 1) + q1, r1 = qr_update(q, r, u, v, False) + a1 = a + np.dot(u, 
v.T.conj()) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_Mx1_rank_1(self): + a, q, r, u, v = self.generate('Mx1') + q1, r1 = qr_update(q, r, u, v, False) + a1 = a + np.outer(u, v.conj()) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_Mx1_rank_p(self): + # when M or N == 1, only a rank 1 update is allowed. This isn't + # fundamental limitation, but the code does not support it. + a, q, r, u, v = self.generate('Mx1', p=1) + u = u.reshape(u.size, 1) + v = v.reshape(v.size, 1) + q1, r1 = qr_update(q, r, u, v, False) + a1 = a + np.dot(u, v.T.conj()) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_Mx1_economic_rank_1(self): + a, q, r, u, v = self.generate('Mx1', 'economic') + q1, r1 = qr_update(q, r, u, v, False) + a1 = a + np.outer(u, v.conj()) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_Mx1_economic_rank_p(self): + # when M or N == 1, only a rank 1 update is allowed. This isn't + # fundamental limitation, but the code does not support it. + a, q, r, u, v = self.generate('Mx1', 'economic', p=1) + u = u.reshape(u.size, 1) + v = v.reshape(v.size, 1) + q1, r1 = qr_update(q, r, u, v, False) + a1 = a + np.dot(u, v.T.conj()) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + + def test_1xN_rank_1(self): + a, q, r, u, v = self.generate('1xN') + q1, r1 = qr_update(q, r, u, v, False) + a1 = a + np.outer(u, v.conj()) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_1xN_rank_p(self): + # when M or N == 1, only a rank 1 update is allowed. This isn't + # fundamental limitation, but the code does not support it. + a, q, r, u, v = self.generate('1xN', p=1) + u = u.reshape(u.size, 1) + v = v.reshape(v.size, 1) + q1, r1 = qr_update(q, r, u, v, False) + a1 = a + np.dot(u, v.T.conj()) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_1x1_rank_1(self): + a, q, r, u, v = self.generate('1x1') + q1, r1 = qr_update(q, r, u, v, False) + a1 = a + np.outer(u, v.conj()) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_1x1_rank_p(self): + # when M or N == 1, only a rank 1 update is allowed. This isn't + # fundamental limitation, but the code does not support it. + a, q, r, u, v = self.generate('1x1', p=1) + u = u.reshape(u.size, 1) + v = v.reshape(v.size, 1) + q1, r1 = qr_update(q, r, u, v, False) + a1 = a + np.dot(u, v.T.conj()) + check_qr(q1, r1, a1, self.rtol, self.atol) + + def test_1x1_rank_1_scalar(self): + a, q, r, u, v = self.generate('1x1') + assert_raises(ValueError, qr_update, q[0, 0], r, u, v) + assert_raises(ValueError, qr_update, q, r[0, 0], u, v) + assert_raises(ValueError, qr_update, q, r, u[0], v) + assert_raises(ValueError, qr_update, q, r, u, v[0]) + + def base_non_simple_strides(self, adjust_strides, mode, p, overwriteable): + assert_sqr = False if mode == 'economic' else True + for type in ['sqr', 'tall', 'fat']: + a, q0, r0, u0, v0 = self.generate(type, mode, p) + qs, rs, us, vs = adjust_strides((q0, r0, u0, v0)) + if p == 1: + aup = a + np.outer(u0, v0.conj()) + else: + aup = a + np.dot(u0, v0.T.conj()) + + # for each variable, q, r, u, v we try with it strided and + # overwrite=False. Then we try with overwrite=True, and make + # sure that if p == 1, r and v are still overwritten. + # a strided q and u must always be copied. 
+ + q = q0.copy('F') + r = r0.copy('F') + u = u0.copy('F') + v = v0.copy('C') + q1, r1 = qr_update(qs, r, u, v, False) + check_qr(q1, r1, aup, self.rtol, self.atol, assert_sqr) + q1o, r1o = qr_update(qs, r, u, v, True) + check_qr(q1o, r1o, aup, self.rtol, self.atol, assert_sqr) + if overwriteable: + assert_allclose(r1o, r, rtol=self.rtol, atol=self.atol) + assert_allclose(v, v0.conj(), rtol=self.rtol, atol=self.atol) + + q = q0.copy('F') + r = r0.copy('F') + u = u0.copy('F') + v = v0.copy('C') + q2, r2 = qr_update(q, rs, u, v, False) + check_qr(q2, r2, aup, self.rtol, self.atol, assert_sqr) + q2o, r2o = qr_update(q, rs, u, v, True) + check_qr(q2o, r2o, aup, self.rtol, self.atol, assert_sqr) + if overwriteable: + assert_allclose(r2o, rs, rtol=self.rtol, atol=self.atol) + assert_allclose(v, v0.conj(), rtol=self.rtol, atol=self.atol) + + q = q0.copy('F') + r = r0.copy('F') + u = u0.copy('F') + v = v0.copy('C') + q3, r3 = qr_update(q, r, us, v, False) + check_qr(q3, r3, aup, self.rtol, self.atol, assert_sqr) + q3o, r3o = qr_update(q, r, us, v, True) + check_qr(q3o, r3o, aup, self.rtol, self.atol, assert_sqr) + if overwriteable: + assert_allclose(r3o, r, rtol=self.rtol, atol=self.atol) + assert_allclose(v, v0.conj(), rtol=self.rtol, atol=self.atol) + + q = q0.copy('F') + r = r0.copy('F') + u = u0.copy('F') + v = v0.copy('C') + q4, r4 = qr_update(q, r, u, vs, False) + check_qr(q4, r4, aup, self.rtol, self.atol, assert_sqr) + q4o, r4o = qr_update(q, r, u, vs, True) + check_qr(q4o, r4o, aup, self.rtol, self.atol, assert_sqr) + if overwriteable: + assert_allclose(r4o, r, rtol=self.rtol, atol=self.atol) + assert_allclose(vs, v0.conj(), rtol=self.rtol, atol=self.atol) + + q = q0.copy('F') + r = r0.copy('F') + u = u0.copy('F') + v = v0.copy('C') + # since some of these were consumed above + qs, rs, us, vs = adjust_strides((q, r, u, v)) + q5, r5 = qr_update(qs, rs, us, vs, False) + check_qr(q5, r5, aup, self.rtol, self.atol, assert_sqr) + q5o, r5o = qr_update(qs, rs, us, vs, True) + check_qr(q5o, r5o, aup, self.rtol, self.atol, assert_sqr) + if overwriteable: + assert_allclose(r5o, rs, rtol=self.rtol, atol=self.atol) + assert_allclose(vs, v0.conj(), rtol=self.rtol, atol=self.atol) + + def test_non_unit_strides_rank_1(self): + self.base_non_simple_strides(make_strided, 'full', 1, True) + + def test_non_unit_strides_economic_rank_1(self): + self.base_non_simple_strides(make_strided, 'economic', 1, True) + + def test_non_unit_strides_rank_p(self): + self.base_non_simple_strides(make_strided, 'full', 3, False) + + def test_non_unit_strides_economic_rank_p(self): + self.base_non_simple_strides(make_strided, 'economic', 3, False) + + def test_neg_strides_rank_1(self): + self.base_non_simple_strides(negate_strides, 'full', 1, False) + + def test_neg_strides_economic_rank_1(self): + self.base_non_simple_strides(negate_strides, 'economic', 1, False) + + def test_neg_strides_rank_p(self): + self.base_non_simple_strides(negate_strides, 'full', 3, False) + + def test_neg_strides_economic_rank_p(self): + self.base_non_simple_strides(negate_strides, 'economic', 3, False) + + def test_non_itemsize_strides_rank_1(self): + self.base_non_simple_strides(nonitemsize_strides, 'full', 1, False) + + def test_non_itemsize_strides_economic_rank_1(self): + self.base_non_simple_strides(nonitemsize_strides, 'economic', 1, False) + + def test_non_itemsize_strides_rank_p(self): + self.base_non_simple_strides(nonitemsize_strides, 'full', 3, False) + + def test_non_itemsize_strides_economic_rank_p(self): + 
self.base_non_simple_strides(nonitemsize_strides, 'economic', 3, False) + + def test_non_native_byte_order_rank_1(self): + self.base_non_simple_strides(make_nonnative, 'full', 1, False) + + def test_non_native_byte_order_economic_rank_1(self): + self.base_non_simple_strides(make_nonnative, 'economic', 1, False) + + def test_non_native_byte_order_rank_p(self): + self.base_non_simple_strides(make_nonnative, 'full', 3, False) + + def test_non_native_byte_order_economic_rank_p(self): + self.base_non_simple_strides(make_nonnative, 'economic', 3, False) + + def test_overwrite_qruv_rank_1(self): + # Any positive strided q, r, u, and v can be overwritten for a rank 1 + # update, only checking C and F contiguous. + a, q0, r0, u0, v0 = self.generate('sqr') + a1 = a + np.outer(u0, v0.conj()) + q = q0.copy('F') + r = r0.copy('F') + u = u0.copy('F') + v = v0.copy('F') + + # don't overwrite + q1, r1 = qr_update(q, r, u, v, False) + check_qr(q1, r1, a1, self.rtol, self.atol) + check_qr(q, r, a, self.rtol, self.atol) + + q2, r2 = qr_update(q, r, u, v, True) + check_qr(q2, r2, a1, self.rtol, self.atol) + # verify the overwriting, no good way to check u and v. + assert_allclose(q2, q, rtol=self.rtol, atol=self.atol) + assert_allclose(r2, r, rtol=self.rtol, atol=self.atol) + + q = q0.copy('C') + r = r0.copy('C') + u = u0.copy('C') + v = v0.copy('C') + q3, r3 = qr_update(q, r, u, v, True) + check_qr(q3, r3, a1, self.rtol, self.atol) + assert_allclose(q3, q, rtol=self.rtol, atol=self.atol) + assert_allclose(r3, r, rtol=self.rtol, atol=self.atol) + + def test_overwrite_qruv_rank_1_economic(self): + # updating economic decompositions can overwrite any contiguous r, + # and positively strided r and u. V is only ever read. + # only checking C and F contiguous. + a, q0, r0, u0, v0 = self.generate('tall', 'economic') + a1 = a + np.outer(u0, v0.conj()) + q = q0.copy('F') + r = r0.copy('F') + u = u0.copy('F') + v = v0.copy('F') + + # don't overwrite + q1, r1 = qr_update(q, r, u, v, False) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + check_qr(q, r, a, self.rtol, self.atol, False) + + q2, r2 = qr_update(q, r, u, v, True) + check_qr(q2, r2, a1, self.rtol, self.atol, False) + # verify the overwriting, no good way to check u and v. + assert_allclose(q2, q, rtol=self.rtol, atol=self.atol) + assert_allclose(r2, r, rtol=self.rtol, atol=self.atol) + + q = q0.copy('C') + r = r0.copy('C') + u = u0.copy('C') + v = v0.copy('C') + q3, r3 = qr_update(q, r, u, v, True) + check_qr(q3, r3, a1, self.rtol, self.atol, False) + assert_allclose(q3, q, rtol=self.rtol, atol=self.atol) + assert_allclose(r3, r, rtol=self.rtol, atol=self.atol) + + def test_overwrite_qruv_rank_p(self): + # for rank p updates, q r must be F contiguous, v must be C (v.T --> F) + # and u can be C or F, but is only overwritten if Q is C and complex + a, q0, r0, u0, v0 = self.generate('sqr', p=3) + a1 = a + np.dot(u0, v0.T.conj()) + q = q0.copy('F') + r = r0.copy('F') + u = u0.copy('F') + v = v0.copy('C') + + # don't overwrite + q1, r1 = qr_update(q, r, u, v, False) + check_qr(q1, r1, a1, self.rtol, self.atol) + check_qr(q, r, a, self.rtol, self.atol) + + q2, r2 = qr_update(q, r, u, v, True) + check_qr(q2, r2, a1, self.rtol, self.atol) + # verify the overwriting, no good way to check u and v. 
+ assert_allclose(q2, q, rtol=self.rtol, atol=self.atol) + assert_allclose(r2, r, rtol=self.rtol, atol=self.atol) + + def test_empty_inputs(self): + a, q, r, u, v = self.generate('tall') + assert_raises(ValueError, qr_update, np.array([]), r, u, v) + assert_raises(ValueError, qr_update, q, np.array([]), u, v) + assert_raises(ValueError, qr_update, q, r, np.array([]), v) + assert_raises(ValueError, qr_update, q, r, u, np.array([])) + + def test_mismatched_shapes(self): + a, q, r, u, v = self.generate('tall') + assert_raises(ValueError, qr_update, q, r[1:], u, v) + assert_raises(ValueError, qr_update, q[:-2], r, u, v) + assert_raises(ValueError, qr_update, q, r, u[1:], v) + assert_raises(ValueError, qr_update, q, r, u, v[1:]) + + def test_unsupported_dtypes(self): + dts = ['int8', 'int16', 'int32', 'int64', + 'uint8', 'uint16', 'uint32', 'uint64', + 'float16', 'longdouble', 'clongdouble', + 'bool'] + a, q0, r0, u0, v0 = self.generate('tall') + for dtype in dts: + q = q0.real.astype(dtype) + with np.errstate(invalid="ignore"): + r = r0.real.astype(dtype) + u = u0.real.astype(dtype) + v = v0.real.astype(dtype) + assert_raises(ValueError, qr_update, q, r0, u0, v0) + assert_raises(ValueError, qr_update, q0, r, u0, v0) + assert_raises(ValueError, qr_update, q0, r0, u, v0) + assert_raises(ValueError, qr_update, q0, r0, u0, v) + + def test_integer_input(self): + q = np.arange(16).reshape(4, 4) + r = q.copy() # doesn't matter + u = q[:, 0].copy() + v = r[0, :].copy() + assert_raises(ValueError, qr_update, q, r, u, v) + + def test_check_finite(self): + a0, q0, r0, u0, v0 = self.generate('tall', p=3) + + q = q0.copy('F') + q[1,1] = np.nan + assert_raises(ValueError, qr_update, q, r0, u0[:,0], v0[:,0]) + assert_raises(ValueError, qr_update, q, r0, u0, v0) + + r = r0.copy('F') + r[1,1] = np.nan + assert_raises(ValueError, qr_update, q0, r, u0[:,0], v0[:,0]) + assert_raises(ValueError, qr_update, q0, r, u0, v0) + + u = u0.copy('F') + u[0,0] = np.nan + assert_raises(ValueError, qr_update, q0, r0, u[:,0], v0[:,0]) + assert_raises(ValueError, qr_update, q0, r0, u, v0) + + v = v0.copy('F') + v[0,0] = np.nan + assert_raises(ValueError, qr_update, q0, r0, u[:,0], v[:,0]) + assert_raises(ValueError, qr_update, q0, r0, u, v) + + def test_economic_check_finite(self): + a0, q0, r0, u0, v0 = self.generate('tall', mode='economic', p=3) + + q = q0.copy('F') + q[1,1] = np.nan + assert_raises(ValueError, qr_update, q, r0, u0[:,0], v0[:,0]) + assert_raises(ValueError, qr_update, q, r0, u0, v0) + + r = r0.copy('F') + r[1,1] = np.nan + assert_raises(ValueError, qr_update, q0, r, u0[:,0], v0[:,0]) + assert_raises(ValueError, qr_update, q0, r, u0, v0) + + u = u0.copy('F') + u[0,0] = np.nan + assert_raises(ValueError, qr_update, q0, r0, u[:,0], v0[:,0]) + assert_raises(ValueError, qr_update, q0, r0, u, v0) + + v = v0.copy('F') + v[0,0] = np.nan + assert_raises(ValueError, qr_update, q0, r0, u[:,0], v[:,0]) + assert_raises(ValueError, qr_update, q0, r0, u, v) + + def test_u_exactly_in_span_q(self): + q = np.array([[0, 0], [0, 0], [1, 0], [0, 1]], self.dtype) + r = np.array([[1, 0], [0, 1]], self.dtype) + u = np.array([0, 0, 0, -1], self.dtype) + v = np.array([1, 2], self.dtype) + q1, r1 = qr_update(q, r, u, v) + a1 = np.dot(q, r) + np.outer(u, v.conj()) + check_qr(q1, r1, a1, self.rtol, self.atol, False) + +class TestQRupdate_f(BaseQRupdate): + dtype = np.dtype('f') + +class TestQRupdate_F(BaseQRupdate): + dtype = np.dtype('F') + +class TestQRupdate_d(BaseQRupdate): + dtype = np.dtype('d') + +class 
TestQRupdate_D(BaseQRupdate): + dtype = np.dtype('D') + +def test_form_qTu(): + # We want to ensure that all of the code paths through this function are + # tested. Most of them should be hit with the rest of the test suite, but + # explicit tests make clear precisely what is being tested. + # + # This function expects that Q is either C or F contiguous and square. + # Economic mode decompositions (Q is (M, N), M != N) do not go through this + # function. U may have any positive strides. + # + # Some of these tests are duplicates, since contiguous 1d arrays are both C + # and F. + + q_order = ['F', 'C'] + q_shape = [(8, 8), ] + u_order = ['F', 'C', 'A'] # here A means neither F nor C + u_shape = [1, 3] + dtype = ['f', 'd', 'F', 'D'] + + for qo, qs, uo, us, d in \ + itertools.product(q_order, q_shape, u_order, u_shape, dtype): + if us == 1: + check_form_qTu(qo, qs, uo, us, 1, d) + check_form_qTu(qo, qs, uo, us, 2, d) + else: + check_form_qTu(qo, qs, uo, us, 2, d) + +def check_form_qTu(q_order, q_shape, u_order, u_shape, u_ndim, dtype): + np.random.seed(47) + if u_shape == 1 and u_ndim == 1: + u_shape = (q_shape[0],) + else: + u_shape = (q_shape[0], u_shape) + dtype = np.dtype(dtype) + + if dtype.char in 'fd': + q = np.random.random(q_shape) + u = np.random.random(u_shape) + elif dtype.char in 'FD': + q = np.random.random(q_shape) + 1j*np.random.random(q_shape) + u = np.random.random(u_shape) + 1j*np.random.random(u_shape) + else: + raise ValueError("form_qTu doesn't support this dtype") + + q = np.require(q, dtype, q_order) + if u_order != 'A': + u = np.require(u, dtype, u_order) + else: + u, = make_strided((u.astype(dtype),)) + + rtol = 10.0 ** -(np.finfo(dtype).precision-2) + atol = 2*np.finfo(dtype).eps + + expected = np.dot(q.T.conj(), u) + res = _decomp_update._form_qTu(q, u) + assert_allclose(res, expected, rtol=rtol, atol=atol) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_fblas.py b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_fblas.py new file mode 100644 index 0000000000000000000000000000000000000000..7c5ada830043af0eecb6d04bf39aef13d29d777c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_fblas.py @@ -0,0 +1,607 @@ +# Test interfaces to fortran blas. +# +# The tests are more of the interface than of the underlying blas. +# Only very small matrices are checked -- N=3 or so. +# +# !! Complex calculations really aren't checked that carefully. +# !! Only real valued complex numbers are used in tests. + +from numpy import float32, float64, complex64, complex128, arange, array, \ + zeros, shape, transpose, newaxis, common_type, conjugate + +from scipy.linalg import _fblas as fblas + +from numpy.testing import assert_array_equal, \ + assert_allclose, assert_array_almost_equal, assert_ + +import pytest + +# decimal accuracy to require between Python and LAPACK/BLAS calculations +accuracy = 5 + +# Since numpy.dot likely uses the same blas, use this routine +# to check. 
+ + +def matrixmultiply(a, b): + if len(b.shape) == 1: + b_is_vector = True + b = b[:, newaxis] + else: + b_is_vector = False + assert_(a.shape[1] == b.shape[0]) + c = zeros((a.shape[0], b.shape[1]), common_type(a, b)) + for i in range(a.shape[0]): + for j in range(b.shape[1]): + s = 0 + for k in range(a.shape[1]): + s += a[i, k] * b[k, j] + c[i, j] = s + if b_is_vector: + c = c.reshape((a.shape[0],)) + return c + +################################################## +# Test blas ?axpy + + +class BaseAxpy: + ''' Mixin class for axpy tests ''' + + def test_default_a(self): + x = arange(3., dtype=self.dtype) + y = arange(3., dtype=x.dtype) + real_y = x*1.+y + y = self.blas_func(x, y) + assert_array_equal(real_y, y) + + def test_simple(self): + x = arange(3., dtype=self.dtype) + y = arange(3., dtype=x.dtype) + real_y = x*3.+y + y = self.blas_func(x, y, a=3.) + assert_array_equal(real_y, y) + + def test_x_stride(self): + x = arange(6., dtype=self.dtype) + y = zeros(3, x.dtype) + y = arange(3., dtype=x.dtype) + real_y = x[::2]*3.+y + y = self.blas_func(x, y, a=3., n=3, incx=2) + assert_array_equal(real_y, y) + + def test_y_stride(self): + x = arange(3., dtype=self.dtype) + y = zeros(6, x.dtype) + real_y = x*3.+y[::2] + y = self.blas_func(x, y, a=3., n=3, incy=2) + assert_array_equal(real_y, y[::2]) + + def test_x_and_y_stride(self): + x = arange(12., dtype=self.dtype) + y = zeros(6, x.dtype) + real_y = x[::4]*3.+y[::2] + y = self.blas_func(x, y, a=3., n=3, incx=4, incy=2) + assert_array_equal(real_y, y[::2]) + + def test_x_bad_size(self): + x = arange(12., dtype=self.dtype) + y = zeros(6, x.dtype) + with pytest.raises(Exception, match='failed for 1st keyword'): + self.blas_func(x, y, n=4, incx=5) + + def test_y_bad_size(self): + x = arange(12., dtype=self.dtype) + y = zeros(6, x.dtype) + with pytest.raises(Exception, match='failed for 1st keyword'): + self.blas_func(x, y, n=3, incy=5) + + +try: + class TestSaxpy(BaseAxpy): + blas_func = fblas.saxpy + dtype = float32 +except AttributeError: + class TestSaxpy: + pass + + +class TestDaxpy(BaseAxpy): + blas_func = fblas.daxpy + dtype = float64 + + +try: + class TestCaxpy(BaseAxpy): + blas_func = fblas.caxpy + dtype = complex64 +except AttributeError: + class TestCaxpy: + pass + + +class TestZaxpy(BaseAxpy): + blas_func = fblas.zaxpy + dtype = complex128 + + +################################################## +# Test blas ?scal + +class BaseScal: + ''' Mixin class for scal testing ''' + + def test_simple(self): + x = arange(3., dtype=self.dtype) + real_x = x*3. 
+ x = self.blas_func(3., x) + assert_array_equal(real_x, x) + + def test_x_stride(self): + x = arange(6., dtype=self.dtype) + real_x = x.copy() + real_x[::2] = x[::2]*array(3., self.dtype) + x = self.blas_func(3., x, n=3, incx=2) + assert_array_equal(real_x, x) + + def test_x_bad_size(self): + x = arange(12., dtype=self.dtype) + with pytest.raises(Exception, match='failed for 1st keyword'): + self.blas_func(2., x, n=4, incx=5) + + +try: + class TestSscal(BaseScal): + blas_func = fblas.sscal + dtype = float32 +except AttributeError: + class TestSscal: + pass + + +class TestDscal(BaseScal): + blas_func = fblas.dscal + dtype = float64 + + +try: + class TestCscal(BaseScal): + blas_func = fblas.cscal + dtype = complex64 +except AttributeError: + class TestCscal: + pass + + +class TestZscal(BaseScal): + blas_func = fblas.zscal + dtype = complex128 + + +################################################## +# Test blas ?copy + +class BaseCopy: + ''' Mixin class for copy testing ''' + + def test_simple(self): + x = arange(3., dtype=self.dtype) + y = zeros(shape(x), x.dtype) + y = self.blas_func(x, y) + assert_array_equal(x, y) + + def test_x_stride(self): + x = arange(6., dtype=self.dtype) + y = zeros(3, x.dtype) + y = self.blas_func(x, y, n=3, incx=2) + assert_array_equal(x[::2], y) + + def test_y_stride(self): + x = arange(3., dtype=self.dtype) + y = zeros(6, x.dtype) + y = self.blas_func(x, y, n=3, incy=2) + assert_array_equal(x, y[::2]) + + def test_x_and_y_stride(self): + x = arange(12., dtype=self.dtype) + y = zeros(6, x.dtype) + y = self.blas_func(x, y, n=3, incx=4, incy=2) + assert_array_equal(x[::4], y[::2]) + + def test_x_bad_size(self): + x = arange(12., dtype=self.dtype) + y = zeros(6, x.dtype) + with pytest.raises(Exception, match='failed for 1st keyword'): + self.blas_func(x, y, n=4, incx=5) + + def test_y_bad_size(self): + x = arange(12., dtype=self.dtype) + y = zeros(6, x.dtype) + with pytest.raises(Exception, match='failed for 1st keyword'): + self.blas_func(x, y, n=3, incy=5) + + # def test_y_bad_type(self): + ## Hmmm. Should this work? What should be the output. 
+ # x = arange(3.,dtype=self.dtype) + # y = zeros(shape(x)) + # self.blas_func(x,y) + # assert_array_equal(x,y) + + +try: + class TestScopy(BaseCopy): + blas_func = fblas.scopy + dtype = float32 +except AttributeError: + class TestScopy: + pass + + +class TestDcopy(BaseCopy): + blas_func = fblas.dcopy + dtype = float64 + + +try: + class TestCcopy(BaseCopy): + blas_func = fblas.ccopy + dtype = complex64 +except AttributeError: + class TestCcopy: + pass + + +class TestZcopy(BaseCopy): + blas_func = fblas.zcopy + dtype = complex128 + + +################################################## +# Test blas ?swap + +class BaseSwap: + ''' Mixin class for swap tests ''' + + def test_simple(self): + x = arange(3., dtype=self.dtype) + y = zeros(shape(x), x.dtype) + desired_x = y.copy() + desired_y = x.copy() + x, y = self.blas_func(x, y) + assert_array_equal(desired_x, x) + assert_array_equal(desired_y, y) + + def test_x_stride(self): + x = arange(6., dtype=self.dtype) + y = zeros(3, x.dtype) + desired_x = y.copy() + desired_y = x.copy()[::2] + x, y = self.blas_func(x, y, n=3, incx=2) + assert_array_equal(desired_x, x[::2]) + assert_array_equal(desired_y, y) + + def test_y_stride(self): + x = arange(3., dtype=self.dtype) + y = zeros(6, x.dtype) + desired_x = y.copy()[::2] + desired_y = x.copy() + x, y = self.blas_func(x, y, n=3, incy=2) + assert_array_equal(desired_x, x) + assert_array_equal(desired_y, y[::2]) + + def test_x_and_y_stride(self): + x = arange(12., dtype=self.dtype) + y = zeros(6, x.dtype) + desired_x = y.copy()[::2] + desired_y = x.copy()[::4] + x, y = self.blas_func(x, y, n=3, incx=4, incy=2) + assert_array_equal(desired_x, x[::4]) + assert_array_equal(desired_y, y[::2]) + + def test_x_bad_size(self): + x = arange(12., dtype=self.dtype) + y = zeros(6, x.dtype) + with pytest.raises(Exception, match='failed for 1st keyword'): + self.blas_func(x, y, n=4, incx=5) + + def test_y_bad_size(self): + x = arange(12., dtype=self.dtype) + y = zeros(6, x.dtype) + with pytest.raises(Exception, match='failed for 1st keyword'): + self.blas_func(x, y, n=3, incy=5) + + +try: + class TestSswap(BaseSwap): + blas_func = fblas.sswap + dtype = float32 +except AttributeError: + class TestSswap: + pass + + +class TestDswap(BaseSwap): + blas_func = fblas.dswap + dtype = float64 + + +try: + class TestCswap(BaseSwap): + blas_func = fblas.cswap + dtype = complex64 +except AttributeError: + class TestCswap: + pass + + +class TestZswap(BaseSwap): + blas_func = fblas.zswap + dtype = complex128 + +################################################## +# Test blas ?gemv +# This will be a mess to test all cases. 
+ + +class BaseGemv: + ''' Mixin class for gemv tests ''' + + def get_data(self, x_stride=1, y_stride=1): + mult = array(1, dtype=self.dtype) + if self.dtype in [complex64, complex128]: + mult = array(1+1j, dtype=self.dtype) + from numpy.random import normal, seed + seed(1234) + alpha = array(1., dtype=self.dtype) * mult + beta = array(1., dtype=self.dtype) * mult + a = normal(0., 1., (3, 3)).astype(self.dtype) * mult + x = arange(shape(a)[0]*x_stride, dtype=self.dtype) * mult + y = arange(shape(a)[1]*y_stride, dtype=self.dtype) * mult + return alpha, beta, a, x, y + + def test_simple(self): + alpha, beta, a, x, y = self.get_data() + desired_y = alpha*matrixmultiply(a, x)+beta*y + y = self.blas_func(alpha, a, x, beta, y) + assert_array_almost_equal(desired_y, y) + + def test_default_beta_y(self): + alpha, beta, a, x, y = self.get_data() + desired_y = matrixmultiply(a, x) + y = self.blas_func(1, a, x) + assert_array_almost_equal(desired_y, y) + + def test_simple_transpose(self): + alpha, beta, a, x, y = self.get_data() + desired_y = alpha*matrixmultiply(transpose(a), x)+beta*y + y = self.blas_func(alpha, a, x, beta, y, trans=1) + assert_array_almost_equal(desired_y, y) + + def test_simple_transpose_conj(self): + alpha, beta, a, x, y = self.get_data() + desired_y = alpha*matrixmultiply(transpose(conjugate(a)), x)+beta*y + y = self.blas_func(alpha, a, x, beta, y, trans=2) + assert_array_almost_equal(desired_y, y) + + def test_x_stride(self): + alpha, beta, a, x, y = self.get_data(x_stride=2) + desired_y = alpha*matrixmultiply(a, x[::2])+beta*y + y = self.blas_func(alpha, a, x, beta, y, incx=2) + assert_array_almost_equal(desired_y, y) + + def test_x_stride_transpose(self): + alpha, beta, a, x, y = self.get_data(x_stride=2) + desired_y = alpha*matrixmultiply(transpose(a), x[::2])+beta*y + y = self.blas_func(alpha, a, x, beta, y, trans=1, incx=2) + assert_array_almost_equal(desired_y, y) + + def test_x_stride_assert(self): + # What is the use of this test? + alpha, beta, a, x, y = self.get_data(x_stride=2) + with pytest.raises(Exception, match='failed for 3rd argument'): + y = self.blas_func(1, a, x, 1, y, trans=0, incx=3) + with pytest.raises(Exception, match='failed for 3rd argument'): + y = self.blas_func(1, a, x, 1, y, trans=1, incx=3) + + def test_y_stride(self): + alpha, beta, a, x, y = self.get_data(y_stride=2) + desired_y = y.copy() + desired_y[::2] = alpha*matrixmultiply(a, x)+beta*y[::2] + y = self.blas_func(alpha, a, x, beta, y, incy=2) + assert_array_almost_equal(desired_y, y) + + def test_y_stride_transpose(self): + alpha, beta, a, x, y = self.get_data(y_stride=2) + desired_y = y.copy() + desired_y[::2] = alpha*matrixmultiply(transpose(a), x)+beta*y[::2] + y = self.blas_func(alpha, a, x, beta, y, trans=1, incy=2) + assert_array_almost_equal(desired_y, y) + + def test_y_stride_assert(self): + # What is the use of this test? 
+ alpha, beta, a, x, y = self.get_data(y_stride=2) + with pytest.raises(Exception, match='failed for 2nd keyword'): + y = self.blas_func(1, a, x, 1, y, trans=0, incy=3) + with pytest.raises(Exception, match='failed for 2nd keyword'): + y = self.blas_func(1, a, x, 1, y, trans=1, incy=3) + + +try: + class TestSgemv(BaseGemv): + blas_func = fblas.sgemv + dtype = float32 + + def test_sgemv_on_osx(self): + from itertools import product + import sys + import numpy as np + + if sys.platform != 'darwin': + return + + def aligned_array(shape, align, dtype, order='C'): + # Make array shape `shape` with aligned at `align` bytes + d = dtype() + # Make array of correct size with `align` extra bytes + N = np.prod(shape) + tmp = np.zeros(N * d.nbytes + align, dtype=np.uint8) + address = tmp.__array_interface__["data"][0] + # Find offset into array giving desired alignment + for offset in range(align): + if (address + offset) % align == 0: + break + tmp = tmp[offset:offset+N*d.nbytes].view(dtype=dtype) + return tmp.reshape(shape, order=order) + + def as_aligned(arr, align, dtype, order='C'): + # Copy `arr` into an aligned array with same shape + aligned = aligned_array(arr.shape, align, dtype, order) + aligned[:] = arr[:] + return aligned + + def assert_dot_close(A, X, desired): + assert_allclose(self.blas_func(1.0, A, X), desired, + rtol=1e-5, atol=1e-7) + + testdata = product((15, 32), (10000,), (200, 89), ('C', 'F')) + for align, m, n, a_order in testdata: + A_d = np.random.rand(m, n) + X_d = np.random.rand(n) + desired = np.dot(A_d, X_d) + # Calculation with aligned single precision + A_f = as_aligned(A_d, align, np.float32, order=a_order) + X_f = as_aligned(X_d, align, np.float32, order=a_order) + assert_dot_close(A_f, X_f, desired) + +except AttributeError: + class TestSgemv: + pass + + +class TestDgemv(BaseGemv): + blas_func = fblas.dgemv + dtype = float64 + + +try: + class TestCgemv(BaseGemv): + blas_func = fblas.cgemv + dtype = complex64 +except AttributeError: + class TestCgemv: + pass + + +class TestZgemv(BaseGemv): + blas_func = fblas.zgemv + dtype = complex128 + + +""" +################################################## +### Test blas ?ger +### This will be a mess to test all cases. + +class BaseGer: + def get_data(self,x_stride=1,y_stride=1): + from numpy.random import normal, seed + seed(1234) + alpha = array(1., dtype = self.dtype) + a = normal(0.,1.,(3,3)).astype(self.dtype) + x = arange(shape(a)[0]*x_stride,dtype=self.dtype) + y = arange(shape(a)[1]*y_stride,dtype=self.dtype) + return alpha,a,x,y + def test_simple(self): + alpha,a,x,y = self.get_data() + # transpose takes care of Fortran vs. 
C(and Python) memory layout + desired_a = alpha*transpose(x[:,newaxis]*y) + a + self.blas_func(x,y,a) + assert_array_almost_equal(desired_a,a) + def test_x_stride(self): + alpha,a,x,y = self.get_data(x_stride=2) + desired_a = alpha*transpose(x[::2,newaxis]*y) + a + self.blas_func(x,y,a,incx=2) + assert_array_almost_equal(desired_a,a) + def test_x_stride_assert(self): + alpha,a,x,y = self.get_data(x_stride=2) + with pytest.raises(ValueError, match='foo'): + self.blas_func(x,y,a,incx=3) + def test_y_stride(self): + alpha,a,x,y = self.get_data(y_stride=2) + desired_a = alpha*transpose(x[:,newaxis]*y[::2]) + a + self.blas_func(x,y,a,incy=2) + assert_array_almost_equal(desired_a,a) + + def test_y_stride_assert(self): + alpha,a,x,y = self.get_data(y_stride=2) + with pytest.raises(ValueError, match='foo'): + self.blas_func(a,x,y,incy=3) + +class TestSger(BaseGer): + blas_func = fblas.sger + dtype = float32 +class TestDger(BaseGer): + blas_func = fblas.dger + dtype = float64 +""" +################################################## +# Test blas ?gerc +# This will be a mess to test all cases. + +""" +class BaseGerComplex(BaseGer): + def get_data(self,x_stride=1,y_stride=1): + from numpy.random import normal, seed + seed(1234) + alpha = array(1+1j, dtype = self.dtype) + a = normal(0.,1.,(3,3)).astype(self.dtype) + a = a + normal(0.,1.,(3,3)) * array(1j, dtype = self.dtype) + x = normal(0.,1.,shape(a)[0]*x_stride).astype(self.dtype) + x = x + x * array(1j, dtype = self.dtype) + y = normal(0.,1.,shape(a)[1]*y_stride).astype(self.dtype) + y = y + y * array(1j, dtype = self.dtype) + return alpha,a,x,y + def test_simple(self): + alpha,a,x,y = self.get_data() + # transpose takes care of Fortran vs. C(and Python) memory layout + a = a * array(0.,dtype = self.dtype) + #desired_a = alpha*transpose(x[:,newaxis]*self.transform(y)) + a + desired_a = alpha*transpose(x[:,newaxis]*y) + a + #self.blas_func(x,y,a,alpha = alpha) + fblas.cgeru(x,y,a,alpha = alpha) + assert_array_almost_equal(desired_a,a) + + #def test_x_stride(self): + # alpha,a,x,y = self.get_data(x_stride=2) + # desired_a = alpha*transpose(x[::2,newaxis]*self.transform(y)) + a + # self.blas_func(x,y,a,incx=2) + # assert_array_almost_equal(desired_a,a) + #def test_y_stride(self): + # alpha,a,x,y = self.get_data(y_stride=2) + # desired_a = alpha*transpose(x[:,newaxis]*self.transform(y[::2])) + a + # self.blas_func(x,y,a,incy=2) + # assert_array_almost_equal(desired_a,a) + +class TestCgeru(BaseGerComplex): + blas_func = fblas.cgeru + dtype = complex64 + def transform(self,x): + return x +class TestZgeru(BaseGerComplex): + blas_func = fblas.zgeru + dtype = complex128 + def transform(self,x): + return x + +class TestCgerc(BaseGerComplex): + blas_func = fblas.cgerc + dtype = complex64 + def transform(self,x): + return conjugate(x) + +class TestZgerc(BaseGerComplex): + blas_func = fblas.zgerc + dtype = complex128 + def transform(self,x): + return conjugate(x) +""" diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_interpolative.py b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_interpolative.py new file mode 100644 index 0000000000000000000000000000000000000000..ddc56f7c7366fe2bf8967a722bcdd0f32dd19036 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_interpolative.py @@ -0,0 +1,241 @@ +#****************************************************************************** +# Copyright (C) 2013 Kenneth L. 
Ho +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. Redistributions in binary +# form must reproduce the above copyright notice, this list of conditions and +# the following disclaimer in the documentation and/or other materials +# provided with the distribution. +# +# None of the names of the copyright holders may be used to endorse or +# promote products derived from this software without specific prior written +# permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +#****************************************************************************** + +import scipy.linalg.interpolative as pymatrixid +import numpy as np +from scipy.linalg import hilbert, svdvals, norm +from scipy.sparse.linalg import aslinearoperator +from scipy.linalg.interpolative import interp_decomp + +from numpy.testing import (assert_, assert_allclose, assert_equal, + assert_array_equal) +import pytest +from pytest import raises as assert_raises +import sys +_IS_32BIT = (sys.maxsize < 2**32) + + +@pytest.fixture() +def eps(): + yield 1e-12 + + +@pytest.fixture(params=[np.float64, np.complex128]) +def A(request): + # construct Hilbert matrix + # set parameters + n = 300 + yield hilbert(n).astype(request.param) + + +@pytest.fixture() +def L(A): + yield aslinearoperator(A) + + +@pytest.fixture() +def rank(A, eps): + S = np.linalg.svd(A, compute_uv=False) + try: + rank = np.nonzero(S < eps)[0][0] + except IndexError: + rank = A.shape[0] + return rank + + +class TestInterpolativeDecomposition: + + @pytest.mark.parametrize( + "rand,lin_op", + [(False, False), (True, False), (True, True)]) + def test_real_id_fixed_precision(self, A, L, eps, rand, lin_op): + if _IS_32BIT and A.dtype == np.complex128 and rand: + pytest.xfail("bug in external fortran code") + # Test ID routines on a Hilbert matrix. 
+ A_or_L = A if not lin_op else L + + k, idx, proj = pymatrixid.interp_decomp(A_or_L, eps, rand=rand) + B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj) + assert_allclose(A, B, rtol=eps, atol=1e-08) + + @pytest.mark.parametrize( + "rand,lin_op", + [(False, False), (True, False), (True, True)]) + def test_real_id_fixed_rank(self, A, L, eps, rank, rand, lin_op): + if _IS_32BIT and A.dtype == np.complex128 and rand: + pytest.xfail("bug in external fortran code") + k = rank + A_or_L = A if not lin_op else L + + idx, proj = pymatrixid.interp_decomp(A_or_L, k, rand=rand) + B = pymatrixid.reconstruct_matrix_from_id(A[:, idx[:k]], idx, proj) + assert_allclose(A, B, rtol=eps, atol=1e-08) + + @pytest.mark.parametrize("rand,lin_op", [(False, False)]) + def test_real_id_skel_and_interp_matrices( + self, A, L, eps, rank, rand, lin_op): + k = rank + A_or_L = A if not lin_op else L + + idx, proj = pymatrixid.interp_decomp(A_or_L, k, rand=rand) + P = pymatrixid.reconstruct_interp_matrix(idx, proj) + B = pymatrixid.reconstruct_skel_matrix(A, k, idx) + assert_allclose(B, A[:, idx[:k]], rtol=eps, atol=1e-08) + assert_allclose(B @ P, A, rtol=eps, atol=1e-08) + + @pytest.mark.parametrize( + "rand,lin_op", + [(False, False), (True, False), (True, True)]) + def test_svd_fixed_precison(self, A, L, eps, rand, lin_op): + if _IS_32BIT and A.dtype == np.complex128 and rand: + pytest.xfail("bug in external fortran code") + A_or_L = A if not lin_op else L + + U, S, V = pymatrixid.svd(A_or_L, eps, rand=rand) + B = U * S @ V.T.conj() + assert_allclose(A, B, rtol=eps, atol=1e-08) + + @pytest.mark.parametrize( + "rand,lin_op", + [(False, False), (True, False), (True, True)]) + def test_svd_fixed_rank(self, A, L, eps, rank, rand, lin_op): + if _IS_32BIT and A.dtype == np.complex128 and rand: + pytest.xfail("bug in external fortran code") + k = rank + A_or_L = A if not lin_op else L + + U, S, V = pymatrixid.svd(A_or_L, k, rand=rand) + B = U * S @ V.T.conj() + assert_allclose(A, B, rtol=eps, atol=1e-08) + + def test_id_to_svd(self, A, eps, rank): + k = rank + + idx, proj = pymatrixid.interp_decomp(A, k, rand=False) + U, S, V = pymatrixid.id_to_svd(A[:, idx[:k]], idx, proj) + B = U * S @ V.T.conj() + assert_allclose(A, B, rtol=eps, atol=1e-08) + + def test_estimate_spectral_norm(self, A): + s = svdvals(A) + norm_2_est = pymatrixid.estimate_spectral_norm(A) + assert_allclose(norm_2_est, s[0], rtol=1e-6, atol=1e-8) + + def test_estimate_spectral_norm_diff(self, A): + B = A.copy() + B[:, 0] *= 1.2 + s = svdvals(A - B) + norm_2_est = pymatrixid.estimate_spectral_norm_diff(A, B) + assert_allclose(norm_2_est, s[0], rtol=1e-6, atol=1e-8) + + def test_rank_estimates_array(self, A): + B = np.array([[1, 1, 0], [0, 0, 1], [0, 0, 1]], dtype=A.dtype) + + for M in [A, B]: + rank_tol = 1e-9 + rank_np = np.linalg.matrix_rank(M, norm(M, 2) * rank_tol) + rank_est = pymatrixid.estimate_rank(M, rank_tol) + assert_(rank_est >= rank_np) + assert_(rank_est <= rank_np + 10) + + def test_rank_estimates_lin_op(self, A): + B = np.array([[1, 1, 0], [0, 0, 1], [0, 0, 1]], dtype=A.dtype) + + for M in [A, B]: + ML = aslinearoperator(M) + rank_tol = 1e-9 + rank_np = np.linalg.matrix_rank(M, norm(M, 2) * rank_tol) + rank_est = pymatrixid.estimate_rank(ML, rank_tol) + assert_(rank_est >= rank_np - 4) + assert_(rank_est <= rank_np + 4) + + def test_rand(self): + pymatrixid.seed('default') + assert_allclose(pymatrixid.rand(2), [0.8932059, 0.64500803], + rtol=1e-4, atol=1e-8) + + pymatrixid.seed(1234) + x1 = pymatrixid.rand(2) + 
assert_allclose(x1, [0.7513823, 0.06861718], rtol=1e-4, atol=1e-8) + + np.random.seed(1234) + pymatrixid.seed() + x2 = pymatrixid.rand(2) + + np.random.seed(1234) + pymatrixid.seed(np.random.rand(55)) + x3 = pymatrixid.rand(2) + + assert_allclose(x1, x2) + assert_allclose(x1, x3) + + def test_badcall(self): + A = hilbert(5).astype(np.float32) + with assert_raises(ValueError): + pymatrixid.interp_decomp(A, 1e-6, rand=False) + + def test_rank_too_large(self): + # svd(array, k) should not segfault + a = np.ones((4, 3)) + with assert_raises(ValueError): + pymatrixid.svd(a, 4) + + def test_full_rank(self): + eps = 1.0e-12 + + # fixed precision + A = np.random.rand(16, 8) + k, idx, proj = pymatrixid.interp_decomp(A, eps) + assert_equal(k, A.shape[1]) + + P = pymatrixid.reconstruct_interp_matrix(idx, proj) + B = pymatrixid.reconstruct_skel_matrix(A, k, idx) + assert_allclose(A, B @ P) + + # fixed rank + idx, proj = pymatrixid.interp_decomp(A, k) + + P = pymatrixid.reconstruct_interp_matrix(idx, proj) + B = pymatrixid.reconstruct_skel_matrix(A, k, idx) + assert_allclose(A, B @ P) + + @pytest.mark.parametrize("dtype", [np.float64, np.complex128]) + @pytest.mark.parametrize("rand", [True, False]) + @pytest.mark.parametrize("eps", [1, 0.1]) + def test_bug_9793(self, dtype, rand, eps): + if _IS_32BIT and dtype == np.complex128 and rand: + pytest.xfail("bug in external fortran code") + A = np.array([[-1, -1, -1, 0, 0, 0], + [0, 0, 0, 1, 1, 1], + [1, 0, 0, 1, 0, 0], + [0, 1, 0, 0, 1, 0], + [0, 0, 1, 0, 0, 1]], + dtype=dtype, order="C") + B = A.copy() + interp_decomp(A.T, eps, rand=rand) + assert_array_equal(A, B) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_lapack.py b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_lapack.py new file mode 100644 index 0000000000000000000000000000000000000000..4792a86066b46b56ba28c87427fc288b78049a8c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_lapack.py @@ -0,0 +1,3399 @@ +# +# Created by: Pearu Peterson, September 2002 +# + +import sys +from functools import reduce + +from numpy.testing import (assert_equal, assert_array_almost_equal, assert_, + assert_allclose, assert_almost_equal, + assert_array_equal) +import pytest +from pytest import raises as assert_raises + +import numpy as np +from numpy import (eye, ones, zeros, zeros_like, triu, tril, tril_indices, + triu_indices) + +from numpy.random import rand, randint, seed + +from scipy.linalg import (_flapack as flapack, lapack, inv, svd, cholesky, + solve, ldl, norm, block_diag, qr, eigh, qz) + +from scipy.linalg.lapack import _compute_lwork +from scipy.stats import ortho_group, unitary_group + +import scipy.sparse as sps +try: + from scipy.__config__ import CONFIG +except ImportError: + CONFIG = None + +try: + from scipy.linalg import _clapack as clapack +except ImportError: + clapack = None +from scipy.linalg.lapack import get_lapack_funcs +from scipy.linalg.blas import get_blas_funcs + +REAL_DTYPES = [np.float32, np.float64] +COMPLEX_DTYPES = [np.complex64, np.complex128] +DTYPES = REAL_DTYPES + COMPLEX_DTYPES + +blas_provider = blas_version = None +if CONFIG is not None: + blas_provider = CONFIG['Build Dependencies']['blas']['name'] + blas_version = CONFIG['Build Dependencies']['blas']['version'] + + +def generate_random_dtype_array(shape, dtype): + # generates a random matrix of desired data type of shape + if dtype in COMPLEX_DTYPES: + return (np.random.rand(*shape) + + np.random.rand(*shape)*1.0j).astype(dtype) + return 
np.random.rand(*shape).astype(dtype) + + +def test_lapack_documented(): + """Test that all entries are in the doc.""" + if lapack.__doc__ is None: # just in case there is a python -OO + pytest.skip('lapack.__doc__ is None') + names = set(lapack.__doc__.split()) + ignore_list = { + 'absolute_import', 'clapack', 'division', 'find_best_lapack_type', + 'flapack', 'print_function', 'HAS_ILP64', + } + missing = list() + for name in dir(lapack): + if (not name.startswith('_') and name not in ignore_list and + name not in names): + missing.append(name) + assert missing == [], 'Name(s) missing from lapack.__doc__ or ignore_list' + + +class TestFlapackSimple: + + def test_gebal(self): + a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]] + a1 = [[1, 0, 0, 3e-4], + [4, 0, 0, 2e-3], + [7, 1, 0, 0], + [0, 1, 0, 0]] + for p in 'sdzc': + f = getattr(flapack, p+'gebal', None) + if f is None: + continue + ba, lo, hi, pivscale, info = f(a) + assert_(not info, repr(info)) + assert_array_almost_equal(ba, a) + assert_equal((lo, hi), (0, len(a[0])-1)) + assert_array_almost_equal(pivscale, np.ones(len(a))) + + ba, lo, hi, pivscale, info = f(a1, permute=1, scale=1) + assert_(not info, repr(info)) + # print(a1) + # print(ba, lo, hi, pivscale) + + def test_gehrd(self): + a = [[-149, -50, -154], + [537, 180, 546], + [-27, -9, -25]] + for p in 'd': + f = getattr(flapack, p+'gehrd', None) + if f is None: + continue + ht, tau, info = f(a) + assert_(not info, repr(info)) + + def test_trsyl(self): + a = np.array([[1, 2], [0, 4]]) + b = np.array([[5, 6], [0, 8]]) + c = np.array([[9, 10], [11, 12]]) + trans = 'T' + + # Test single and double implementations, including most + # of the options + for dtype in 'fdFD': + a1, b1, c1 = a.astype(dtype), b.astype(dtype), c.astype(dtype) + trsyl, = get_lapack_funcs(('trsyl',), (a1,)) + if dtype.isupper(): # is complex dtype + a1[0] += 1j + trans = 'C' + + x, scale, info = trsyl(a1, b1, c1) + assert_array_almost_equal(np.dot(a1, x) + np.dot(x, b1), + scale * c1) + + x, scale, info = trsyl(a1, b1, c1, trana=trans, tranb=trans) + assert_array_almost_equal( + np.dot(a1.conjugate().T, x) + np.dot(x, b1.conjugate().T), + scale * c1, decimal=4) + + x, scale, info = trsyl(a1, b1, c1, isgn=-1) + assert_array_almost_equal(np.dot(a1, x) - np.dot(x, b1), + scale * c1, decimal=4) + + def test_lange(self): + a = np.array([ + [-149, -50, -154], + [537, 180, 546], + [-27, -9, -25]]) + + for dtype in 'fdFD': + for norm_str in 'Mm1OoIiFfEe': + a1 = a.astype(dtype) + if dtype.isupper(): + # is complex dtype + a1[0, 0] += 1j + + lange, = get_lapack_funcs(('lange',), (a1,)) + value = lange(norm_str, a1) + + if norm_str in 'FfEe': + if dtype in 'Ff': + decimal = 3 + else: + decimal = 7 + ref = np.sqrt(np.sum(np.square(np.abs(a1)))) + assert_almost_equal(value, ref, decimal) + else: + if norm_str in 'Mm': + ref = np.max(np.abs(a1)) + elif norm_str in '1Oo': + ref = np.max(np.sum(np.abs(a1), axis=0)) + elif norm_str in 'Ii': + ref = np.max(np.sum(np.abs(a1), axis=1)) + + assert_equal(value, ref) + + +class TestLapack: + + def test_flapack(self): + if hasattr(flapack, 'empty_module'): + # flapack module is empty + pass + + def test_clapack(self): + if hasattr(clapack, 'empty_module'): + # clapack module is empty + pass + + +class TestLeastSquaresSolvers: + + def test_gels(self): + seed(1234) + # Test fat/tall matrix argument handling - gh-issue #8329 + for ind, dtype in enumerate(DTYPES): + m = 10 + n = 20 + nrhs = 1 + a1 = rand(m, n).astype(dtype) + b1 = rand(n).astype(dtype) + gls, glslw = get_lapack_funcs(('gels', 
'gels_lwork'), dtype=dtype) + + # Request of sizes + lwork = _compute_lwork(glslw, m, n, nrhs) + _, _, info = gls(a1, b1, lwork=lwork) + assert_(info >= 0) + _, _, info = gls(a1, b1, trans='TTCC'[ind], lwork=lwork) + assert_(info >= 0) + + for dtype in REAL_DTYPES: + a1 = np.array([[1.0, 2.0], + [4.0, 5.0], + [7.0, 8.0]], dtype=dtype) + b1 = np.array([16.0, 17.0, 20.0], dtype=dtype) + gels, gels_lwork, geqrf = get_lapack_funcs( + ('gels', 'gels_lwork', 'geqrf'), (a1, b1)) + + m, n = a1.shape + if len(b1.shape) == 2: + nrhs = b1.shape[1] + else: + nrhs = 1 + + # Request of sizes + lwork = _compute_lwork(gels_lwork, m, n, nrhs) + + lqr, x, info = gels(a1, b1, lwork=lwork) + assert_allclose(x[:-1], np.array([-14.333333333333323, + 14.999999999999991], + dtype=dtype), + rtol=25*np.finfo(dtype).eps) + lqr_truth, _, _, _ = geqrf(a1) + assert_array_equal(lqr, lqr_truth) + + for dtype in COMPLEX_DTYPES: + a1 = np.array([[1.0+4.0j, 2.0], + [4.0+0.5j, 5.0-3.0j], + [7.0-2.0j, 8.0+0.7j]], dtype=dtype) + b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype) + gels, gels_lwork, geqrf = get_lapack_funcs( + ('gels', 'gels_lwork', 'geqrf'), (a1, b1)) + + m, n = a1.shape + if len(b1.shape) == 2: + nrhs = b1.shape[1] + else: + nrhs = 1 + + # Request of sizes + lwork = _compute_lwork(gels_lwork, m, n, nrhs) + + lqr, x, info = gels(a1, b1, lwork=lwork) + assert_allclose(x[:-1], + np.array([1.161753632288328-1.901075709391912j, + 1.735882340522193+1.521240901196909j], + dtype=dtype), rtol=25*np.finfo(dtype).eps) + lqr_truth, _, _, _ = geqrf(a1) + assert_array_equal(lqr, lqr_truth) + + def test_gelsd(self): + for dtype in REAL_DTYPES: + a1 = np.array([[1.0, 2.0], + [4.0, 5.0], + [7.0, 8.0]], dtype=dtype) + b1 = np.array([16.0, 17.0, 20.0], dtype=dtype) + gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'), + (a1, b1)) + + m, n = a1.shape + if len(b1.shape) == 2: + nrhs = b1.shape[1] + else: + nrhs = 1 + + # Request of sizes + work, iwork, info = gelsd_lwork(m, n, nrhs, -1) + lwork = int(np.real(work)) + iwork_size = iwork + + x, s, rank, info = gelsd(a1, b1, lwork, iwork_size, + -1, False, False) + assert_allclose(x[:-1], np.array([-14.333333333333323, + 14.999999999999991], + dtype=dtype), + rtol=25*np.finfo(dtype).eps) + assert_allclose(s, np.array([12.596017180511966, + 0.583396253199685], dtype=dtype), + rtol=25*np.finfo(dtype).eps) + + for dtype in COMPLEX_DTYPES: + a1 = np.array([[1.0+4.0j, 2.0], + [4.0+0.5j, 5.0-3.0j], + [7.0-2.0j, 8.0+0.7j]], dtype=dtype) + b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype) + gelsd, gelsd_lwork = get_lapack_funcs(('gelsd', 'gelsd_lwork'), + (a1, b1)) + + m, n = a1.shape + if len(b1.shape) == 2: + nrhs = b1.shape[1] + else: + nrhs = 1 + + # Request of sizes + work, rwork, iwork, info = gelsd_lwork(m, n, nrhs, -1) + lwork = int(np.real(work)) + rwork_size = int(rwork) + iwork_size = iwork + + x, s, rank, info = gelsd(a1, b1, lwork, rwork_size, iwork_size, + -1, False, False) + assert_allclose(x[:-1], + np.array([1.161753632288328-1.901075709391912j, + 1.735882340522193+1.521240901196909j], + dtype=dtype), rtol=25*np.finfo(dtype).eps) + assert_allclose(s, + np.array([13.035514762572043, 4.337666985231382], + dtype=dtype), rtol=25*np.finfo(dtype).eps) + + def test_gelss(self): + + for dtype in REAL_DTYPES: + a1 = np.array([[1.0, 2.0], + [4.0, 5.0], + [7.0, 8.0]], dtype=dtype) + b1 = np.array([16.0, 17.0, 20.0], dtype=dtype) + gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'), + (a1, b1)) + + m, n = a1.shape + if len(b1.shape) == 2: + nrhs = 
b1.shape[1] + else: + nrhs = 1 + + # Request of sizes + work, info = gelss_lwork(m, n, nrhs, -1) + lwork = int(np.real(work)) + + v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False) + assert_allclose(x[:-1], np.array([-14.333333333333323, + 14.999999999999991], + dtype=dtype), + rtol=25*np.finfo(dtype).eps) + assert_allclose(s, np.array([12.596017180511966, + 0.583396253199685], dtype=dtype), + rtol=25*np.finfo(dtype).eps) + + for dtype in COMPLEX_DTYPES: + a1 = np.array([[1.0+4.0j, 2.0], + [4.0+0.5j, 5.0-3.0j], + [7.0-2.0j, 8.0+0.7j]], dtype=dtype) + b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype) + gelss, gelss_lwork = get_lapack_funcs(('gelss', 'gelss_lwork'), + (a1, b1)) + + m, n = a1.shape + if len(b1.shape) == 2: + nrhs = b1.shape[1] + else: + nrhs = 1 + + # Request of sizes + work, info = gelss_lwork(m, n, nrhs, -1) + lwork = int(np.real(work)) + + v, x, s, rank, work, info = gelss(a1, b1, -1, lwork, False, False) + assert_allclose(x[:-1], + np.array([1.161753632288328-1.901075709391912j, + 1.735882340522193+1.521240901196909j], + dtype=dtype), + rtol=25*np.finfo(dtype).eps) + assert_allclose(s, np.array([13.035514762572043, + 4.337666985231382], dtype=dtype), + rtol=25*np.finfo(dtype).eps) + + def test_gelsy(self): + + for dtype in REAL_DTYPES: + a1 = np.array([[1.0, 2.0], + [4.0, 5.0], + [7.0, 8.0]], dtype=dtype) + b1 = np.array([16.0, 17.0, 20.0], dtype=dtype) + gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'), + (a1, b1)) + + m, n = a1.shape + if len(b1.shape) == 2: + nrhs = b1.shape[1] + else: + nrhs = 1 + + # Request of sizes + work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps) + lwork = int(np.real(work)) + + jptv = np.zeros((a1.shape[1], 1), dtype=np.int32) + v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps, + lwork, False, False) + assert_allclose(x[:-1], np.array([-14.333333333333323, + 14.999999999999991], + dtype=dtype), + rtol=25*np.finfo(dtype).eps) + + for dtype in COMPLEX_DTYPES: + a1 = np.array([[1.0+4.0j, 2.0], + [4.0+0.5j, 5.0-3.0j], + [7.0-2.0j, 8.0+0.7j]], dtype=dtype) + b1 = np.array([16.0, 17.0+2.0j, 20.0-4.0j], dtype=dtype) + gelsy, gelsy_lwork = get_lapack_funcs(('gelsy', 'gelss_lwork'), + (a1, b1)) + + m, n = a1.shape + if len(b1.shape) == 2: + nrhs = b1.shape[1] + else: + nrhs = 1 + + # Request of sizes + work, info = gelsy_lwork(m, n, nrhs, 10*np.finfo(dtype).eps) + lwork = int(np.real(work)) + + jptv = np.zeros((a1.shape[1], 1), dtype=np.int32) + v, x, j, rank, info = gelsy(a1, b1, jptv, np.finfo(dtype).eps, + lwork, False, False) + assert_allclose(x[:-1], + np.array([1.161753632288328-1.901075709391912j, + 1.735882340522193+1.521240901196909j], + dtype=dtype), + rtol=25*np.finfo(dtype).eps) + + +@pytest.mark.parametrize('dtype', DTYPES) +@pytest.mark.parametrize('shape', [(3, 4), (5, 2), (2**18, 2**18)]) +def test_geqrf_lwork(dtype, shape): + geqrf_lwork = get_lapack_funcs(('geqrf_lwork'), dtype=dtype) + m, n = shape + lwork, info = geqrf_lwork(m=m, n=n) + assert_equal(info, 0) + + +class TestRegression: + + def test_ticket_1645(self): + # Check that RQ routines have correct lwork + for dtype in DTYPES: + a = np.zeros((300, 2), dtype=dtype) + + gerqf, = get_lapack_funcs(['gerqf'], [a]) + assert_raises(Exception, gerqf, a, lwork=2) + rq, tau, work, info = gerqf(a) + + if dtype in REAL_DTYPES: + orgrq, = get_lapack_funcs(['orgrq'], [a]) + assert_raises(Exception, orgrq, rq[-2:], tau, lwork=1) + orgrq(rq[-2:], tau, lwork=2) + elif dtype in COMPLEX_DTYPES: + ungrq, = get_lapack_funcs(['ungrq'], 
[a]) + assert_raises(Exception, ungrq, rq[-2:], tau, lwork=1) + ungrq(rq[-2:], tau, lwork=2) + + +class TestDpotr: + def test_gh_2691(self): + # 'lower' argument of dportf/dpotri + for lower in [True, False]: + for clean in [True, False]: + np.random.seed(42) + x = np.random.normal(size=(3, 3)) + a = x.dot(x.T) + + dpotrf, dpotri = get_lapack_funcs(("potrf", "potri"), (a, )) + + c, info = dpotrf(a, lower, clean=clean) + dpt = dpotri(c, lower)[0] + + if lower: + assert_allclose(np.tril(dpt), np.tril(inv(a))) + else: + assert_allclose(np.triu(dpt), np.triu(inv(a))) + + +class TestDlasd4: + def test_sing_val_update(self): + + sigmas = np.array([4., 3., 2., 0]) + m_vec = np.array([3.12, 5.7, -4.8, -2.2]) + + M = np.hstack((np.vstack((np.diag(sigmas[0:-1]), + np.zeros((1, len(m_vec) - 1)))), + m_vec[:, np.newaxis])) + SM = svd(M, full_matrices=False, compute_uv=False, overwrite_a=False, + check_finite=False) + + it_len = len(sigmas) + sgm = np.concatenate((sigmas[::-1], [sigmas[0] + it_len*norm(m_vec)])) + mvc = np.concatenate((m_vec[::-1], (0,))) + + lasd4 = get_lapack_funcs('lasd4', (sigmas,)) + + roots = [] + for i in range(0, it_len): + res = lasd4(i, sgm, mvc) + roots.append(res[1]) + + assert_((res[3] <= 0), "LAPACK root finding dlasd4 failed to find \ + the singular value %i" % i) + roots = np.array(roots)[::-1] + + assert_((not np.any(np.isnan(roots)), "There are NaN roots")) + assert_allclose(SM, roots, atol=100*np.finfo(np.float64).eps, + rtol=100*np.finfo(np.float64).eps) + + +class TestTbtrs: + + @pytest.mark.parametrize('dtype', DTYPES) + def test_nag_example_f07vef_f07vsf(self, dtype): + """Test real (f07vef) and complex (f07vsf) examples from NAG + + Examples available from: + * https://www.nag.com/numeric/fl/nagdoc_latest/html/f07/f07vef.html + * https://www.nag.com/numeric/fl/nagdoc_latest/html/f07/f07vsf.html + + """ + if dtype in REAL_DTYPES: + ab = np.array([[-4.16, 4.78, 6.32, 0.16], + [-2.25, 5.86, -4.82, 0]], + dtype=dtype) + b = np.array([[-16.64, -4.16], + [-13.78, -16.59], + [13.10, -4.94], + [-14.14, -9.96]], + dtype=dtype) + x_out = np.array([[4, 1], + [-1, -3], + [3, 2], + [2, -2]], + dtype=dtype) + elif dtype in COMPLEX_DTYPES: + ab = np.array([[-1.94+4.43j, 4.12-4.27j, 0.43-2.66j, 0.44+0.1j], + [-3.39+3.44j, -1.84+5.52j, 1.74 - 0.04j, 0], + [1.62+3.68j, -2.77-1.93j, 0, 0]], + dtype=dtype) + b = np.array([[-8.86 - 3.88j, -24.09 - 5.27j], + [-15.57 - 23.41j, -57.97 + 8.14j], + [-7.63 + 22.78j, 19.09 - 29.51j], + [-14.74 - 2.40j, 19.17 + 21.33j]], + dtype=dtype) + x_out = np.array([[2j, 1 + 5j], + [1 - 3j, -7 - 2j], + [-4.001887 - 4.988417j, 3.026830 + 4.003182j], + [1.996158 - 1.045105j, -6.103357 - 8.986653j]], + dtype=dtype) + else: + raise ValueError(f"Datatype {dtype} not understood.") + + tbtrs = get_lapack_funcs(('tbtrs'), dtype=dtype) + x, info = tbtrs(ab=ab, b=b, uplo='L') + assert_equal(info, 0) + assert_allclose(x, x_out, rtol=0, atol=1e-5) + + @pytest.mark.parametrize('dtype,trans', + [(dtype, trans) + for dtype in DTYPES for trans in ['N', 'T', 'C'] + if not (trans == 'C' and dtype in REAL_DTYPES)]) + @pytest.mark.parametrize('uplo', ['U', 'L']) + @pytest.mark.parametrize('diag', ['N', 'U']) + def test_random_matrices(self, dtype, trans, uplo, diag): + seed(1724) + # n, nrhs, kd are used to specify A and b. 
+ # A is of shape n x n with kd super/sub-diagonals + # b is an n x nrhs matrix + n, nrhs, kd = 4, 3, 2 + tbtrs = get_lapack_funcs('tbtrs', dtype=dtype) + + is_upper = (uplo == 'U') + ku = kd * is_upper + kl = kd - ku + + # Construct the diagonal and kd super/sub diagonals of A with + # the corresponding offsets. + band_offsets = range(ku, -kl - 1, -1) + band_widths = [n - abs(x) for x in band_offsets] + bands = [generate_random_dtype_array((width,), dtype) + for width in band_widths] + + if diag == 'U': # A must be unit triangular + bands[ku] = np.ones(n, dtype=dtype) + + # Construct the diagonal banded matrix A from the bands and offsets. + a = sps.diags(bands, band_offsets, format='dia') + + # Convert A into banded storage form + ab = np.zeros((kd + 1, n), dtype) + for row, k in enumerate(band_offsets): + ab[row, max(k, 0):min(n+k, n)] = a.diagonal(k) + + # The RHS values. + b = generate_random_dtype_array((n, nrhs), dtype) + + x, info = tbtrs(ab=ab, b=b, uplo=uplo, trans=trans, diag=diag) + assert_equal(info, 0) + + if trans == 'N': + assert_allclose(a @ x, b, rtol=5e-5) + elif trans == 'T': + assert_allclose(a.T @ x, b, rtol=5e-5) + elif trans == 'C': + assert_allclose(a.H @ x, b, rtol=5e-5) + else: + raise ValueError('Invalid trans argument') + + @pytest.mark.parametrize('uplo,trans,diag', + [['U', 'N', 'Invalid'], + ['U', 'Invalid', 'N'], + ['Invalid', 'N', 'N']]) + def test_invalid_argument_raises_exception(self, uplo, trans, diag): + """Test if invalid values of uplo, trans and diag raise exceptions""" + # Argument checks occur independently of the datatype used. + # This means we do not need to parameterize all available datatypes. + tbtrs = get_lapack_funcs('tbtrs', dtype=np.float64) + ab = rand(4, 2) + b = rand(2, 4) + assert_raises(Exception, tbtrs, ab, b, uplo, trans, diag) + + def test_zero_element_in_diagonal(self): + """Test if a matrix with a zero diagonal element is singular + + If the i-th diagonal of A is zero, ?tbtrs should return `i` in `info` + indicating the provided matrix is singular. + + Note that ?tbtrs requires the matrix A to be stored in banded form. + In this form the diagonal corresponds to the last row.""" + ab = np.ones((3, 4), dtype=float) + b = np.ones(4, dtype=float) + tbtrs = get_lapack_funcs('tbtrs', dtype=float) + + ab[-1, 3] = 0 + _, info = tbtrs(ab=ab, b=b, uplo='U') + assert_equal(info, 4) + + @pytest.mark.parametrize('ldab,n,ldb,nrhs', [ + (5, 5, 0, 5), + (5, 5, 3, 5) + ]) + def test_invalid_matrix_shapes(self, ldab, n, ldb, nrhs): + """Test ?tbtrs fails correctly if shapes are invalid.""" + ab = np.ones((ldab, n), dtype=float) + b = np.ones((ldb, nrhs), dtype=float) + tbtrs = get_lapack_funcs('tbtrs', dtype=float) + assert_raises(Exception, tbtrs, ab, b) + + +def test_lartg(): + for dtype in 'fdFD': + lartg = get_lapack_funcs('lartg', dtype=dtype) + + f = np.array(3, dtype) + g = np.array(4, dtype) + + if np.iscomplexobj(g): + g *= 1j + + cs, sn, r = lartg(f, g) + + assert_allclose(cs, 3.0/5.0) + assert_allclose(r, 5.0) + + if np.iscomplexobj(g): + assert_allclose(sn, -4.0j/5.0) + assert_(isinstance(r, complex)) + assert_(isinstance(cs, float)) + else: + assert_allclose(sn, 4.0/5.0) + + +def test_rot(): + # srot, drot from blas and crot and zrot from lapack. 
+ + for dtype in 'fdFD': + c = 0.6 + s = 0.8 + + u = np.full(4, 3, dtype) + v = np.full(4, 4, dtype) + atol = 10**-(np.finfo(dtype).precision-1) + + if dtype in 'fd': + rot = get_blas_funcs('rot', dtype=dtype) + f = 4 + else: + rot = get_lapack_funcs('rot', dtype=dtype) + s *= -1j + v *= 1j + f = 4j + + assert_allclose(rot(u, v, c, s), [[5, 5, 5, 5], + [0, 0, 0, 0]], atol=atol) + assert_allclose(rot(u, v, c, s, n=2), [[5, 5, 3, 3], + [0, 0, f, f]], atol=atol) + assert_allclose(rot(u, v, c, s, offx=2, offy=2), + [[3, 3, 5, 5], [f, f, 0, 0]], atol=atol) + assert_allclose(rot(u, v, c, s, incx=2, offy=2, n=2), + [[5, 3, 5, 3], [f, f, 0, 0]], atol=atol) + assert_allclose(rot(u, v, c, s, offx=2, incy=2, n=2), + [[3, 3, 5, 5], [0, f, 0, f]], atol=atol) + assert_allclose(rot(u, v, c, s, offx=2, incx=2, offy=2, incy=2, n=1), + [[3, 3, 5, 3], [f, f, 0, f]], atol=atol) + assert_allclose(rot(u, v, c, s, incx=-2, incy=-2, n=2), + [[5, 3, 5, 3], [0, f, 0, f]], atol=atol) + + a, b = rot(u, v, c, s, overwrite_x=1, overwrite_y=1) + assert_(a is u) + assert_(b is v) + assert_allclose(a, [5, 5, 5, 5], atol=atol) + assert_allclose(b, [0, 0, 0, 0], atol=atol) + + +def test_larfg_larf(): + np.random.seed(1234) + a0 = np.random.random((4, 4)) + a0 = a0.T.dot(a0) + + a0j = np.random.random((4, 4)) + 1j*np.random.random((4, 4)) + a0j = a0j.T.conj().dot(a0j) + + # our test here will be to do one step of reducing a hermetian matrix to + # tridiagonal form using householder transforms. + + for dtype in 'fdFD': + larfg, larf = get_lapack_funcs(['larfg', 'larf'], dtype=dtype) + + if dtype in 'FD': + a = a0j.copy() + else: + a = a0.copy() + + # generate a householder transform to clear a[2:,0] + alpha, x, tau = larfg(a.shape[0]-1, a[1, 0], a[2:, 0]) + + # create expected output + expected = np.zeros_like(a[:, 0]) + expected[0] = a[0, 0] + expected[1] = alpha + + # assemble householder vector + v = np.zeros_like(a[1:, 0]) + v[0] = 1.0 + v[1:] = x + + # apply transform from the left + a[1:, :] = larf(v, tau.conjugate(), a[1:, :], np.zeros(a.shape[1])) + + # apply transform from the right + a[:, 1:] = larf(v, tau, a[:, 1:], np.zeros(a.shape[0]), side='R') + + assert_allclose(a[:, 0], expected, atol=1e-5) + assert_allclose(a[0, :], expected, atol=1e-5) + + +def test_sgesdd_lwork_bug_workaround(): + # Test that SGESDD lwork is sufficiently large for LAPACK. + # + # This checks that _compute_lwork() correctly works around a bug in + # LAPACK versions older than 3.10.1. + + sgesdd_lwork = get_lapack_funcs('gesdd_lwork', dtype=np.float32, + ilp64='preferred') + n = 9537 + lwork = _compute_lwork(sgesdd_lwork, n, n, + compute_uv=True, full_matrices=True) + # If we called the Fortran function SGESDD directly with IWORK=-1, the + # LAPACK bug would result in lwork being 272929856, which was too small. + # (The result was returned in a single precision float, which does not + # have sufficient precision to represent the exact integer value that it + # computed internally.) The work-around implemented in _compute_lwork() + # will convert that to 272929888. If we are using LAPACK 3.10.1 or later + # (such as in OpenBLAS 0.3.21 or later), the work-around will return + # 272929920, because it does not know which version of LAPACK is being + # used, so it always applies the correction to whatever it is given. We + # will accept either 272929888 or 272929920. + # Note that the acceptable values are a LAPACK implementation detail. 
+ # If a future version of LAPACK changes how SGESDD works, and therefore + # changes the required LWORK size, the acceptable values might have to + # be updated. + assert lwork == 272929888 or lwork == 272929920 + + +class TestSytrd: + @pytest.mark.parametrize('dtype', REAL_DTYPES) + def test_sytrd_with_zero_dim_array(self, dtype): + # Assert that a 0x0 matrix raises an error + A = np.zeros((0, 0), dtype=dtype) + sytrd = get_lapack_funcs('sytrd', (A,)) + assert_raises(ValueError, sytrd, A) + + @pytest.mark.parametrize('dtype', REAL_DTYPES) + @pytest.mark.parametrize('n', (1, 3)) + def test_sytrd(self, dtype, n): + A = np.zeros((n, n), dtype=dtype) + + sytrd, sytrd_lwork = \ + get_lapack_funcs(('sytrd', 'sytrd_lwork'), (A,)) + + # some upper triangular array + A[np.triu_indices_from(A)] = \ + np.arange(1, n*(n+1)//2+1, dtype=dtype) + + # query lwork + lwork, info = sytrd_lwork(n) + assert_equal(info, 0) + + # check lower=1 behavior (shouldn't do much since the matrix is + # upper triangular) + data, d, e, tau, info = sytrd(A, lower=1, lwork=lwork) + assert_equal(info, 0) + + assert_allclose(data, A, atol=5*np.finfo(dtype).eps, rtol=1.0) + assert_allclose(d, np.diag(A)) + assert_allclose(e, 0.0) + assert_allclose(tau, 0.0) + + # and now for the proper test (lower=0 is the default) + data, d, e, tau, info = sytrd(A, lwork=lwork) + assert_equal(info, 0) + + # assert Q^T*A*Q = tridiag(e, d, e) + + # build tridiagonal matrix + T = np.zeros_like(A, dtype=dtype) + k = np.arange(A.shape[0]) + T[k, k] = d + k2 = np.arange(A.shape[0]-1) + T[k2+1, k2] = e + T[k2, k2+1] = e + + # build Q + Q = np.eye(n, n, dtype=dtype) + for i in range(n-1): + v = np.zeros(n, dtype=dtype) + v[:i] = data[:i, i+1] + v[i] = 1.0 + H = np.eye(n, n, dtype=dtype) - tau[i] * np.outer(v, v) + Q = np.dot(H, Q) + + # Make matrix fully symmetric + i_lower = np.tril_indices(n, -1) + A[i_lower] = A.T[i_lower] + + QTAQ = np.dot(Q.T, np.dot(A, Q)) + + # disable rtol here since some values in QTAQ and T are very close + # to 0. 
+ assert_allclose(QTAQ, T, atol=5*np.finfo(dtype).eps, rtol=1.0) + + +class TestHetrd: + @pytest.mark.parametrize('complex_dtype', COMPLEX_DTYPES) + def test_hetrd_with_zero_dim_array(self, complex_dtype): + # Assert that a 0x0 matrix raises an error + A = np.zeros((0, 0), dtype=complex_dtype) + hetrd = get_lapack_funcs('hetrd', (A,)) + assert_raises(ValueError, hetrd, A) + + @pytest.mark.parametrize('real_dtype,complex_dtype', + zip(REAL_DTYPES, COMPLEX_DTYPES)) + @pytest.mark.parametrize('n', (1, 3)) + def test_hetrd(self, n, real_dtype, complex_dtype): + A = np.zeros((n, n), dtype=complex_dtype) + hetrd, hetrd_lwork = \ + get_lapack_funcs(('hetrd', 'hetrd_lwork'), (A,)) + + # some upper triangular array + A[np.triu_indices_from(A)] = ( + np.arange(1, n*(n+1)//2+1, dtype=real_dtype) + + 1j * np.arange(1, n*(n+1)//2+1, dtype=real_dtype) + ) + np.fill_diagonal(A, np.real(np.diag(A))) + + # test query lwork + for x in [0, 1]: + _, info = hetrd_lwork(n, lower=x) + assert_equal(info, 0) + # lwork returns complex which segfaults hetrd call (gh-10388) + # use the safe and recommended option + lwork = _compute_lwork(hetrd_lwork, n) + + # check lower=1 behavior (shouldn't do much since the matrix is + # upper triangular) + data, d, e, tau, info = hetrd(A, lower=1, lwork=lwork) + assert_equal(info, 0) + + assert_allclose(data, A, atol=5*np.finfo(real_dtype).eps, rtol=1.0) + + assert_allclose(d, np.real(np.diag(A))) + assert_allclose(e, 0.0) + assert_allclose(tau, 0.0) + + # and now for the proper test (lower=0 is the default) + data, d, e, tau, info = hetrd(A, lwork=lwork) + assert_equal(info, 0) + + # assert Q^T*A*Q = tridiag(e, d, e) + + # build tridiagonal matrix + T = np.zeros_like(A, dtype=real_dtype) + k = np.arange(A.shape[0], dtype=int) + T[k, k] = d + k2 = np.arange(A.shape[0]-1, dtype=int) + T[k2+1, k2] = e + T[k2, k2+1] = e + + # build Q + Q = np.eye(n, n, dtype=complex_dtype) + for i in range(n-1): + v = np.zeros(n, dtype=complex_dtype) + v[:i] = data[:i, i+1] + v[i] = 1.0 + H = np.eye(n, n, dtype=complex_dtype) \ + - tau[i] * np.outer(v, np.conj(v)) + Q = np.dot(H, Q) + + # Make matrix fully Hermitian + i_lower = np.tril_indices(n, -1) + A[i_lower] = np.conj(A.T[i_lower]) + + QHAQ = np.dot(np.conj(Q.T), np.dot(A, Q)) + + # disable rtol here since some values in QTAQ and T are very close + # to 0. 
+ assert_allclose( + QHAQ, T, atol=10*np.finfo(real_dtype).eps, rtol=1.0 + ) + + +def test_gglse(): + # Example data taken from NAG manual + for ind, dtype in enumerate(DTYPES): + # DTYPES = gglse + func, func_lwork = get_lapack_funcs(('gglse', 'gglse_lwork'), + dtype=dtype) + lwork = _compute_lwork(func_lwork, m=6, n=4, p=2) + # For gglse + if ind < 2: + a = np.array([[-0.57, -1.28, -0.39, 0.25], + [-1.93, 1.08, -0.31, -2.14], + [2.30, 0.24, 0.40, -0.35], + [-1.93, 0.64, -0.66, 0.08], + [0.15, 0.30, 0.15, -2.13], + [-0.02, 1.03, -1.43, 0.50]], dtype=dtype) + c = np.array([-1.50, -2.14, 1.23, -0.54, -1.68, 0.82], dtype=dtype) + d = np.array([0., 0.], dtype=dtype) + # For gglse + else: + a = np.array([[0.96-0.81j, -0.03+0.96j, -0.91+2.06j, -0.05+0.41j], + [-0.98+1.98j, -1.20+0.19j, -0.66+0.42j, -0.81+0.56j], + [0.62-0.46j, 1.01+0.02j, 0.63-0.17j, -1.11+0.60j], + [0.37+0.38j, 0.19-0.54j, -0.98-0.36j, 0.22-0.20j], + [0.83+0.51j, 0.20+0.01j, -0.17-0.46j, 1.47+1.59j], + [1.08-0.28j, 0.20-0.12j, -0.07+1.23j, 0.26+0.26j]]) + c = np.array([[-2.54+0.09j], + [1.65-2.26j], + [-2.11-3.96j], + [1.82+3.30j], + [-6.41+3.77j], + [2.07+0.66j]]) + d = np.zeros(2, dtype=dtype) + + b = np.array([[1., 0., -1., 0.], [0., 1., 0., -1.]], dtype=dtype) + + _, _, _, result, _ = func(a, b, c, d, lwork=lwork) + if ind < 2: + expected = np.array([0.48904455, + 0.99754786, + 0.48904455, + 0.99754786]) + else: + expected = np.array([1.08742917-1.96205783j, + -0.74093902+3.72973919j, + 1.08742917-1.96205759j, + -0.74093896+3.72973895j]) + assert_array_almost_equal(result, expected, decimal=4) + + +def test_sycon_hecon(): + seed(1234) + for ind, dtype in enumerate(DTYPES+COMPLEX_DTYPES): + # DTYPES + COMPLEX DTYPES = sycon + hecon + n = 10 + # For sycon + if ind < 4: + func_lwork = get_lapack_funcs('sytrf_lwork', dtype=dtype) + funcon, functrf = get_lapack_funcs(('sycon', 'sytrf'), dtype=dtype) + A = (rand(n, n)).astype(dtype) + # For hecon + else: + func_lwork = get_lapack_funcs('hetrf_lwork', dtype=dtype) + funcon, functrf = get_lapack_funcs(('hecon', 'hetrf'), dtype=dtype) + A = (rand(n, n) + rand(n, n)*1j).astype(dtype) + + # Since sycon only refers to upper/lower part, conj() is safe here. 
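+        # Shifting by 2*I keeps the symmetrized matrix safely non-singular,
+        # so the reciprocal condition number estimated below is meaningful.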
+ A = (A + A.conj().T)/2 + 2*np.eye(n, dtype=dtype) + + anorm = norm(A, 1) + lwork = _compute_lwork(func_lwork, n) + ldu, ipiv, _ = functrf(A, lwork=lwork, lower=1) + rcond, _ = funcon(a=ldu, ipiv=ipiv, anorm=anorm, lower=1) + # The error is at most 1-fold + assert_(abs(1/rcond - np.linalg.cond(A, p=1))*rcond < 1) + + +def test_sygst(): + seed(1234) + for ind, dtype in enumerate(REAL_DTYPES): + # DTYPES = sygst + n = 10 + + potrf, sygst, syevd, sygvd = get_lapack_funcs(('potrf', 'sygst', + 'syevd', 'sygvd'), + dtype=dtype) + + A = rand(n, n).astype(dtype) + A = (A + A.T)/2 + # B must be positive definite + B = rand(n, n).astype(dtype) + B = (B + B.T)/2 + 2 * np.eye(n, dtype=dtype) + + # Perform eig (sygvd) + eig_gvd, _, info = sygvd(A, B) + assert_(info == 0) + + # Convert to std problem potrf + b, info = potrf(B) + assert_(info == 0) + a, info = sygst(A, b) + assert_(info == 0) + + eig, _, info = syevd(a) + assert_(info == 0) + assert_allclose(eig, eig_gvd, rtol=1.2e-4) + + +def test_hegst(): + seed(1234) + for ind, dtype in enumerate(COMPLEX_DTYPES): + # DTYPES = hegst + n = 10 + + potrf, hegst, heevd, hegvd = get_lapack_funcs(('potrf', 'hegst', + 'heevd', 'hegvd'), + dtype=dtype) + + A = rand(n, n).astype(dtype) + 1j * rand(n, n).astype(dtype) + A = (A + A.conj().T)/2 + # B must be positive definite + B = rand(n, n).astype(dtype) + 1j * rand(n, n).astype(dtype) + B = (B + B.conj().T)/2 + 2 * np.eye(n, dtype=dtype) + + # Perform eig (hegvd) + eig_gvd, _, info = hegvd(A, B) + assert_(info == 0) + + # Convert to std problem potrf + b, info = potrf(B) + assert_(info == 0) + a, info = hegst(A, b) + assert_(info == 0) + + eig, _, info = heevd(a) + assert_(info == 0) + assert_allclose(eig, eig_gvd, rtol=1e-4) + + +def test_tzrzf(): + """ + This test performs an RZ decomposition in which an m x n upper trapezoidal + array M (m <= n) is factorized as M = [R 0] * Z where R is upper triangular + and Z is unitary. + """ + seed(1234) + m, n = 10, 15 + for ind, dtype in enumerate(DTYPES): + tzrzf, tzrzf_lw = get_lapack_funcs(('tzrzf', 'tzrzf_lwork'), + dtype=dtype) + lwork = _compute_lwork(tzrzf_lw, m, n) + + if ind < 2: + A = triu(rand(m, n).astype(dtype)) + else: + A = triu((rand(m, n) + rand(m, n)*1j).astype(dtype)) + + # assert wrong shape arg, f2py returns generic error + assert_raises(Exception, tzrzf, A.T) + rz, tau, info = tzrzf(A, lwork=lwork) + # Check success + assert_(info == 0) + + # Get Z manually for comparison + R = np.hstack((rz[:, :m], np.zeros((m, n-m), dtype=dtype))) + V = np.hstack((np.eye(m, dtype=dtype), rz[:, m:])) + Id = np.eye(n, dtype=dtype) + ref = [Id-tau[x]*V[[x], :].T.dot(V[[x], :].conj()) for x in range(m)] + Z = reduce(np.dot, ref) + assert_allclose(R.dot(Z) - A, zeros_like(A, dtype=dtype), + atol=10*np.spacing(dtype(1.0).real), rtol=0.) + + +def test_tfsm(): + """ + Test for solving a linear system with the coefficient matrix is a + triangular array stored in Full Packed (RFP) format. 
+ """ + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 20 + if ind > 1: + A = triu(rand(n, n) + rand(n, n)*1j + eye(n)).astype(dtype) + trans = 'C' + else: + A = triu(rand(n, n) + eye(n)).astype(dtype) + trans = 'T' + + trttf, tfttr, tfsm = get_lapack_funcs(('trttf', 'tfttr', 'tfsm'), + dtype=dtype) + + Afp, _ = trttf(A) + B = rand(n, 2).astype(dtype) + soln = tfsm(-1, Afp, B) + assert_array_almost_equal(soln, solve(-A, B), + decimal=4 if ind % 2 == 0 else 6) + + soln = tfsm(-1, Afp, B, trans=trans) + assert_array_almost_equal(soln, solve(-A.conj().T, B), + decimal=4 if ind % 2 == 0 else 6) + + # Make A, unit diagonal + A[np.arange(n), np.arange(n)] = dtype(1.) + soln = tfsm(-1, Afp, B, trans=trans, diag='U') + assert_array_almost_equal(soln, solve(-A.conj().T, B), + decimal=4 if ind % 2 == 0 else 6) + + # Change side + B2 = rand(3, n).astype(dtype) + soln = tfsm(-1, Afp, B2, trans=trans, diag='U', side='R') + assert_array_almost_equal(soln, solve(-A, B2.T).conj().T, + decimal=4 if ind % 2 == 0 else 6) + + +def test_ormrz_unmrz(): + """ + This test performs a matrix multiplication with an arbitrary m x n matrix C + and a unitary matrix Q without explicitly forming the array. The array data + is encoded in the rectangular part of A which is obtained from ?TZRZF. Q + size is inferred by m, n, side keywords. + """ + seed(1234) + qm, qn, cn = 10, 15, 15 + for ind, dtype in enumerate(DTYPES): + tzrzf, tzrzf_lw = get_lapack_funcs(('tzrzf', 'tzrzf_lwork'), + dtype=dtype) + lwork_rz = _compute_lwork(tzrzf_lw, qm, qn) + + if ind < 2: + A = triu(rand(qm, qn).astype(dtype)) + C = rand(cn, cn).astype(dtype) + orun_mrz, orun_mrz_lw = get_lapack_funcs(('ormrz', 'ormrz_lwork'), + dtype=dtype) + else: + A = triu((rand(qm, qn) + rand(qm, qn)*1j).astype(dtype)) + C = (rand(cn, cn) + rand(cn, cn)*1j).astype(dtype) + orun_mrz, orun_mrz_lw = get_lapack_funcs(('unmrz', 'unmrz_lwork'), + dtype=dtype) + + lwork_mrz = _compute_lwork(orun_mrz_lw, cn, cn) + rz, tau, info = tzrzf(A, lwork=lwork_rz) + + # Get Q manually for comparison + V = np.hstack((np.eye(qm, dtype=dtype), rz[:, qm:])) + Id = np.eye(qn, dtype=dtype) + ref = [Id-tau[x]*V[[x], :].T.dot(V[[x], :].conj()) for x in range(qm)] + Q = reduce(np.dot, ref) + + # Now that we have Q, we can test whether lapack results agree with + # each case of CQ, CQ^H, QC, and QC^H + trans = 'T' if ind < 2 else 'C' + tol = 10*np.spacing(dtype(1.0).real) + + cq, info = orun_mrz(rz, tau, C, lwork=lwork_mrz) + assert_(info == 0) + assert_allclose(cq - Q.dot(C), zeros_like(C), atol=tol, rtol=0.) + + cq, info = orun_mrz(rz, tau, C, trans=trans, lwork=lwork_mrz) + assert_(info == 0) + assert_allclose(cq - Q.conj().T.dot(C), zeros_like(C), atol=tol, + rtol=0.) + + cq, info = orun_mrz(rz, tau, C, side='R', lwork=lwork_mrz) + assert_(info == 0) + assert_allclose(cq - C.dot(Q), zeros_like(C), atol=tol, rtol=0.) + + cq, info = orun_mrz(rz, tau, C, side='R', trans=trans, lwork=lwork_mrz) + assert_(info == 0) + assert_allclose(cq - C.dot(Q.conj().T), zeros_like(C), atol=tol, + rtol=0.) 
+ + +def test_tfttr_trttf(): + """ + Test conversion routines between the Rectengular Full Packed (RFP) format + and Standard Triangular Array (TR) + """ + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 20 + if ind > 1: + A_full = (rand(n, n) + rand(n, n)*1j).astype(dtype) + transr = 'C' + else: + A_full = (rand(n, n)).astype(dtype) + transr = 'T' + + trttf, tfttr = get_lapack_funcs(('trttf', 'tfttr'), dtype=dtype) + A_tf_U, info = trttf(A_full) + assert_(info == 0) + A_tf_L, info = trttf(A_full, uplo='L') + assert_(info == 0) + A_tf_U_T, info = trttf(A_full, transr=transr, uplo='U') + assert_(info == 0) + A_tf_L_T, info = trttf(A_full, transr=transr, uplo='L') + assert_(info == 0) + + # Create the RFP array manually (n is even!) + A_tf_U_m = zeros((n+1, n//2), dtype=dtype) + A_tf_U_m[:-1, :] = triu(A_full)[:, n//2:] + A_tf_U_m[n//2+1:, :] += triu(A_full)[:n//2, :n//2].conj().T + + A_tf_L_m = zeros((n+1, n//2), dtype=dtype) + A_tf_L_m[1:, :] = tril(A_full)[:, :n//2] + A_tf_L_m[:n//2, :] += tril(A_full)[n//2:, n//2:].conj().T + + assert_array_almost_equal(A_tf_U, A_tf_U_m.reshape(-1, order='F')) + assert_array_almost_equal(A_tf_U_T, + A_tf_U_m.conj().T.reshape(-1, order='F')) + + assert_array_almost_equal(A_tf_L, A_tf_L_m.reshape(-1, order='F')) + assert_array_almost_equal(A_tf_L_T, + A_tf_L_m.conj().T.reshape(-1, order='F')) + + # Get the original array from RFP + A_tr_U, info = tfttr(n, A_tf_U) + assert_(info == 0) + A_tr_L, info = tfttr(n, A_tf_L, uplo='L') + assert_(info == 0) + A_tr_U_T, info = tfttr(n, A_tf_U_T, transr=transr, uplo='U') + assert_(info == 0) + A_tr_L_T, info = tfttr(n, A_tf_L_T, transr=transr, uplo='L') + assert_(info == 0) + + assert_array_almost_equal(A_tr_U, triu(A_full)) + assert_array_almost_equal(A_tr_U_T, triu(A_full)) + assert_array_almost_equal(A_tr_L, tril(A_full)) + assert_array_almost_equal(A_tr_L_T, tril(A_full)) + + +def test_tpttr_trttp(): + """ + Test conversion routines between the Rectengular Full Packed (RFP) format + and Standard Triangular Array (TR) + """ + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 20 + if ind > 1: + A_full = (rand(n, n) + rand(n, n)*1j).astype(dtype) + else: + A_full = (rand(n, n)).astype(dtype) + + trttp, tpttr = get_lapack_funcs(('trttp', 'tpttr'), dtype=dtype) + A_tp_U, info = trttp(A_full) + assert_(info == 0) + A_tp_L, info = trttp(A_full, uplo='L') + assert_(info == 0) + + # Create the TP array manually + inds = tril_indices(n) + A_tp_U_m = zeros(n*(n+1)//2, dtype=dtype) + A_tp_U_m[:] = (triu(A_full).T)[inds] + + inds = triu_indices(n) + A_tp_L_m = zeros(n*(n+1)//2, dtype=dtype) + A_tp_L_m[:] = (tril(A_full).T)[inds] + + assert_array_almost_equal(A_tp_U, A_tp_U_m) + assert_array_almost_equal(A_tp_L, A_tp_L_m) + + # Get the original array from TP + A_tr_U, info = tpttr(n, A_tp_U) + assert_(info == 0) + A_tr_L, info = tpttr(n, A_tp_L, uplo='L') + assert_(info == 0) + + assert_array_almost_equal(A_tr_U, triu(A_full)) + assert_array_almost_equal(A_tr_L, tril(A_full)) + + +def test_pftrf(): + """ + Test Cholesky factorization of a positive definite Rectengular Full + Packed (RFP) format array + """ + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 20 + if ind > 1: + A = (rand(n, n) + rand(n, n)*1j).astype(dtype) + A = A + A.conj().T + n*eye(n) + else: + A = (rand(n, n)).astype(dtype) + A = A + A.T + n*eye(n) + + pftrf, trttf, tfttr = get_lapack_funcs(('pftrf', 'trttf', 'tfttr'), + dtype=dtype) + + # Get the original array from TP + Afp, info = trttf(A) + Achol_rfp, info = pftrf(n, Afp) + 
assert_(info == 0) + A_chol_r, _ = tfttr(n, Achol_rfp) + Achol = cholesky(A) + assert_array_almost_equal(A_chol_r, Achol) + + +def test_pftri(): + """ + Test Cholesky factorization of a positive definite Rectengular Full + Packed (RFP) format array to find its inverse + """ + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 20 + if ind > 1: + A = (rand(n, n) + rand(n, n)*1j).astype(dtype) + A = A + A.conj().T + n*eye(n) + else: + A = (rand(n, n)).astype(dtype) + A = A + A.T + n*eye(n) + + pftri, pftrf, trttf, tfttr = get_lapack_funcs(('pftri', + 'pftrf', + 'trttf', + 'tfttr'), + dtype=dtype) + + # Get the original array from TP + Afp, info = trttf(A) + A_chol_rfp, info = pftrf(n, Afp) + A_inv_rfp, info = pftri(n, A_chol_rfp) + assert_(info == 0) + A_inv_r, _ = tfttr(n, A_inv_rfp) + Ainv = inv(A) + assert_array_almost_equal(A_inv_r, triu(Ainv), + decimal=4 if ind % 2 == 0 else 6) + + +def test_pftrs(): + """ + Test Cholesky factorization of a positive definite Rectengular Full + Packed (RFP) format array and solve a linear system + """ + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 20 + if ind > 1: + A = (rand(n, n) + rand(n, n)*1j).astype(dtype) + A = A + A.conj().T + n*eye(n) + else: + A = (rand(n, n)).astype(dtype) + A = A + A.T + n*eye(n) + + B = ones((n, 3), dtype=dtype) + Bf1 = ones((n+2, 3), dtype=dtype) + Bf2 = ones((n-2, 3), dtype=dtype) + pftrs, pftrf, trttf, tfttr = get_lapack_funcs(('pftrs', + 'pftrf', + 'trttf', + 'tfttr'), + dtype=dtype) + + # Get the original array from TP + Afp, info = trttf(A) + A_chol_rfp, info = pftrf(n, Afp) + # larger B arrays shouldn't segfault + soln, info = pftrs(n, A_chol_rfp, Bf1) + assert_(info == 0) + assert_raises(Exception, pftrs, n, A_chol_rfp, Bf2) + soln, info = pftrs(n, A_chol_rfp, B) + assert_(info == 0) + assert_array_almost_equal(solve(A, B), soln, + decimal=4 if ind % 2 == 0 else 6) + + +def test_sfrk_hfrk(): + """ + Test for performing a symmetric rank-k operation for matrix in RFP format. + """ + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 20 + if ind > 1: + A = (rand(n, n) + rand(n, n)*1j).astype(dtype) + A = A + A.conj().T + n*eye(n) + else: + A = (rand(n, n)).astype(dtype) + A = A + A.T + n*eye(n) + + prefix = 's'if ind < 2 else 'h' + trttf, tfttr, shfrk = get_lapack_funcs(('trttf', 'tfttr', f'{prefix}frk'), + dtype=dtype) + + Afp, _ = trttf(A) + C = np.random.rand(n, 2).astype(dtype) + Afp_out = shfrk(n, 2, -1, C, 2, Afp) + A_out, _ = tfttr(n, Afp_out) + assert_array_almost_equal(A_out, triu(-C.dot(C.conj().T) + 2*A), + decimal=4 if ind % 2 == 0 else 6) + + +def test_syconv(): + """ + Test for going back and forth between the returned format of he/sytrf to + L and D factors/permutations. + """ + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 10 + + if ind > 1: + A = (randint(-30, 30, (n, n)) + + randint(-30, 30, (n, n))*1j).astype(dtype) + + A = A + A.conj().T + else: + A = randint(-30, 30, (n, n)).astype(dtype) + A = A + A.T + n*eye(n) + + tol = 100*np.spacing(dtype(1.0).real) + syconv, trf, trf_lwork = get_lapack_funcs(('syconv', 'sytrf', + 'sytrf_lwork'), dtype=dtype) + lw = _compute_lwork(trf_lwork, n, lower=1) + L, D, perm = ldl(A, lower=1, hermitian=False) + lw = _compute_lwork(trf_lwork, n, lower=1) + ldu, ipiv, info = trf(A, lower=1, lwork=lw) + a, e, info = syconv(ldu, ipiv, lower=1) + assert_allclose(tril(a, -1,), tril(L[perm, :], -1), atol=tol, rtol=0.) 
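+        # i.e. syconv has rewritten the packed sytrf output so that its
+        # strictly lower triangle matches the permuted L factor from ldl().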
+ + # Test also upper + U, D, perm = ldl(A, lower=0, hermitian=False) + ldu, ipiv, info = trf(A, lower=0) + a, e, info = syconv(ldu, ipiv, lower=0) + assert_allclose(triu(a, 1), triu(U[perm, :], 1), atol=tol, rtol=0.) + + +class TestBlockedQR: + """ + Tests for the blocked QR factorization, namely through geqrt, gemqrt, tpqrt + and tpmqr. + """ + + def test_geqrt_gemqrt(self): + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 20 + + if ind > 1: + A = (rand(n, n) + rand(n, n)*1j).astype(dtype) + else: + A = (rand(n, n)).astype(dtype) + + tol = 100*np.spacing(dtype(1.0).real) + geqrt, gemqrt = get_lapack_funcs(('geqrt', 'gemqrt'), dtype=dtype) + + a, t, info = geqrt(n, A) + assert info == 0 + + # Extract elementary reflectors from lower triangle, adding the + # main diagonal of ones. + v = np.tril(a, -1) + np.eye(n, dtype=dtype) + # Generate the block Householder transform I - VTV^H + Q = np.eye(n, dtype=dtype) - v @ t @ v.T.conj() + R = np.triu(a) + + # Test columns of Q are orthogonal + assert_allclose(Q.T.conj() @ Q, np.eye(n, dtype=dtype), atol=tol, + rtol=0.) + assert_allclose(Q @ R, A, atol=tol, rtol=0.) + + if ind > 1: + C = (rand(n, n) + rand(n, n)*1j).astype(dtype) + transpose = 'C' + else: + C = (rand(n, n)).astype(dtype) + transpose = 'T' + + for side in ('L', 'R'): + for trans in ('N', transpose): + c, info = gemqrt(a, t, C, side=side, trans=trans) + assert info == 0 + + if trans == transpose: + q = Q.T.conj() + else: + q = Q + + if side == 'L': + qC = q @ C + else: + qC = C @ q + + assert_allclose(c, qC, atol=tol, rtol=0.) + + # Test default arguments + if (side, trans) == ('L', 'N'): + c_default, info = gemqrt(a, t, C) + assert info == 0 + assert_equal(c_default, c) + + # Test invalid side/trans + assert_raises(Exception, gemqrt, a, t, C, side='A') + assert_raises(Exception, gemqrt, a, t, C, trans='A') + + def test_tpqrt_tpmqrt(self): + seed(1234) + for ind, dtype in enumerate(DTYPES): + n = 20 + + if ind > 1: + A = (rand(n, n) + rand(n, n)*1j).astype(dtype) + B = (rand(n, n) + rand(n, n)*1j).astype(dtype) + else: + A = (rand(n, n)).astype(dtype) + B = (rand(n, n)).astype(dtype) + + tol = 100*np.spacing(dtype(1.0).real) + tpqrt, tpmqrt = get_lapack_funcs(('tpqrt', 'tpmqrt'), dtype=dtype) + + # Test for the range of pentagonal B, from square to upper + # triangular + for l in (0, n // 2, n): + a, b, t, info = tpqrt(l, n, A, B) + assert info == 0 + + # Check that lower triangular part of A has not been modified + assert_equal(np.tril(a, -1), np.tril(A, -1)) + # Check that elements not part of the pentagonal portion of B + # have not been modified. + assert_equal(np.tril(b, l - n - 1), np.tril(B, l - n - 1)) + + # Extract pentagonal portion of B + B_pent, b_pent = np.triu(B, l - n), np.triu(b, l - n) + + # Generate elementary reflectors + v = np.concatenate((np.eye(n, dtype=dtype), b_pent)) + # Generate the block Householder transform I - VTV^H + Q = np.eye(2 * n, dtype=dtype) - v @ t @ v.T.conj() + R = np.concatenate((np.triu(a), np.zeros_like(a))) + + # Test columns of Q are orthogonal + assert_allclose(Q.T.conj() @ Q, np.eye(2 * n, dtype=dtype), + atol=tol, rtol=0.) + assert_allclose(Q @ R, np.concatenate((np.triu(A), B_pent)), + atol=tol, rtol=0.) 
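+                # With Q verified, apply the same blocked reflectors to an
+                # arbitrary pair (C, D) through tpmqrt and compare against
+                # explicit multiplication by Q.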
+ + if ind > 1: + C = (rand(n, n) + rand(n, n)*1j).astype(dtype) + D = (rand(n, n) + rand(n, n)*1j).astype(dtype) + transpose = 'C' + else: + C = (rand(n, n)).astype(dtype) + D = (rand(n, n)).astype(dtype) + transpose = 'T' + + for side in ('L', 'R'): + for trans in ('N', transpose): + c, d, info = tpmqrt(l, b, t, C, D, side=side, + trans=trans) + assert info == 0 + + if trans == transpose: + q = Q.T.conj() + else: + q = Q + + if side == 'L': + cd = np.concatenate((c, d), axis=0) + CD = np.concatenate((C, D), axis=0) + qCD = q @ CD + else: + cd = np.concatenate((c, d), axis=1) + CD = np.concatenate((C, D), axis=1) + qCD = CD @ q + + assert_allclose(cd, qCD, atol=tol, rtol=0.) + + if (side, trans) == ('L', 'N'): + c_default, d_default, info = tpmqrt(l, b, t, C, D) + assert info == 0 + assert_equal(c_default, c) + assert_equal(d_default, d) + + # Test invalid side/trans + assert_raises(Exception, tpmqrt, l, b, t, C, D, side='A') + assert_raises(Exception, tpmqrt, l, b, t, C, D, trans='A') + + +def test_pstrf(): + seed(1234) + for ind, dtype in enumerate(DTYPES): + # DTYPES = pstrf + n = 10 + r = 2 + pstrf = get_lapack_funcs('pstrf', dtype=dtype) + + # Create positive semidefinite A + if ind > 1: + A = rand(n, n-r).astype(dtype) + 1j * rand(n, n-r).astype(dtype) + A = A @ A.conj().T + else: + A = rand(n, n-r).astype(dtype) + A = A @ A.T + + c, piv, r_c, info = pstrf(A) + U = triu(c) + U[r_c - n:, r_c - n:] = 0. + + assert_equal(info, 1) + # python-dbg 3.5.2 runs cause trouble with the following assertion. + # assert_equal(r_c, n - r) + single_atol = 1000 * np.finfo(np.float32).eps + double_atol = 1000 * np.finfo(np.float64).eps + atol = single_atol if ind in [0, 2] else double_atol + assert_allclose(A[piv-1][:, piv-1], U.conj().T @ U, rtol=0., atol=atol) + + c, piv, r_c, info = pstrf(A, lower=1) + L = tril(c) + L[r_c - n:, r_c - n:] = 0. + + assert_equal(info, 1) + # assert_equal(r_c, n - r) + single_atol = 1000 * np.finfo(np.float32).eps + double_atol = 1000 * np.finfo(np.float64).eps + atol = single_atol if ind in [0, 2] else double_atol + assert_allclose(A[piv-1][:, piv-1], L @ L.conj().T, rtol=0., atol=atol) + + +def test_pstf2(): + seed(1234) + for ind, dtype in enumerate(DTYPES): + # DTYPES = pstf2 + n = 10 + r = 2 + pstf2 = get_lapack_funcs('pstf2', dtype=dtype) + + # Create positive semidefinite A + if ind > 1: + A = rand(n, n-r).astype(dtype) + 1j * rand(n, n-r).astype(dtype) + A = A @ A.conj().T + else: + A = rand(n, n-r).astype(dtype) + A = A @ A.T + + c, piv, r_c, info = pstf2(A) + U = triu(c) + U[r_c - n:, r_c - n:] = 0. + + assert_equal(info, 1) + # python-dbg 3.5.2 runs cause trouble with the commented assertions. + # assert_equal(r_c, n - r) + single_atol = 1000 * np.finfo(np.float32).eps + double_atol = 1000 * np.finfo(np.float64).eps + atol = single_atol if ind in [0, 2] else double_atol + assert_allclose(A[piv-1][:, piv-1], U.conj().T @ U, rtol=0., atol=atol) + + c, piv, r_c, info = pstf2(A, lower=1) + L = tril(c) + L[r_c - n:, r_c - n:] = 0. 
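+        # As in the upper-triangular branch, the trailing
+        # (n - r_c) x (n - r_c) block has been zeroed so that only the first
+        # r_c columns of the factor enter the comparison below.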
+ + assert_equal(info, 1) + # assert_equal(r_c, n - r) + single_atol = 1000 * np.finfo(np.float32).eps + double_atol = 1000 * np.finfo(np.float64).eps + atol = single_atol if ind in [0, 2] else double_atol + assert_allclose(A[piv-1][:, piv-1], L @ L.conj().T, rtol=0., atol=atol) + + +def test_geequ(): + desired_real = np.array([[0.6250, 1.0000, 0.0393, -0.4269], + [1.0000, -0.5619, -1.0000, -1.0000], + [0.5874, -1.0000, -0.0596, -0.5341], + [-1.0000, -0.5946, -0.0294, 0.9957]]) + + desired_cplx = np.array([[-0.2816+0.5359*1j, + 0.0812+0.9188*1j, + -0.7439-0.2561*1j], + [-0.3562-0.2954*1j, + 0.9566-0.0434*1j, + -0.0174+0.1555*1j], + [0.8607+0.1393*1j, + -0.2759+0.7241*1j, + -0.1642-0.1365*1j]]) + + for ind, dtype in enumerate(DTYPES): + if ind < 2: + # Use examples from the NAG documentation + A = np.array([[1.80e+10, 2.88e+10, 2.05e+00, -8.90e+09], + [5.25e+00, -2.95e+00, -9.50e-09, -3.80e+00], + [1.58e+00, -2.69e+00, -2.90e-10, -1.04e+00], + [-1.11e+00, -6.60e-01, -5.90e-11, 8.00e-01]]) + A = A.astype(dtype) + else: + A = np.array([[-1.34e+00, 0.28e+10, -6.39e+00], + [-1.70e+00, 3.31e+10, -0.15e+00], + [2.41e-10, -0.56e+00, -0.83e-10]], dtype=dtype) + A += np.array([[2.55e+00, 3.17e+10, -2.20e+00], + [-1.41e+00, -0.15e+10, 1.34e+00], + [0.39e-10, 1.47e+00, -0.69e-10]])*1j + + A = A.astype(dtype) + + geequ = get_lapack_funcs('geequ', dtype=dtype) + r, c, rowcnd, colcnd, amax, info = geequ(A) + + if ind < 2: + assert_allclose(desired_real.astype(dtype), r[:, None]*A*c, + rtol=0, atol=1e-4) + else: + assert_allclose(desired_cplx.astype(dtype), r[:, None]*A*c, + rtol=0, atol=1e-4) + + +def test_syequb(): + desired_log2s = np.array([0, 0, 0, 0, 0, 0, -1, -1, -2, -3]) + + for ind, dtype in enumerate(DTYPES): + A = np.eye(10, dtype=dtype) + alpha = dtype(1. if ind < 2 else 1.j) + d = np.array([alpha * 2.**x for x in range(-5, 5)], dtype=dtype) + A += np.rot90(np.diag(d)) + + syequb = get_lapack_funcs('syequb', dtype=dtype) + s, scond, amax, info = syequb(A) + + assert_equal(np.log2(s).astype(int), desired_log2s) + + +@pytest.mark.skipif(True, + reason="Failing on some OpenBLAS version, see gh-12276") +def test_heequb(): + # zheequb has a bug for versions =< LAPACK 3.9.0 + # See Reference-LAPACK gh-61 and gh-408 + # Hence the zheequb test is customized accordingly to avoid + # work scaling. + A = np.diag([2]*5 + [1002]*5) + np.diag(np.ones(9), k=1)*1j + s, scond, amax, info = lapack.zheequb(A) + assert_equal(info, 0) + assert_allclose(np.log2(s), [0., -1.]*2 + [0.] 
+ [-4]*5) + + A = np.diag(2**np.abs(np.arange(-5, 6)) + 0j) + A[5, 5] = 1024 + A[5, 0] = 16j + s, scond, amax, info = lapack.cheequb(A.astype(np.complex64), lower=1) + assert_equal(info, 0) + assert_allclose(np.log2(s), [-2, -1, -1, 0, 0, -5, 0, -1, -1, -2, -2]) + + +def test_getc2_gesc2(): + np.random.seed(42) + n = 10 + desired_real = np.random.rand(n) + desired_cplx = np.random.rand(n) + np.random.rand(n)*1j + + for ind, dtype in enumerate(DTYPES): + if ind < 2: + A = np.random.rand(n, n) + A = A.astype(dtype) + b = A @ desired_real + b = b.astype(dtype) + else: + A = np.random.rand(n, n) + np.random.rand(n, n)*1j + A = A.astype(dtype) + b = A @ desired_cplx + b = b.astype(dtype) + + getc2 = get_lapack_funcs('getc2', dtype=dtype) + gesc2 = get_lapack_funcs('gesc2', dtype=dtype) + lu, ipiv, jpiv, info = getc2(A, overwrite_a=0) + x, scale = gesc2(lu, b, ipiv, jpiv, overwrite_rhs=0) + + if ind < 2: + assert_array_almost_equal(desired_real.astype(dtype), + x/scale, decimal=4) + else: + assert_array_almost_equal(desired_cplx.astype(dtype), + x/scale, decimal=4) + + +@pytest.mark.parametrize('size', [(6, 5), (5, 5)]) +@pytest.mark.parametrize('dtype', REAL_DTYPES) +@pytest.mark.parametrize('joba', range(6)) # 'C', 'E', 'F', 'G', 'A', 'R' +@pytest.mark.parametrize('jobu', range(4)) # 'U', 'F', 'W', 'N' +@pytest.mark.parametrize('jobv', range(4)) # 'V', 'J', 'W', 'N' +@pytest.mark.parametrize('jobr', [0, 1]) +@pytest.mark.parametrize('jobp', [0, 1]) +def test_gejsv_general(size, dtype, joba, jobu, jobv, jobr, jobp, jobt=0): + """Test the lapack routine ?gejsv. + + This function tests that a singular value decomposition can be performed + on the random M-by-N matrix A. The test performs the SVD using ?gejsv + then performs the following checks: + + * ?gejsv exist successfully (info == 0) + * The returned singular values are correct + * `A` can be reconstructed from `u`, `SIGMA`, `v` + * Ensure that u.T @ u is the identity matrix + * Ensure that v.T @ v is the identity matrix + * The reported matrix rank + * The reported number of singular values + * If denormalized floats are required + + Notes + ----- + joba specifies several choices effecting the calculation's accuracy + Although all arguments are tested, the tests only check that the correct + solution is returned - NOT that the prescribed actions are performed + internally. + + jobt is, as of v3.9.0, still experimental and removed to cut down number of + test cases. However keyword itself is tested externally. + """ + seed(42) + + # Define some constants for later use: + m, n = size + atol = 100 * np.finfo(dtype).eps + A = generate_random_dtype_array(size, dtype) + gejsv = get_lapack_funcs('gejsv', dtype=dtype) + + # Set up checks for invalid job? combinations + # if an invalid combination occurs we set the appropriate + # exit status. + lsvec = jobu < 2 # Calculate left singular vectors + rsvec = jobv < 2 # Calculate right singular vectors + l2tran = (jobt == 1) and (m == n) + is_complex = np.iscomplexobj(A) + + invalid_real_jobv = (jobv == 1) and (not lsvec) and (not is_complex) + invalid_cplx_jobu = (jobu == 2) and not (rsvec and l2tran) and is_complex + invalid_cplx_jobv = (jobv == 2) and not (lsvec and l2tran) and is_complex + + # Set the exit status to the expected value. + # Here we only check for invalid combinations, not individual + # parameters. 
+ if invalid_cplx_jobu: + exit_status = -2 + elif invalid_real_jobv or invalid_cplx_jobv: + exit_status = -3 + else: + exit_status = 0 + + if (jobu > 1) and (jobv == 1): + assert_raises(Exception, gejsv, A, joba, jobu, jobv, jobr, jobt, jobp) + else: + sva, u, v, work, iwork, info = gejsv(A, + joba=joba, + jobu=jobu, + jobv=jobv, + jobr=jobr, + jobt=jobt, + jobp=jobp) + + # Check that ?gejsv exited successfully/as expected + assert_equal(info, exit_status) + + # If exit_status is non-zero the combination of jobs is invalid. + # We test this above but no calculations are performed. + if not exit_status: + + # Check the returned singular values + sigma = (work[0] / work[1]) * sva[:n] + assert_allclose(sigma, svd(A, compute_uv=False), atol=atol) + + if jobu == 1: + # If JOBU = 'F', then u contains the M-by-M matrix of + # the left singular vectors, including an ONB of the orthogonal + # complement of the Range(A) + # However, to recalculate A we are concerned about the + # first n singular values and so can ignore the latter. + # TODO: Add a test for ONB? + u = u[:, :n] + + if lsvec and rsvec: + assert_allclose(u @ np.diag(sigma) @ v.conj().T, A, atol=atol) + if lsvec: + assert_allclose(u.conj().T @ u, np.identity(n), atol=atol) + if rsvec: + assert_allclose(v.conj().T @ v, np.identity(n), atol=atol) + + assert_equal(iwork[0], np.linalg.matrix_rank(A)) + assert_equal(iwork[1], np.count_nonzero(sigma)) + # iwork[2] is non-zero if requested accuracy is not warranted for + # the data. This should never occur for these tests. + assert_equal(iwork[2], 0) + + +@pytest.mark.parametrize('dtype', REAL_DTYPES) +def test_gejsv_edge_arguments(dtype): + """Test edge arguments return expected status""" + gejsv = get_lapack_funcs('gejsv', dtype=dtype) + + # scalar A + sva, u, v, work, iwork, info = gejsv(1.) 
+ assert_equal(info, 0) + assert_equal(u.shape, (1, 1)) + assert_equal(v.shape, (1, 1)) + assert_equal(sva, np.array([1.], dtype=dtype)) + + # 1d A + A = np.ones((1,), dtype=dtype) + sva, u, v, work, iwork, info = gejsv(A) + assert_equal(info, 0) + assert_equal(u.shape, (1, 1)) + assert_equal(v.shape, (1, 1)) + assert_equal(sva, np.array([1.], dtype=dtype)) + + # 2d empty A + A = np.ones((1, 0), dtype=dtype) + sva, u, v, work, iwork, info = gejsv(A) + assert_equal(info, 0) + assert_equal(u.shape, (1, 0)) + assert_equal(v.shape, (1, 0)) + assert_equal(sva, np.array([], dtype=dtype)) + + # make sure "overwrite_a" is respected - user reported in gh-13191 + A = np.sin(np.arange(100).reshape(10, 10)).astype(dtype) + A = np.asfortranarray(A + A.T) # make it symmetric and column major + Ac = A.copy('A') + _ = gejsv(A) + assert_allclose(A, Ac) + + +@pytest.mark.parametrize(('kwargs'), + ({'joba': 9}, + {'jobu': 9}, + {'jobv': 9}, + {'jobr': 9}, + {'jobt': 9}, + {'jobp': 9}) + ) +def test_gejsv_invalid_job_arguments(kwargs): + """Test invalid job arguments raise an Exception""" + A = np.ones((2, 2), dtype=float) + gejsv = get_lapack_funcs('gejsv', dtype=float) + assert_raises(Exception, gejsv, A, **kwargs) + + +@pytest.mark.parametrize("A,sva_expect,u_expect,v_expect", + [(np.array([[2.27, -1.54, 1.15, -1.94], + [0.28, -1.67, 0.94, -0.78], + [-0.48, -3.09, 0.99, -0.21], + [1.07, 1.22, 0.79, 0.63], + [-2.35, 2.93, -1.45, 2.30], + [0.62, -7.39, 1.03, -2.57]]), + np.array([9.9966, 3.6831, 1.3569, 0.5000]), + np.array([[0.2774, -0.6003, -0.1277, 0.1323], + [0.2020, -0.0301, 0.2805, 0.7034], + [0.2918, 0.3348, 0.6453, 0.1906], + [-0.0938, -0.3699, 0.6781, -0.5399], + [-0.4213, 0.5266, 0.0413, -0.0575], + [0.7816, 0.3353, -0.1645, -0.3957]]), + np.array([[0.1921, -0.8030, 0.0041, -0.5642], + [-0.8794, -0.3926, -0.0752, 0.2587], + [0.2140, -0.2980, 0.7827, 0.5027], + [-0.3795, 0.3351, 0.6178, -0.6017]]))]) +def test_gejsv_NAG(A, sva_expect, u_expect, v_expect): + """ + This test implements the example found in the NAG manual, f08khf. + An example was not found for the complex case. + """ + # NAG manual provides accuracy up to 4 decimals + atol = 1e-4 + gejsv = get_lapack_funcs('gejsv', dtype=A.dtype) + + sva, u, v, work, iwork, info = gejsv(A) + + assert_allclose(sva_expect, sva, atol=atol) + assert_allclose(u_expect, u, atol=atol) + assert_allclose(v_expect, v, atol=atol) + + +@pytest.mark.parametrize("dtype", DTYPES) +def test_gttrf_gttrs(dtype): + # The test uses ?gttrf and ?gttrs to solve a random system for each dtype, + # tests that the output of ?gttrf define LU matrices, that input + # parameters are unmodified, transposal options function correctly, that + # incompatible matrix shapes raise an error, and singular matrices return + # non zero info. 
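+    # ?gttrf does not return L explicitly; it is rebuilt below from the
+    # multipliers in _dl and the pivot indices in ipiv by applying, column by
+    # column, the permutation and rank-one update that each of them encodes.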
+ + seed(42) + n = 10 + atol = 100 * np.finfo(dtype).eps + + # create the matrix in accordance with the data type + du = generate_random_dtype_array((n-1,), dtype=dtype) + d = generate_random_dtype_array((n,), dtype=dtype) + dl = generate_random_dtype_array((n-1,), dtype=dtype) + + diag_cpy = [dl.copy(), d.copy(), du.copy()] + + A = np.diag(d) + np.diag(dl, -1) + np.diag(du, 1) + x = np.random.rand(n) + b = A @ x + + gttrf, gttrs = get_lapack_funcs(('gttrf', 'gttrs'), dtype=dtype) + + _dl, _d, _du, du2, ipiv, info = gttrf(dl, d, du) + # test to assure that the inputs of ?gttrf are unmodified + assert_array_equal(dl, diag_cpy[0]) + assert_array_equal(d, diag_cpy[1]) + assert_array_equal(du, diag_cpy[2]) + + # generate L and U factors from ?gttrf return values + # L/U are lower/upper triangular by construction (initially and at end) + U = np.diag(_d, 0) + np.diag(_du, 1) + np.diag(du2, 2) + L = np.eye(n, dtype=dtype) + + for i, m in enumerate(_dl): + # L is given in a factored form. + # See + # www.hpcavf.uclan.ac.uk/softwaredoc/sgi_scsl_html/sgi_html/ch03.html + piv = ipiv[i] - 1 + # right multiply by permutation matrix + L[:, [i, piv]] = L[:, [piv, i]] + # right multiply by Li, rank-one modification of identity + L[:, i] += L[:, i+1]*m + + # one last permutation + i, piv = -1, ipiv[-1] - 1 + # right multiply by final permutation matrix + L[:, [i, piv]] = L[:, [piv, i]] + + # check that the outputs of ?gttrf define an LU decomposition of A + assert_allclose(A, L @ U, atol=atol) + + b_cpy = b.copy() + x_gttrs, info = gttrs(_dl, _d, _du, du2, ipiv, b) + # test that the inputs of ?gttrs are unmodified + assert_array_equal(b, b_cpy) + # test that the result of ?gttrs matches the expected input + assert_allclose(x, x_gttrs, atol=atol) + + # test that ?gttrf and ?gttrs work with transposal options + if dtype in REAL_DTYPES: + trans = "T" + b_trans = A.T @ x + else: + trans = "C" + b_trans = A.conj().T @ x + + x_gttrs, info = gttrs(_dl, _d, _du, du2, ipiv, b_trans, trans=trans) + assert_allclose(x, x_gttrs, atol=atol) + + # test that ValueError is raised with incompatible matrix shapes + with assert_raises(ValueError): + gttrf(dl[:-1], d, du) + with assert_raises(ValueError): + gttrf(dl, d[:-1], du) + with assert_raises(ValueError): + gttrf(dl, d, du[:-1]) + + # test that matrix of size n=2 raises exception + with assert_raises(Exception): + gttrf(dl[0], d[:1], du[0]) + + # test that singular (row of all zeroes) matrix fails via info + du[0] = 0 + d[0] = 0 + __dl, __d, __du, _du2, _ipiv, _info = gttrf(dl, d, du) + np.testing.assert_(__d[info - 1] == 0, + "?gttrf: _d[info-1] is {}, not the illegal value :0." 
+ .format(__d[info - 1])) + + +@pytest.mark.parametrize("du, d, dl, du_exp, d_exp, du2_exp, ipiv_exp, b, x", + [(np.array([2.1, -1.0, 1.9, 8.0]), + np.array([3.0, 2.3, -5.0, -.9, 7.1]), + np.array([3.4, 3.6, 7.0, -6.0]), + np.array([2.3, -5, -.9, 7.1]), + np.array([3.4, 3.6, 7, -6, -1.015373]), + np.array([-1, 1.9, 8]), + np.array([2, 3, 4, 5, 5]), + np.array([[2.7, 6.6], + [-0.5, 10.8], + [2.6, -3.2], + [0.6, -11.2], + [2.7, 19.1] + ]), + np.array([[-4, 5], + [7, -4], + [3, -3], + [-4, -2], + [-3, 1]])), + ( + np.array([2 - 1j, 2 + 1j, -1 + 1j, 1 - 1j]), + np.array([-1.3 + 1.3j, -1.3 + 1.3j, + -1.3 + 3.3j, - .3 + 4.3j, + -3.3 + 1.3j]), + np.array([1 - 2j, 1 + 1j, 2 - 3j, 1 + 1j]), + # du exp + np.array([-1.3 + 1.3j, -1.3 + 3.3j, + -0.3 + 4.3j, -3.3 + 1.3j]), + np.array([1 - 2j, 1 + 1j, 2 - 3j, 1 + 1j, + -1.3399 + 0.2875j]), + np.array([2 + 1j, -1 + 1j, 1 - 1j]), + np.array([2, 3, 4, 5, 5]), + np.array([[2.4 - 5j, 2.7 + 6.9j], + [3.4 + 18.2j, - 6.9 - 5.3j], + [-14.7 + 9.7j, - 6 - .6j], + [31.9 - 7.7j, -3.9 + 9.3j], + [-1 + 1.6j, -3 + 12.2j]]), + np.array([[1 + 1j, 2 - 1j], + [3 - 1j, 1 + 2j], + [4 + 5j, -1 + 1j], + [-1 - 2j, 2 + 1j], + [1 - 1j, 2 - 2j]]) + )]) +def test_gttrf_gttrs_NAG_f07cdf_f07cef_f07crf_f07csf(du, d, dl, du_exp, d_exp, + du2_exp, ipiv_exp, b, x): + # test to assure that wrapper is consistent with NAG Library Manual Mark 26 + # example problems: f07cdf and f07cef (real) + # examples: f07crf and f07csf (complex) + # (Links may expire, so search for "NAG Library Manual Mark 26" online) + + gttrf, gttrs = get_lapack_funcs(('gttrf', "gttrs"), (du[0], du[0])) + + _dl, _d, _du, du2, ipiv, info = gttrf(dl, d, du) + assert_allclose(du2, du2_exp) + assert_allclose(_du, du_exp) + assert_allclose(_d, d_exp, atol=1e-4) # NAG examples provide 4 decimals. + assert_allclose(ipiv, ipiv_exp) + + x_gttrs, info = gttrs(_dl, _d, _du, du2, ipiv, b) + + assert_allclose(x_gttrs, x) + + +@pytest.mark.parametrize('dtype', DTYPES) +@pytest.mark.parametrize('shape', [(3, 7), (7, 3), (2**18, 2**18)]) +def test_geqrfp_lwork(dtype, shape): + geqrfp_lwork = get_lapack_funcs(('geqrfp_lwork'), dtype=dtype) + m, n = shape + lwork, info = geqrfp_lwork(m=m, n=n) + assert_equal(info, 0) + + +@pytest.mark.parametrize("ddtype,dtype", + zip(REAL_DTYPES + REAL_DTYPES, DTYPES)) +def test_pttrf_pttrs(ddtype, dtype): + seed(42) + # set test tolerance appropriate for dtype + atol = 100*np.finfo(dtype).eps + # n is the length diagonal of A + n = 10 + # create diagonals according to size and dtype + + # diagonal d should always be real. + # add 4 to d so it will be dominant for all dtypes + d = generate_random_dtype_array((n,), ddtype) + 4 + # diagonal e may be real or complex. 
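+    # (?pttrf will factor the resulting Hermitian positive definite
+    # tridiagonal matrix as L*D*L**H, which is what the checks below verify.)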
+ e = generate_random_dtype_array((n-1,), dtype) + + # assemble diagonals together into matrix + A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1) + # store a copy of diagonals to later verify + diag_cpy = [d.copy(), e.copy()] + + pttrf = get_lapack_funcs('pttrf', dtype=dtype) + + _d, _e, info = pttrf(d, e) + # test to assure that the inputs of ?pttrf are unmodified + assert_array_equal(d, diag_cpy[0]) + assert_array_equal(e, diag_cpy[1]) + assert_equal(info, 0, err_msg=f"pttrf: info = {info}, should be 0") + + # test that the factors from pttrf can be recombined to make A + L = np.diag(_e, -1) + np.diag(np.ones(n)) + D = np.diag(_d) + + assert_allclose(A, L@D@L.conjugate().T, atol=atol) + + # generate random solution x + x = generate_random_dtype_array((n,), dtype) + # determine accompanying b to get soln x + b = A@x + + # determine _x from pttrs + pttrs = get_lapack_funcs('pttrs', dtype=dtype) + _x, info = pttrs(_d, _e.conj(), b) + assert_equal(info, 0, err_msg=f"pttrs: info = {info}, should be 0") + + # test that _x from pttrs matches the expected x + assert_allclose(x, _x, atol=atol) + + +@pytest.mark.parametrize("ddtype,dtype", + zip(REAL_DTYPES + REAL_DTYPES, DTYPES)) +def test_pttrf_pttrs_errors_incompatible_shape(ddtype, dtype): + n = 10 + pttrf = get_lapack_funcs('pttrf', dtype=dtype) + d = generate_random_dtype_array((n,), ddtype) + 2 + e = generate_random_dtype_array((n-1,), dtype) + # test that ValueError is raised with incompatible matrix shapes + assert_raises(ValueError, pttrf, d[:-1], e) + assert_raises(ValueError, pttrf, d, e[:-1]) + + +@pytest.mark.parametrize("ddtype,dtype", + zip(REAL_DTYPES + REAL_DTYPES, DTYPES)) +def test_pttrf_pttrs_errors_singular_nonSPD(ddtype, dtype): + n = 10 + pttrf = get_lapack_funcs('pttrf', dtype=dtype) + d = generate_random_dtype_array((n,), ddtype) + 2 + e = generate_random_dtype_array((n-1,), dtype) + # test that singular (row of all zeroes) matrix fails via info + d[0] = 0 + e[0] = 0 + _d, _e, info = pttrf(d, e) + assert_equal(_d[info - 1], 0, + f"?pttrf: _d[info-1] is {_d[info - 1]}, not the illegal value :0.") + + # test with non-spd matrix + d = generate_random_dtype_array((n,), ddtype) + _d, _e, info = pttrf(d, e) + assert_(info != 0, "?pttrf should fail with non-spd matrix, but didn't") + + +@pytest.mark.parametrize(("d, e, d_expect, e_expect, b, x_expect"), [ + (np.array([4, 10, 29, 25, 5]), + np.array([-2, -6, 15, 8]), + np.array([4, 9, 25, 16, 1]), + np.array([-.5, -.6667, .6, .5]), + np.array([[6, 10], [9, 4], [2, 9], [14, 65], + [7, 23]]), + np.array([[2.5, 2], [2, -1], [1, -3], [-1, 6], + [3, -5]]) + ), ( + np.array([16, 41, 46, 21]), + np.array([16 + 16j, 18 - 9j, 1 - 4j]), + np.array([16, 9, 1, 4]), + np.array([1+1j, 2-1j, 1-4j]), + np.array([[64+16j, -16-32j], [93+62j, 61-66j], + [78-80j, 71-74j], [14-27j, 35+15j]]), + np.array([[2+1j, -3-2j], [1+1j, 1+1j], [1-2j, 1-2j], + [1-1j, 2+1j]]) + )]) +def test_pttrf_pttrs_NAG(d, e, d_expect, e_expect, b, x_expect): + # test to assure that wrapper is consistent with NAG Manual Mark 26 + # example problems: f07jdf and f07jef (real) + # examples: f07jrf and f07csf (complex) + # NAG examples provide 4 decimals. 
+ # (Links expire, so please search for "NAG Library Manual Mark 26" online) + + atol = 1e-4 + pttrf = get_lapack_funcs('pttrf', dtype=e[0]) + _d, _e, info = pttrf(d, e) + assert_allclose(_d, d_expect, atol=atol) + assert_allclose(_e, e_expect, atol=atol) + + pttrs = get_lapack_funcs('pttrs', dtype=e[0]) + _x, info = pttrs(_d, _e.conj(), b) + assert_allclose(_x, x_expect, atol=atol) + + # also test option `lower` + if e.dtype in COMPLEX_DTYPES: + _x, info = pttrs(_d, _e, b, lower=1) + assert_allclose(_x, x_expect, atol=atol) + + +def pteqr_get_d_e_A_z(dtype, realtype, n, compute_z): + # used by ?pteqr tests to build parameters + # returns tuple of (d, e, A, z) + if compute_z == 1: + # build Hermitian A from Q**T * tri * Q = A by creating Q and tri + A_eig = generate_random_dtype_array((n, n), dtype) + A_eig = A_eig + np.diag(np.zeros(n) + 4*n) + A_eig = (A_eig + A_eig.conj().T) / 2 + # obtain right eigenvectors (orthogonal) + vr = eigh(A_eig)[1] + # create tridiagonal matrix + d = generate_random_dtype_array((n,), realtype) + 4 + e = generate_random_dtype_array((n-1,), realtype) + tri = np.diag(d) + np.diag(e, 1) + np.diag(e, -1) + # Build A using these factors that sytrd would: (Q**T * tri * Q = A) + A = vr @ tri @ vr.conj().T + # vr is orthogonal + z = vr + + else: + # d and e are always real per lapack docs. + d = generate_random_dtype_array((n,), realtype) + e = generate_random_dtype_array((n-1,), realtype) + + # make SPD + d = d + 4 + A = np.diag(d) + np.diag(e, 1) + np.diag(e, -1) + z = np.diag(d) + np.diag(e, -1) + np.diag(e, 1) + return (d, e, A, z) + + +@pytest.mark.parametrize("dtype,realtype", + zip(DTYPES, REAL_DTYPES + REAL_DTYPES)) +@pytest.mark.parametrize("compute_z", range(3)) +def test_pteqr(dtype, realtype, compute_z): + ''' + Tests the ?pteqr lapack routine for all dtypes and compute_z parameters. + It generates random SPD matrix diagonals d and e, and then confirms + correct eigenvalues with scipy.linalg.eig. With applicable compute_z=2 it + tests that z can reform A. + ''' + seed(42) + atol = 1000*np.finfo(dtype).eps + pteqr = get_lapack_funcs(('pteqr'), dtype=dtype) + + n = 10 + + d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z) + + d_pteqr, e_pteqr, z_pteqr, info = pteqr(d=d, e=e, z=z, compute_z=compute_z) + assert_equal(info, 0, f"info = {info}, should be 0.") + + # compare the routine's eigenvalues with scipy.linalg.eig's. 
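+    # (eigh is used here since A is Hermitian by construction; sorting both
+    # sides makes the comparison independent of eigenvalue ordering.)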
+ assert_allclose(np.sort(eigh(A)[0]), np.sort(d_pteqr), atol=atol) + + if compute_z: + # verify z_pteqr as orthogonal + assert_allclose(z_pteqr @ np.conj(z_pteqr).T, np.identity(n), + atol=atol) + # verify that z_pteqr recombines to A + assert_allclose(z_pteqr @ np.diag(d_pteqr) @ np.conj(z_pteqr).T, + A, atol=atol) + + +@pytest.mark.parametrize("dtype,realtype", + zip(DTYPES, REAL_DTYPES + REAL_DTYPES)) +@pytest.mark.parametrize("compute_z", range(3)) +def test_pteqr_error_non_spd(dtype, realtype, compute_z): + seed(42) + pteqr = get_lapack_funcs(('pteqr'), dtype=dtype) + + n = 10 + d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z) + + # test with non-spd matrix + d_pteqr, e_pteqr, z_pteqr, info = pteqr(d - 4, e, z=z, compute_z=compute_z) + assert info > 0 + + +@pytest.mark.parametrize("dtype,realtype", + zip(DTYPES, REAL_DTYPES + REAL_DTYPES)) +@pytest.mark.parametrize("compute_z", range(3)) +def test_pteqr_raise_error_wrong_shape(dtype, realtype, compute_z): + seed(42) + pteqr = get_lapack_funcs(('pteqr'), dtype=dtype) + n = 10 + d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z) + # test with incorrect/incompatible array sizes + assert_raises(ValueError, pteqr, d[:-1], e, z=z, compute_z=compute_z) + assert_raises(ValueError, pteqr, d, e[:-1], z=z, compute_z=compute_z) + if compute_z: + assert_raises(ValueError, pteqr, d, e, z=z[:-1], compute_z=compute_z) + + +@pytest.mark.parametrize("dtype,realtype", + zip(DTYPES, REAL_DTYPES + REAL_DTYPES)) +@pytest.mark.parametrize("compute_z", range(3)) +def test_pteqr_error_singular(dtype, realtype, compute_z): + seed(42) + pteqr = get_lapack_funcs(('pteqr'), dtype=dtype) + n = 10 + d, e, A, z = pteqr_get_d_e_A_z(dtype, realtype, n, compute_z) + # test with singular matrix + d[0] = 0 + e[0] = 0 + d_pteqr, e_pteqr, z_pteqr, info = pteqr(d, e, z=z, compute_z=compute_z) + assert info > 0 + + +@pytest.mark.parametrize("compute_z,d,e,d_expect,z_expect", + [(2, # "I" + np.array([4.16, 5.25, 1.09, .62]), + np.array([3.17, -.97, .55]), + np.array([8.0023, 1.9926, 1.0014, 0.1237]), + np.array([[0.6326, 0.6245, -0.4191, 0.1847], + [0.7668, -0.4270, 0.4176, -0.2352], + [-0.1082, 0.6071, 0.4594, -0.6393], + [-0.0081, 0.2432, 0.6625, 0.7084]])), + ]) +def test_pteqr_NAG_f08jgf(compute_z, d, e, d_expect, z_expect): + ''' + Implements real (f08jgf) example from NAG Manual Mark 26. + Tests for correct outputs. + ''' + # the NAG manual has 4 decimals accuracy + atol = 1e-4 + pteqr = get_lapack_funcs(('pteqr'), dtype=d.dtype) + + z = np.diag(d) + np.diag(e, 1) + np.diag(e, -1) + _d, _e, _z, info = pteqr(d=d, e=e, z=z, compute_z=compute_z) + assert_allclose(_d, d_expect, atol=atol) + assert_allclose(np.abs(_z), np.abs(z_expect), atol=atol) + + +@pytest.mark.parametrize('dtype', DTYPES) +@pytest.mark.parametrize('matrix_size', [(3, 4), (7, 6), (6, 6)]) +def test_geqrfp(dtype, matrix_size): + # Tests for all dytpes, tall, wide, and square matrices. + # Using the routine with random matrix A, Q and R are obtained and then + # tested such that R is upper triangular and non-negative on the diagonal, + # and Q is an orthogonal matrix. Verifies that A=Q@R. It also + # tests against a matrix that for which the linalg.qr method returns + # negative diagonals, and for error messaging. 
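+    # (?geqrfp differs from plain ?geqrf only in guaranteeing a real,
+    # non-negative diagonal for R.)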
+ + # set test tolerance appropriate for dtype + np.random.seed(42) + rtol = 250*np.finfo(dtype).eps + atol = 100*np.finfo(dtype).eps + # get appropriate ?geqrfp for dtype + geqrfp = get_lapack_funcs(('geqrfp'), dtype=dtype) + gqr = get_lapack_funcs(("orgqr"), dtype=dtype) + + m, n = matrix_size + + # create random matrix of dimensions m x n + A = generate_random_dtype_array((m, n), dtype=dtype) + # create qr matrix using geqrfp + qr_A, tau, info = geqrfp(A) + + # obtain r from the upper triangular area + r = np.triu(qr_A) + + # obtain q from the orgqr lapack routine + # based on linalg.qr's extraction strategy of q with orgqr + + if m > n: + # this adds an extra column to the end of qr_A + # let qqr be an empty m x m matrix + qqr = np.zeros((m, m), dtype=dtype) + # set first n columns of qqr to qr_A + qqr[:, :n] = qr_A + # determine q from this qqr + # note that m is a sufficient for lwork based on LAPACK documentation + q = gqr(qqr, tau=tau, lwork=m)[0] + else: + q = gqr(qr_A[:, :m], tau=tau, lwork=m)[0] + + # test that q and r still make A + assert_allclose(q@r, A, rtol=rtol) + # ensure that q is orthogonal (that q @ transposed q is the identity) + assert_allclose(np.eye(q.shape[0]), q@(q.conj().T), rtol=rtol, + atol=atol) + # ensure r is upper tri by comparing original r to r as upper triangular + assert_allclose(r, np.triu(r), rtol=rtol) + # make sure diagonals of r are positive for this random solution + assert_(np.all(np.diag(r) > np.zeros(len(np.diag(r))))) + # ensure that info is zero for this success + assert_(info == 0) + + # test that this routine gives r diagonals that are positive for a + # matrix that returns negatives in the diagonal with scipy.linalg.rq + A_negative = generate_random_dtype_array((n, m), dtype=dtype) * -1 + r_rq_neg, q_rq_neg = qr(A_negative) + rq_A_neg, tau_neg, info_neg = geqrfp(A_negative) + # assert that any of the entries on the diagonal from linalg.qr + # are negative and that all of geqrfp are positive. 
+ assert_(np.any(np.diag(r_rq_neg) < 0) and + np.all(np.diag(r) > 0)) + + +def test_geqrfp_errors_with_empty_array(): + # check that empty array raises good error message + A_empty = np.array([]) + geqrfp = get_lapack_funcs('geqrfp', dtype=A_empty.dtype) + assert_raises(Exception, geqrfp, A_empty) + + +@pytest.mark.parametrize("driver", ['ev', 'evd', 'evr', 'evx']) +@pytest.mark.parametrize("pfx", ['sy', 'he']) +def test_standard_eigh_lworks(pfx, driver): + n = 1200 # Some sufficiently big arbitrary number + dtype = REAL_DTYPES if pfx == 'sy' else COMPLEX_DTYPES + sc_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[0]) + dz_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[1]) + try: + _compute_lwork(sc_dlw, n, lower=1) + _compute_lwork(dz_dlw, n, lower=1) + except Exception as e: + pytest.fail(f"{pfx+driver}_lwork raised unexpected exception: {e}") + + +@pytest.mark.parametrize("driver", ['gv', 'gvx']) +@pytest.mark.parametrize("pfx", ['sy', 'he']) +def test_generalized_eigh_lworks(pfx, driver): + n = 1200 # Some sufficiently big arbitrary number + dtype = REAL_DTYPES if pfx == 'sy' else COMPLEX_DTYPES + sc_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[0]) + dz_dlw = get_lapack_funcs(pfx+driver+'_lwork', dtype=dtype[1]) + # Shouldn't raise any exceptions + try: + _compute_lwork(sc_dlw, n, uplo="L") + _compute_lwork(dz_dlw, n, uplo="L") + except Exception as e: + pytest.fail(f"{pfx+driver}_lwork raised unexpected exception: {e}") + + +@pytest.mark.parametrize("dtype_", DTYPES) +@pytest.mark.parametrize("m", [1, 10, 100, 1000]) +def test_orcsd_uncsd_lwork(dtype_, m): + seed(1234) + p = randint(0, m) + q = m - p + pfx = 'or' if dtype_ in REAL_DTYPES else 'un' + dlw = pfx + 'csd_lwork' + lw = get_lapack_funcs(dlw, dtype=dtype_) + lwval = _compute_lwork(lw, m, p, q) + lwval = lwval if pfx == 'un' else (lwval,) + assert all([x > 0 for x in lwval]) + + +@pytest.mark.parametrize("dtype_", DTYPES) +def test_orcsd_uncsd(dtype_): + m, p, q = 250, 80, 170 + + pfx = 'or' if dtype_ in REAL_DTYPES else 'un' + X = ortho_group.rvs(m) if pfx == 'or' else unitary_group.rvs(m) + + drv, dlw = get_lapack_funcs((pfx + 'csd', pfx + 'csd_lwork'), dtype=dtype_) + lwval = _compute_lwork(dlw, m, p, q) + lwvals = {'lwork': lwval} if pfx == 'or' else dict(zip(['lwork', + 'lrwork'], lwval)) + + cs11, cs12, cs21, cs22, theta, u1, u2, v1t, v2t, info =\ + drv(X[:p, :q], X[:p, q:], X[p:, :q], X[p:, q:], **lwvals) + + assert info == 0 + + U = block_diag(u1, u2) + VH = block_diag(v1t, v2t) + r = min(min(p, q), min(m-p, m-q)) + n11 = min(p, q) - r + n12 = min(p, m-q) - r + n21 = min(m-p, q) - r + n22 = min(m-p, m-q) - r + + S = np.zeros((m, m), dtype=dtype_) + one = dtype_(1.) + for i in range(n11): + S[i, i] = one + for i in range(n22): + S[p+i, q+i] = one + for i in range(n12): + S[i+n11+r, i+n11+r+n21+n22+r] = -one + for i in range(n21): + S[p+n22+r+i, n11+r+i] = one + + for i in range(r): + S[i+n11, i+n11] = np.cos(theta[i]) + S[p+n22+i, i+r+n21+n22] = np.cos(theta[i]) + + S[i+n11, i+n11+n21+n22+r] = -np.sin(theta[i]) + S[p+n22+i, i+n11] = np.sin(theta[i]) + + Xc = U @ S @ VH + assert_allclose(X, Xc, rtol=0., atol=1e4*np.finfo(dtype_).eps) + + +@pytest.mark.parametrize("dtype", DTYPES) +@pytest.mark.parametrize("trans_bool", [False, True]) +@pytest.mark.parametrize("fact", ["F", "N"]) +def test_gtsvx(dtype, trans_bool, fact): + """ + These tests uses ?gtsvx to solve a random Ax=b system for each dtype. 
+ It tests that the outputs define an LU matrix, that inputs are unmodified, + transposal options, incompatible shapes, singular matrices, and + singular factorizations. It parametrizes DTYPES and the 'fact' value along + with the fact related inputs. + """ + seed(42) + # set test tolerance appropriate for dtype + atol = 100 * np.finfo(dtype).eps + # obtain routine + gtsvx, gttrf = get_lapack_funcs(('gtsvx', 'gttrf'), dtype=dtype) + # Generate random tridiagonal matrix A + n = 10 + dl = generate_random_dtype_array((n-1,), dtype=dtype) + d = generate_random_dtype_array((n,), dtype=dtype) + du = generate_random_dtype_array((n-1,), dtype=dtype) + A = np.diag(dl, -1) + np.diag(d) + np.diag(du, 1) + # generate random solution x + x = generate_random_dtype_array((n, 2), dtype=dtype) + # create b from x for equation Ax=b + trans = ("T" if dtype in REAL_DTYPES else "C") if trans_bool else "N" + b = (A.conj().T if trans_bool else A) @ x + + # store a copy of the inputs to check they haven't been modified later + inputs_cpy = [dl.copy(), d.copy(), du.copy(), b.copy()] + + # set these to None if fact = 'N', or the output of gttrf is fact = 'F' + dlf_, df_, duf_, du2f_, ipiv_, info_ = \ + gttrf(dl, d, du) if fact == 'F' else [None]*6 + + gtsvx_out = gtsvx(dl, d, du, b, fact=fact, trans=trans, dlf=dlf_, df=df_, + duf=duf_, du2=du2f_, ipiv=ipiv_) + dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out + assert_(info == 0, f"?gtsvx info = {info}, should be zero") + + # assure that inputs are unmodified + assert_array_equal(dl, inputs_cpy[0]) + assert_array_equal(d, inputs_cpy[1]) + assert_array_equal(du, inputs_cpy[2]) + assert_array_equal(b, inputs_cpy[3]) + + # test that x_soln matches the expected x + assert_allclose(x, x_soln, atol=atol) + + # assert that the outputs are of correct type or shape + # rcond should be a scalar + assert_(hasattr(rcond, "__len__") is not True, + f"rcond should be scalar but is {rcond}") + # ferr should be length of # of cols in x + assert_(ferr.shape[0] == b.shape[1], "ferr.shape is {} but should be {}," + .format(ferr.shape[0], b.shape[1])) + # berr should be length of # of cols in x + assert_(berr.shape[0] == b.shape[1], "berr.shape is {} but should be {}," + .format(berr.shape[0], b.shape[1])) + + +@pytest.mark.parametrize("dtype", DTYPES) +@pytest.mark.parametrize("trans_bool", [0, 1]) +@pytest.mark.parametrize("fact", ["F", "N"]) +def test_gtsvx_error_singular(dtype, trans_bool, fact): + seed(42) + # obtain routine + gtsvx, gttrf = get_lapack_funcs(('gtsvx', 'gttrf'), dtype=dtype) + # Generate random tridiagonal matrix A + n = 10 + dl = generate_random_dtype_array((n-1,), dtype=dtype) + d = generate_random_dtype_array((n,), dtype=dtype) + du = generate_random_dtype_array((n-1,), dtype=dtype) + A = np.diag(dl, -1) + np.diag(d) + np.diag(du, 1) + # generate random solution x + x = generate_random_dtype_array((n, 2), dtype=dtype) + # create b from x for equation Ax=b + trans = "T" if dtype in REAL_DTYPES else "C" + b = (A.conj().T if trans_bool else A) @ x + + # set these to None if fact = 'N', or the output of gttrf is fact = 'F' + dlf_, df_, duf_, du2f_, ipiv_, info_ = \ + gttrf(dl, d, du) if fact == 'F' else [None]*6 + + gtsvx_out = gtsvx(dl, d, du, b, fact=fact, trans=trans, dlf=dlf_, df=df_, + duf=duf_, du2=du2f_, ipiv=ipiv_) + dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out + # test with singular matrix + # no need to test inputs with fact "F" since ?gttrf already does. 
+ if fact == "N": + # Construct a singular example manually + d[-1] = 0 + dl[-1] = 0 + # solve using routine + gtsvx_out = gtsvx(dl, d, du, b) + dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out + # test for the singular matrix. + assert info > 0, "info should be > 0 for singular matrix" + + elif fact == 'F': + # assuming that a singular factorization is input + df_[-1] = 0 + duf_[-1] = 0 + du2f_[-1] = 0 + + gtsvx_out = gtsvx(dl, d, du, b, fact=fact, dlf=dlf_, df=df_, duf=duf_, + du2=du2f_, ipiv=ipiv_) + dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out + # info should not be zero and should provide index of illegal value + assert info > 0, "info should be > 0 for singular matrix" + + +@pytest.mark.parametrize("dtype", DTYPES*2) +@pytest.mark.parametrize("trans_bool", [False, True]) +@pytest.mark.parametrize("fact", ["F", "N"]) +def test_gtsvx_error_incompatible_size(dtype, trans_bool, fact): + seed(42) + # obtain routine + gtsvx, gttrf = get_lapack_funcs(('gtsvx', 'gttrf'), dtype=dtype) + # Generate random tridiagonal matrix A + n = 10 + dl = generate_random_dtype_array((n-1,), dtype=dtype) + d = generate_random_dtype_array((n,), dtype=dtype) + du = generate_random_dtype_array((n-1,), dtype=dtype) + A = np.diag(dl, -1) + np.diag(d) + np.diag(du, 1) + # generate random solution x + x = generate_random_dtype_array((n, 2), dtype=dtype) + # create b from x for equation Ax=b + trans = "T" if dtype in REAL_DTYPES else "C" + b = (A.conj().T if trans_bool else A) @ x + + # set these to None if fact = 'N', or the output of gttrf is fact = 'F' + dlf_, df_, duf_, du2f_, ipiv_, info_ = \ + gttrf(dl, d, du) if fact == 'F' else [None]*6 + + if fact == "N": + assert_raises(ValueError, gtsvx, dl[:-1], d, du, b, + fact=fact, trans=trans, dlf=dlf_, df=df_, + duf=duf_, du2=du2f_, ipiv=ipiv_) + assert_raises(ValueError, gtsvx, dl, d[:-1], du, b, + fact=fact, trans=trans, dlf=dlf_, df=df_, + duf=duf_, du2=du2f_, ipiv=ipiv_) + assert_raises(ValueError, gtsvx, dl, d, du[:-1], b, + fact=fact, trans=trans, dlf=dlf_, df=df_, + duf=duf_, du2=du2f_, ipiv=ipiv_) + assert_raises(Exception, gtsvx, dl, d, du, b[:-1], + fact=fact, trans=trans, dlf=dlf_, df=df_, + duf=duf_, du2=du2f_, ipiv=ipiv_) + else: + assert_raises(ValueError, gtsvx, dl, d, du, b, + fact=fact, trans=trans, dlf=dlf_[:-1], df=df_, + duf=duf_, du2=du2f_, ipiv=ipiv_) + assert_raises(ValueError, gtsvx, dl, d, du, b, + fact=fact, trans=trans, dlf=dlf_, df=df_[:-1], + duf=duf_, du2=du2f_, ipiv=ipiv_) + assert_raises(ValueError, gtsvx, dl, d, du, b, + fact=fact, trans=trans, dlf=dlf_, df=df_, + duf=duf_[:-1], du2=du2f_, ipiv=ipiv_) + assert_raises(ValueError, gtsvx, dl, d, du, b, + fact=fact, trans=trans, dlf=dlf_, df=df_, + duf=duf_, du2=du2f_[:-1], ipiv=ipiv_) + + +@pytest.mark.parametrize("du,d,dl,b,x", + [(np.array([2.1, -1.0, 1.9, 8.0]), + np.array([3.0, 2.3, -5.0, -0.9, 7.1]), + np.array([3.4, 3.6, 7.0, -6.0]), + np.array([[2.7, 6.6], [-.5, 10.8], [2.6, -3.2], + [.6, -11.2], [2.7, 19.1]]), + np.array([[-4, 5], [7, -4], [3, -3], [-4, -2], + [-3, 1]])), + (np.array([2 - 1j, 2 + 1j, -1 + 1j, 1 - 1j]), + np.array([-1.3 + 1.3j, -1.3 + 1.3j, -1.3 + 3.3j, + -.3 + 4.3j, -3.3 + 1.3j]), + np.array([1 - 2j, 1 + 1j, 2 - 3j, 1 + 1j]), + np.array([[2.4 - 5j, 2.7 + 6.9j], + [3.4 + 18.2j, -6.9 - 5.3j], + [-14.7 + 9.7j, -6 - .6j], + [31.9 - 7.7j, -3.9 + 9.3j], + [-1 + 1.6j, -3 + 12.2j]]), + np.array([[1 + 1j, 2 - 1j], [3 - 1j, 1 + 2j], + [4 + 5j, -1 + 1j], [-1 - 2j, 2 + 1j], + [1 - 1j, 2 - 2j]]))]) +def test_gtsvx_NAG(du, d, dl, 
b, x): + # Test to ensure wrapper is consistent with NAG Manual Mark 26 + # example problems: real (f07cbf) and complex (f07cpf) + gtsvx = get_lapack_funcs('gtsvx', dtype=d.dtype) + + gtsvx_out = gtsvx(dl, d, du, b) + dlf, df, duf, du2f, ipiv, x_soln, rcond, ferr, berr, info = gtsvx_out + + assert_array_almost_equal(x, x_soln) + + +@pytest.mark.parametrize("dtype,realtype", zip(DTYPES, REAL_DTYPES + + REAL_DTYPES)) +@pytest.mark.parametrize("fact,df_de_lambda", + [("F", + lambda d, e:get_lapack_funcs('pttrf', + dtype=e.dtype)(d, e)), + ("N", lambda d, e: (None, None, None))]) +def test_ptsvx(dtype, realtype, fact, df_de_lambda): + ''' + This tests the ?ptsvx lapack routine wrapper to solve a random system + Ax = b for all dtypes and input variations. Tests for: unmodified + input parameters, fact options, incompatible matrix shapes raise an error, + and singular matrices return info of illegal value. + ''' + seed(42) + # set test tolerance appropriate for dtype + atol = 100 * np.finfo(dtype).eps + ptsvx = get_lapack_funcs('ptsvx', dtype=dtype) + n = 5 + # create diagonals according to size and dtype + d = generate_random_dtype_array((n,), realtype) + 4 + e = generate_random_dtype_array((n-1,), dtype) + A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1) + x_soln = generate_random_dtype_array((n, 2), dtype=dtype) + b = A @ x_soln + + # use lambda to determine what df, ef are + df, ef, info = df_de_lambda(d, e) + + # create copy to later test that they are unmodified + diag_cpy = [d.copy(), e.copy(), b.copy()] + + # solve using routine + df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b, fact=fact, + df=df, ef=ef) + # d, e, and b should be unmodified + assert_array_equal(d, diag_cpy[0]) + assert_array_equal(e, diag_cpy[1]) + assert_array_equal(b, diag_cpy[2]) + assert_(info == 0, f"info should be 0 but is {info}.") + assert_array_almost_equal(x_soln, x) + + # test that the factors from ptsvx can be recombined to make A + L = np.diag(ef, -1) + np.diag(np.ones(n)) + D = np.diag(df) + assert_allclose(A, L@D@(np.conj(L).T), atol=atol) + + # assert that the outputs are of correct type or shape + # rcond should be a scalar + assert not hasattr(rcond, "__len__"), \ + f"rcond should be scalar but is {rcond}" + # ferr should be length of # of cols in x + assert_(ferr.shape == (2,), "ferr.shape is {} but should be ({},)" + .format(ferr.shape, x_soln.shape[1])) + # berr should be length of # of cols in x + assert_(berr.shape == (2,), "berr.shape is {} but should be ({},)" + .format(berr.shape, x_soln.shape[1])) + + +@pytest.mark.parametrize("dtype,realtype", zip(DTYPES, REAL_DTYPES + + REAL_DTYPES)) +@pytest.mark.parametrize("fact,df_de_lambda", + [("F", + lambda d, e:get_lapack_funcs('pttrf', + dtype=e.dtype)(d, e)), + ("N", lambda d, e: (None, None, None))]) +def test_ptsvx_error_raise_errors(dtype, realtype, fact, df_de_lambda): + seed(42) + ptsvx = get_lapack_funcs('ptsvx', dtype=dtype) + n = 5 + # create diagonals according to size and dtype + d = generate_random_dtype_array((n,), realtype) + 4 + e = generate_random_dtype_array((n-1,), dtype) + A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1) + x_soln = generate_random_dtype_array((n, 2), dtype=dtype) + b = A @ x_soln + + # use lambda to determine what df, ef are + df, ef, info = df_de_lambda(d, e) + + # test with malformatted array sizes + assert_raises(ValueError, ptsvx, d[:-1], e, b, fact=fact, df=df, ef=ef) + assert_raises(ValueError, ptsvx, d, e[:-1], b, fact=fact, df=df, ef=ef) + assert_raises(Exception, ptsvx, d, e, b[:-1], 
fact=fact, df=df, ef=ef) + + +@pytest.mark.parametrize("dtype,realtype", zip(DTYPES, REAL_DTYPES + + REAL_DTYPES)) +@pytest.mark.parametrize("fact,df_de_lambda", + [("F", + lambda d, e:get_lapack_funcs('pttrf', + dtype=e.dtype)(d, e)), + ("N", lambda d, e: (None, None, None))]) +def test_ptsvx_non_SPD_singular(dtype, realtype, fact, df_de_lambda): + seed(42) + ptsvx = get_lapack_funcs('ptsvx', dtype=dtype) + n = 5 + # create diagonals according to size and dtype + d = generate_random_dtype_array((n,), realtype) + 4 + e = generate_random_dtype_array((n-1,), dtype) + A = np.diag(d) + np.diag(e, -1) + np.diag(np.conj(e), 1) + x_soln = generate_random_dtype_array((n, 2), dtype=dtype) + b = A @ x_soln + + # use lambda to determine what df, ef are + df, ef, info = df_de_lambda(d, e) + + if fact == "N": + d[3] = 0 + # obtain new df, ef + df, ef, info = df_de_lambda(d, e) + # solve using routine + df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b) + # test for the singular matrix. + assert info > 0 and info <= n + + # non SPD matrix + d = generate_random_dtype_array((n,), realtype) + df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b) + assert info > 0 and info <= n + else: + # assuming that someone is using a singular factorization + df, ef, info = df_de_lambda(d, e) + df[0] = 0 + ef[0] = 0 + df, ef, x, rcond, ferr, berr, info = ptsvx(d, e, b, fact=fact, + df=df, ef=ef) + assert info > 0 + + +@pytest.mark.parametrize('d,e,b,x', + [(np.array([4, 10, 29, 25, 5]), + np.array([-2, -6, 15, 8]), + np.array([[6, 10], [9, 4], [2, 9], [14, 65], + [7, 23]]), + np.array([[2.5, 2], [2, -1], [1, -3], + [-1, 6], [3, -5]])), + (np.array([16, 41, 46, 21]), + np.array([16 + 16j, 18 - 9j, 1 - 4j]), + np.array([[64 + 16j, -16 - 32j], + [93 + 62j, 61 - 66j], + [78 - 80j, 71 - 74j], + [14 - 27j, 35 + 15j]]), + np.array([[2 + 1j, -3 - 2j], + [1 + 1j, 1 + 1j], + [1 - 2j, 1 - 2j], + [1 - 1j, 2 + 1j]]))]) +def test_ptsvx_NAG(d, e, b, x): + # test to assure that wrapper is consistent with NAG Manual Mark 26 + # example problemss: f07jbf, f07jpf + # (Links expire, so please search for "NAG Library Manual Mark 26" online) + + # obtain routine with correct type based on e.dtype + ptsvx = get_lapack_funcs('ptsvx', dtype=e.dtype) + # solve using routine + df, ef, x_ptsvx, rcond, ferr, berr, info = ptsvx(d, e, b) + # determine ptsvx's solution and x are the same. + assert_array_almost_equal(x, x_ptsvx) + + +@pytest.mark.parametrize('lower', [False, True]) +@pytest.mark.parametrize('dtype', DTYPES) +def test_pptrs_pptri_pptrf_ppsv_ppcon(dtype, lower): + seed(1234) + atol = np.finfo(dtype).eps*100 + # Manual conversion to/from packed format is feasible here. + n, nrhs = 10, 4 + a = generate_random_dtype_array([n, n], dtype=dtype) + b = generate_random_dtype_array([n, nrhs], dtype=dtype) + + a = a.conj().T + a + np.eye(n, dtype=dtype) * dtype(5.) 
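+    # Packed ("pp") storage keeps one triangle of the matrix column by column;
+    # e.g. for n=3, lower gives ap = [a00, a10, a20, a11, a21, a22] and upper
+    # gives ap = [a00, a01, a11, a02, a12, a22].  The index pairs built below
+    # reproduce that ordering, so a[inds] is the packed vector expected by the
+    # ?pp* routines.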
+ if lower: + inds = ([x for y in range(n) for x in range(y, n)], + [y for y in range(n) for x in range(y, n)]) + else: + inds = ([x for y in range(1, n+1) for x in range(y)], + [y-1 for y in range(1, n+1) for x in range(y)]) + ap = a[inds] + ppsv, pptrf, pptrs, pptri, ppcon = get_lapack_funcs( + ('ppsv', 'pptrf', 'pptrs', 'pptri', 'ppcon'), + dtype=dtype, + ilp64="preferred") + + ul, info = pptrf(n, ap, lower=lower) + assert_equal(info, 0) + aul = cholesky(a, lower=lower)[inds] + assert_allclose(ul, aul, rtol=0, atol=atol) + + uli, info = pptri(n, ul, lower=lower) + assert_equal(info, 0) + auli = inv(a)[inds] + assert_allclose(uli, auli, rtol=0, atol=atol) + + x, info = pptrs(n, ul, b, lower=lower) + assert_equal(info, 0) + bx = solve(a, b) + assert_allclose(x, bx, rtol=0, atol=atol) + + xv, info = ppsv(n, ap, b, lower=lower) + assert_equal(info, 0) + assert_allclose(xv, bx, rtol=0, atol=atol) + + anorm = np.linalg.norm(a, 1) + rcond, info = ppcon(n, ap, anorm=anorm, lower=lower) + assert_equal(info, 0) + assert_(abs(1/rcond - np.linalg.cond(a, p=1))*rcond < 1) + + +@pytest.mark.parametrize('dtype', DTYPES) +def test_gees_trexc(dtype): + seed(1234) + atol = np.finfo(dtype).eps*100 + + n = 10 + a = generate_random_dtype_array([n, n], dtype=dtype) + + gees, trexc = get_lapack_funcs(('gees', 'trexc'), dtype=dtype) + + result = gees(lambda x: None, a, overwrite_a=False) + assert_equal(result[-1], 0) + + t = result[0] + z = result[-3] + + d2 = t[6, 6] + + if dtype in COMPLEX_DTYPES: + assert_allclose(t, np.triu(t), rtol=0, atol=atol) + + assert_allclose(z @ t @ z.conj().T, a, rtol=0, atol=atol) + + result = trexc(t, z, 7, 1) + assert_equal(result[-1], 0) + + t = result[0] + z = result[-2] + + if dtype in COMPLEX_DTYPES: + assert_allclose(t, np.triu(t), rtol=0, atol=atol) + + assert_allclose(z @ t @ z.conj().T, a, rtol=0, atol=atol) + + assert_allclose(t[0, 0], d2, rtol=0, atol=atol) + + +@pytest.mark.parametrize( + "t, expect, ifst, ilst", + [(np.array([[0.80, -0.11, 0.01, 0.03], + [0.00, -0.10, 0.25, 0.35], + [0.00, -0.65, -0.10, 0.20], + [0.00, 0.00, 0.00, -0.10]]), + np.array([[-0.1000, -0.6463, 0.0874, 0.2010], + [0.2514, -0.1000, 0.0927, 0.3505], + [0.0000, 0.0000, 0.8000, -0.0117], + [0.0000, 0.0000, 0.0000, -0.1000]]), + 2, 1), + (np.array([[-6.00 - 7.00j, 0.36 - 0.36j, -0.19 + 0.48j, 0.88 - 0.25j], + [0.00 + 0.00j, -5.00 + 2.00j, -0.03 - 0.72j, -0.23 + 0.13j], + [0.00 + 0.00j, 0.00 + 0.00j, 8.00 - 1.00j, 0.94 + 0.53j], + [0.00 + 0.00j, 0.00 + 0.00j, 0.00 + 0.00j, 3.00 - 4.00j]]), + np.array([[-5.0000 + 2.0000j, -0.1574 + 0.7143j, + 0.1781 - 0.1913j, 0.3950 + 0.3861j], + [0.0000 + 0.0000j, 8.0000 - 1.0000j, + 1.0742 + 0.1447j, 0.2515 - 0.3397j], + [0.0000 + 0.0000j, 0.0000 + 0.0000j, + 3.0000 - 4.0000j, 0.2264 + 0.8962j], + [0.0000 + 0.0000j, 0.0000 + 0.0000j, + 0.0000 + 0.0000j, -6.0000 - 7.0000j]]), + 1, 4)]) +def test_trexc_NAG(t, ifst, ilst, expect): + """ + This test implements the example found in the NAG manual, + f08qfc, f08qtc, f08qgc, f08quc. 
+ """ + # NAG manual provides accuracy up to 4 decimals + atol = 1e-4 + trexc = get_lapack_funcs('trexc', dtype=t.dtype) + + result = trexc(t, t, ifst, ilst, wantq=0) + assert_equal(result[-1], 0) + + t = result[0] + assert_allclose(expect, t, atol=atol) + + +@pytest.mark.parametrize('dtype', DTYPES) +def test_gges_tgexc(dtype): + if ( + dtype == np.float32 and + sys.platform == 'darwin' and + blas_provider == 'openblas' and + blas_version < '0.3.21.dev' + ): + pytest.xfail("gges[float32] broken for OpenBLAS on macOS, see gh-16949") + + seed(1234) + atol = np.finfo(dtype).eps*100 + + n = 10 + a = generate_random_dtype_array([n, n], dtype=dtype) + b = generate_random_dtype_array([n, n], dtype=dtype) + + gges, tgexc = get_lapack_funcs(('gges', 'tgexc'), dtype=dtype) + + result = gges(lambda x: None, a, b, overwrite_a=False, overwrite_b=False) + assert_equal(result[-1], 0) + + s = result[0] + t = result[1] + q = result[-4] + z = result[-3] + + d1 = s[0, 0] / t[0, 0] + d2 = s[6, 6] / t[6, 6] + + if dtype in COMPLEX_DTYPES: + assert_allclose(s, np.triu(s), rtol=0, atol=atol) + assert_allclose(t, np.triu(t), rtol=0, atol=atol) + + assert_allclose(q @ s @ z.conj().T, a, rtol=0, atol=atol) + assert_allclose(q @ t @ z.conj().T, b, rtol=0, atol=atol) + + result = tgexc(s, t, q, z, 7, 1) + assert_equal(result[-1], 0) + + s = result[0] + t = result[1] + q = result[2] + z = result[3] + + if dtype in COMPLEX_DTYPES: + assert_allclose(s, np.triu(s), rtol=0, atol=atol) + assert_allclose(t, np.triu(t), rtol=0, atol=atol) + + assert_allclose(q @ s @ z.conj().T, a, rtol=0, atol=atol) + assert_allclose(q @ t @ z.conj().T, b, rtol=0, atol=atol) + + assert_allclose(s[0, 0] / t[0, 0], d2, rtol=0, atol=atol) + assert_allclose(s[1, 1] / t[1, 1], d1, rtol=0, atol=atol) + + +@pytest.mark.parametrize('dtype', DTYPES) +def test_gees_trsen(dtype): + seed(1234) + atol = np.finfo(dtype).eps*100 + + n = 10 + a = generate_random_dtype_array([n, n], dtype=dtype) + + gees, trsen, trsen_lwork = get_lapack_funcs( + ('gees', 'trsen', 'trsen_lwork'), dtype=dtype) + + result = gees(lambda x: None, a, overwrite_a=False) + assert_equal(result[-1], 0) + + t = result[0] + z = result[-3] + + d2 = t[6, 6] + + if dtype in COMPLEX_DTYPES: + assert_allclose(t, np.triu(t), rtol=0, atol=atol) + + assert_allclose(z @ t @ z.conj().T, a, rtol=0, atol=atol) + + select = np.zeros(n) + select[6] = 1 + + lwork = _compute_lwork(trsen_lwork, select, t) + + if dtype in COMPLEX_DTYPES: + result = trsen(select, t, z, lwork=lwork) + else: + result = trsen(select, t, z, lwork=lwork, liwork=lwork[1]) + assert_equal(result[-1], 0) + + t = result[0] + z = result[1] + + if dtype in COMPLEX_DTYPES: + assert_allclose(t, np.triu(t), rtol=0, atol=atol) + + assert_allclose(z @ t @ z.conj().T, a, rtol=0, atol=atol) + + assert_allclose(t[0, 0], d2, rtol=0, atol=atol) + + +@pytest.mark.parametrize( + "t, q, expect, select, expect_s, expect_sep", + [(np.array([[0.7995, -0.1144, 0.0060, 0.0336], + [0.0000, -0.0994, 0.2478, 0.3474], + [0.0000, -0.6483, -0.0994, 0.2026], + [0.0000, 0.0000, 0.0000, -0.1007]]), + np.array([[0.6551, 0.1037, 0.3450, 0.6641], + [0.5236, -0.5807, -0.6141, -0.1068], + [-0.5362, -0.3073, -0.2935, 0.7293], + [0.0956, 0.7467, -0.6463, 0.1249]]), + np.array([[0.3500, 0.4500, -0.1400, -0.1700], + [0.0900, 0.0700, -0.5399, 0.3500], + [-0.4400, -0.3300, -0.0300, 0.1700], + [0.2500, -0.3200, -0.1300, 0.1100]]), + np.array([1, 0, 0, 1]), + 1.75e+00, 3.22e+00), + (np.array([[-6.0004 - 6.9999j, 0.3637 - 0.3656j, + -0.1880 + 0.4787j, 0.8785 - 0.2539j], + 
[0.0000 + 0.0000j, -5.0000 + 2.0060j, + -0.0307 - 0.7217j, -0.2290 + 0.1313j], + [0.0000 + 0.0000j, 0.0000 + 0.0000j, + 7.9982 - 0.9964j, 0.9357 + 0.5359j], + [0.0000 + 0.0000j, 0.0000 + 0.0000j, + 0.0000 + 0.0000j, 3.0023 - 3.9998j]]), + np.array([[-0.8347 - 0.1364j, -0.0628 + 0.3806j, + 0.2765 - 0.0846j, 0.0633 - 0.2199j], + [0.0664 - 0.2968j, 0.2365 + 0.5240j, + -0.5877 - 0.4208j, 0.0835 + 0.2183j], + [-0.0362 - 0.3215j, 0.3143 - 0.5473j, + 0.0576 - 0.5736j, 0.0057 - 0.4058j], + [0.0086 + 0.2958j, -0.3416 - 0.0757j, + -0.1900 - 0.1600j, 0.8327 - 0.1868j]]), + np.array([[-3.9702 - 5.0406j, -4.1108 + 3.7002j, + -0.3403 + 1.0098j, 1.2899 - 0.8590j], + [0.3397 - 1.5006j, 1.5201 - 0.4301j, + 1.8797 - 5.3804j, 3.3606 + 0.6498j], + [3.3101 - 3.8506j, 2.4996 + 3.4504j, + 0.8802 - 1.0802j, 0.6401 - 1.4800j], + [-1.0999 + 0.8199j, 1.8103 - 1.5905j, + 3.2502 + 1.3297j, 1.5701 - 3.4397j]]), + np.array([1, 0, 0, 1]), + 1.02e+00, 1.82e-01)]) +def test_trsen_NAG(t, q, select, expect, expect_s, expect_sep): + """ + This test implements the example found in the NAG manual, + f08qgc, f08quc. + """ + # NAG manual provides accuracy up to 4 and 2 decimals + atol = 1e-4 + atol2 = 1e-2 + trsen, trsen_lwork = get_lapack_funcs( + ('trsen', 'trsen_lwork'), dtype=t.dtype) + + lwork = _compute_lwork(trsen_lwork, select, t) + + if t.dtype in COMPLEX_DTYPES: + result = trsen(select, t, q, lwork=lwork) + else: + result = trsen(select, t, q, lwork=lwork, liwork=lwork[1]) + assert_equal(result[-1], 0) + + t = result[0] + q = result[1] + if t.dtype in COMPLEX_DTYPES: + s = result[4] + sep = result[5] + else: + s = result[5] + sep = result[6] + + assert_allclose(expect, q @ t @ q.conj().T, atol=atol) + assert_allclose(expect_s, 1 / s, atol=atol2) + assert_allclose(expect_sep, 1 / sep, atol=atol2) + + +@pytest.mark.parametrize('dtype', DTYPES) +def test_gges_tgsen(dtype): + if ( + dtype == np.float32 and + sys.platform == 'darwin' and + blas_provider == 'openblas' and + blas_version < '0.3.21.dev' + ): + pytest.xfail("gges[float32] broken for OpenBLAS on macOS, see gh-16949") + + seed(1234) + atol = np.finfo(dtype).eps*100 + + n = 10 + a = generate_random_dtype_array([n, n], dtype=dtype) + b = generate_random_dtype_array([n, n], dtype=dtype) + + gges, tgsen, tgsen_lwork = get_lapack_funcs( + ('gges', 'tgsen', 'tgsen_lwork'), dtype=dtype) + + result = gges(lambda x: None, a, b, overwrite_a=False, overwrite_b=False) + assert_equal(result[-1], 0) + + s = result[0] + t = result[1] + q = result[-4] + z = result[-3] + + d1 = s[0, 0] / t[0, 0] + d2 = s[6, 6] / t[6, 6] + + if dtype in COMPLEX_DTYPES: + assert_allclose(s, np.triu(s), rtol=0, atol=atol) + assert_allclose(t, np.triu(t), rtol=0, atol=atol) + + assert_allclose(q @ s @ z.conj().T, a, rtol=0, atol=atol) + assert_allclose(q @ t @ z.conj().T, b, rtol=0, atol=atol) + + select = np.zeros(n) + select[6] = 1 + + lwork = _compute_lwork(tgsen_lwork, select, s, t) + + # off-by-one error in LAPACK, see gh-issue #13397 + lwork = (lwork[0]+1, lwork[1]) + + result = tgsen(select, s, t, q, z, lwork=lwork) + assert_equal(result[-1], 0) + + s = result[0] + t = result[1] + q = result[-7] + z = result[-6] + + if dtype in COMPLEX_DTYPES: + assert_allclose(s, np.triu(s), rtol=0, atol=atol) + assert_allclose(t, np.triu(t), rtol=0, atol=atol) + + assert_allclose(q @ s @ z.conj().T, a, rtol=0, atol=atol) + assert_allclose(q @ t @ z.conj().T, b, rtol=0, atol=atol) + + assert_allclose(s[0, 0] / t[0, 0], d2, rtol=0, atol=atol) + assert_allclose(s[1, 1] / t[1, 1], d1, rtol=0, atol=atol) + + 
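+# ?tgsyl solves the generalized Sylvester equations
+#     A @ R - L @ B = scale * C,    D @ R - L @ E = scale * F
+# (or, with trans="T", A.T @ R + D.T @ L = scale * C and
+# R @ B.T + L @ E.T = -scale * F).  The NAG data below and the residual
+# checks in test_tgsyl verify exactly these identities.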
+@pytest.mark.parametrize( + "a, b, c, d, e, f, rans, lans", + [(np.array([[4.0, 1.0, 1.0, 2.0], + [0.0, 3.0, 4.0, 1.0], + [0.0, 1.0, 3.0, 1.0], + [0.0, 0.0, 0.0, 6.0]]), + np.array([[1.0, 1.0, 1.0, 1.0], + [0.0, 3.0, 4.0, 1.0], + [0.0, 1.0, 3.0, 1.0], + [0.0, 0.0, 0.0, 4.0]]), + np.array([[-4.0, 7.0, 1.0, 12.0], + [-9.0, 2.0, -2.0, -2.0], + [-4.0, 2.0, -2.0, 8.0], + [-7.0, 7.0, -6.0, 19.0]]), + np.array([[2.0, 1.0, 1.0, 3.0], + [0.0, 1.0, 2.0, 1.0], + [0.0, 0.0, 1.0, 1.0], + [0.0, 0.0, 0.0, 2.0]]), + np.array([[1.0, 1.0, 1.0, 2.0], + [0.0, 1.0, 4.0, 1.0], + [0.0, 0.0, 1.0, 1.0], + [0.0, 0.0, 0.0, 1.0]]), + np.array([[-7.0, 5.0, 0.0, 7.0], + [-5.0, 1.0, -8.0, 0.0], + [-1.0, 2.0, -3.0, 5.0], + [-3.0, 2.0, 0.0, 5.0]]), + np.array([[1.0, 1.0, 1.0, 1.0], + [-1.0, 2.0, -1.0, -1.0], + [-1.0, 1.0, 3.0, 1.0], + [-1.0, 1.0, -1.0, 4.0]]), + np.array([[4.0, -1.0, 1.0, -1.0], + [1.0, 3.0, -1.0, 1.0], + [-1.0, 1.0, 2.0, -1.0], + [1.0, -1.0, 1.0, 1.0]]))]) +@pytest.mark.parametrize('dtype', REAL_DTYPES) +def test_tgsyl_NAG(a, b, c, d, e, f, rans, lans, dtype): + atol = 1e-4 + + tgsyl = get_lapack_funcs(('tgsyl'), dtype=dtype) + rout, lout, scale, dif, info = tgsyl(a, b, c, d, e, f) + + assert_equal(info, 0) + assert_allclose(scale, 1.0, rtol=0, atol=np.finfo(dtype).eps*100, + err_msg="SCALE must be 1.0") + assert_allclose(dif, 0.0, rtol=0, atol=np.finfo(dtype).eps*100, + err_msg="DIF must be nearly 0") + assert_allclose(rout, rans, atol=atol, + err_msg="Solution for R is incorrect") + assert_allclose(lout, lans, atol=atol, + err_msg="Solution for L is incorrect") + + +@pytest.mark.parametrize('dtype', REAL_DTYPES) +@pytest.mark.parametrize('trans', ('N', 'T')) +@pytest.mark.parametrize('ijob', [0, 1, 2, 3, 4]) +def test_tgsyl(dtype, trans, ijob): + + atol = 1e-3 if dtype == np.float32 else 1e-10 + rng = np.random.default_rng(1685779866898198) + m, n = 10, 15 + + a, d, *_ = qz(rng.uniform(-10, 10, [m, m]).astype(dtype), + rng.uniform(-10, 10, [m, m]).astype(dtype), + output='real') + + b, e, *_ = qz(rng.uniform(-10, 10, [n, n]).astype(dtype), + rng.uniform(-10, 10, [n, n]).astype(dtype), + output='real') + + c = rng.uniform(-2, 2, [m, n]).astype(dtype) + f = rng.uniform(-2, 2, [m, n]).astype(dtype) + + tgsyl = get_lapack_funcs(('tgsyl'), dtype=dtype) + rout, lout, scale, dif, info = tgsyl(a, b, c, d, e, f, + trans=trans, ijob=ijob) + + assert info == 0, "INFO is non-zero" + assert scale >= 0.0, "SCALE must be non-negative" + if ijob == 0: + assert_allclose(dif, 0.0, rtol=0, atol=np.finfo(dtype).eps*100, + err_msg="DIF must be 0 for ijob =0") + else: + assert dif >= 0.0, "DIF must be non-negative" + + # Only DIF is calculated for ijob = 3/4 + if ijob <= 2: + if trans == 'N': + lhs1 = a @ rout - lout @ b + rhs1 = scale*c + lhs2 = d @ rout - lout @ e + rhs2 = scale*f + elif trans == 'T': + lhs1 = np.transpose(a) @ rout + np.transpose(d) @ lout + rhs1 = scale*c + lhs2 = rout @ np.transpose(b) + lout @ np.transpose(e) + rhs2 = -1.0*scale*f + + assert_allclose(lhs1, rhs1, atol=atol, rtol=0., + err_msg='lhs1 and rhs1 do not match') + assert_allclose(lhs2, rhs2, atol=atol, rtol=0., + err_msg='lhs2 and rhs2 do not match') diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_matfuncs.py b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_matfuncs.py new file mode 100644 index 0000000000000000000000000000000000000000..b50122f81fa8be59aaa3bf3dca223fe7247c3301 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_matfuncs.py @@ -0,0 +1,1013 @@ +# 
+# Created by: Pearu Peterson, March 2002 +# +""" Test functions for linalg.matfuncs module + +""" +import random +import functools + +import numpy as np +from numpy import array, identity, dot, sqrt +from numpy.testing import (assert_array_almost_equal, assert_allclose, assert_, + assert_array_less, assert_array_equal, assert_warns) +import pytest + +import scipy.linalg +from scipy.linalg import (funm, signm, logm, sqrtm, fractional_matrix_power, + expm, expm_frechet, expm_cond, norm, khatri_rao) +from scipy.linalg import _matfuncs_inv_ssq +from scipy.linalg._matfuncs import pick_pade_structure +import scipy.linalg._expm_frechet + +from scipy.optimize import minimize + + +def _get_al_mohy_higham_2012_experiment_1(): + """ + Return the test matrix from Experiment (1) of [1]_. + + References + ---------- + .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012) + "Improved Inverse Scaling and Squaring Algorithms + for the Matrix Logarithm." + SIAM Journal on Scientific Computing, 34 (4). C152-C169. + ISSN 1095-7197 + + """ + A = np.array([ + [3.2346e-1, 3e4, 3e4, 3e4], + [0, 3.0089e-1, 3e4, 3e4], + [0, 0, 3.2210e-1, 3e4], + [0, 0, 0, 3.0744e-1]], dtype=float) + return A + + +class TestSignM: + + def test_nils(self): + a = array([[29.2, -24.2, 69.5, 49.8, 7.], + [-9.2, 5.2, -18., -16.8, -2.], + [-10., 6., -20., -18., -2.], + [-9.6, 9.6, -25.5, -15.4, -2.], + [9.8, -4.8, 18., 18.2, 2.]]) + cr = array([[11.94933333,-2.24533333,15.31733333,21.65333333,-2.24533333], + [-3.84266667,0.49866667,-4.59066667,-7.18666667,0.49866667], + [-4.08,0.56,-4.92,-7.6,0.56], + [-4.03466667,1.04266667,-5.59866667,-7.02666667,1.04266667], + [4.15733333,-0.50133333,4.90933333,7.81333333,-0.50133333]]) + r = signm(a) + assert_array_almost_equal(r,cr) + + def test_defective1(self): + a = array([[0.0,1,0,0],[1,0,1,0],[0,0,0,1],[0,0,1,0]]) + signm(a, disp=False) + #XXX: what would be the correct result? + + def test_defective2(self): + a = array(( + [29.2,-24.2,69.5,49.8,7.0], + [-9.2,5.2,-18.0,-16.8,-2.0], + [-10.0,6.0,-20.0,-18.0,-2.0], + [-9.6,9.6,-25.5,-15.4,-2.0], + [9.8,-4.8,18.0,18.2,2.0])) + signm(a, disp=False) + #XXX: what would be the correct result? + + def test_defective3(self): + a = array([[-2., 25., 0., 0., 0., 0., 0.], + [0., -3., 10., 3., 3., 3., 0.], + [0., 0., 2., 15., 3., 3., 0.], + [0., 0., 0., 0., 15., 3., 0.], + [0., 0., 0., 0., 3., 10., 0.], + [0., 0., 0., 0., 0., -2., 25.], + [0., 0., 0., 0., 0., 0., -3.]]) + signm(a, disp=False) + #XXX: what would be the correct result? + + +class TestLogM: + + def test_nils(self): + a = array([[-2., 25., 0., 0., 0., 0., 0.], + [0., -3., 10., 3., 3., 3., 0.], + [0., 0., 2., 15., 3., 3., 0.], + [0., 0., 0., 0., 15., 3., 0.], + [0., 0., 0., 0., 3., 10., 0.], + [0., 0., 0., 0., 0., -2., 25.], + [0., 0., 0., 0., 0., 0., -3.]]) + m = (identity(7)*3.1+0j)-a + logm(m, disp=False) + #XXX: what would be the correct result? + + def test_al_mohy_higham_2012_experiment_1_logm(self): + # The logm completes the round trip successfully. + # Note that the expm leg of the round trip is badly conditioned. + A = _get_al_mohy_higham_2012_experiment_1() + A_logm, info = logm(A, disp=False) + A_round_trip = expm(A_logm) + assert_allclose(A_round_trip, A, rtol=5e-5, atol=1e-14) + + def test_al_mohy_higham_2012_experiment_1_funm_log(self): + # The raw funm with np.log does not complete the round trip. + # Note that the expm leg of the round trip is badly conditioned. 
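+        # funm evaluates the function on the Schur form with a Parlett-style
+        # recurrence that divides by differences of eigenvalues; here the
+        # eigenvalues are nearly equal (~0.3) while off-diagonal entries are
+        # ~3e4, so the computed log is inaccurate enough for the round trip
+        # to fail, unlike logm's inverse scaling-and-squaring above.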
+ A = _get_al_mohy_higham_2012_experiment_1() + A_funm_log, info = funm(A, np.log, disp=False) + A_round_trip = expm(A_funm_log) + assert_(not np.allclose(A_round_trip, A, rtol=1e-5, atol=1e-14)) + + def test_round_trip_random_float(self): + np.random.seed(1234) + for n in range(1, 6): + M_unscaled = np.random.randn(n, n) + for scale in np.logspace(-4, 4, 9): + M = M_unscaled * scale + + # Eigenvalues are related to the branch cut. + W = np.linalg.eigvals(M) + err_msg = f'M:{M} eivals:{W}' + + # Check sqrtm round trip because it is used within logm. + M_sqrtm, info = sqrtm(M, disp=False) + M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm) + assert_allclose(M_sqrtm_round_trip, M) + + # Check logm round trip. + M_logm, info = logm(M, disp=False) + M_logm_round_trip = expm(M_logm) + assert_allclose(M_logm_round_trip, M, err_msg=err_msg) + + def test_round_trip_random_complex(self): + np.random.seed(1234) + for n in range(1, 6): + M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n) + for scale in np.logspace(-4, 4, 9): + M = M_unscaled * scale + M_logm, info = logm(M, disp=False) + M_round_trip = expm(M_logm) + assert_allclose(M_round_trip, M) + + def test_logm_type_preservation_and_conversion(self): + # The logm matrix function should preserve the type of a matrix + # whose eigenvalues are positive with zero imaginary part. + # Test this preservation for variously structured matrices. + complex_dtype_chars = ('F', 'D', 'G') + for matrix_as_list in ( + [[1, 0], [0, 1]], + [[1, 0], [1, 1]], + [[2, 1], [1, 1]], + [[2, 3], [1, 2]]): + + # check that the spectrum has the expected properties + W = scipy.linalg.eigvals(matrix_as_list) + assert_(not any(w.imag or w.real < 0 for w in W)) + + # check float type preservation + A = np.array(matrix_as_list, dtype=float) + A_logm, info = logm(A, disp=False) + assert_(A_logm.dtype.char not in complex_dtype_chars) + + # check complex type preservation + A = np.array(matrix_as_list, dtype=complex) + A_logm, info = logm(A, disp=False) + assert_(A_logm.dtype.char in complex_dtype_chars) + + # check float->complex type conversion for the matrix negation + A = -np.array(matrix_as_list, dtype=float) + A_logm, info = logm(A, disp=False) + assert_(A_logm.dtype.char in complex_dtype_chars) + + def test_complex_spectrum_real_logm(self): + # This matrix has complex eigenvalues and real logm. + # Its output dtype depends on its input dtype. + M = [[1, 1, 2], [2, 1, 1], [1, 2, 1]] + for dt in float, complex: + X = np.array(M, dtype=dt) + w = scipy.linalg.eigvals(X) + assert_(1e-2 < np.absolute(w.imag).sum()) + Y, info = logm(X, disp=False) + assert_(np.issubdtype(Y.dtype, np.inexact)) + assert_allclose(expm(Y), X) + + def test_real_mixed_sign_spectrum(self): + # These matrices have real eigenvalues with mixed signs. + # The output logm dtype is complex, regardless of input dtype. 
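+        # e.g. logm(diag([1, -1])) needs log(-1) = i*pi on the diagonal,
+        # so a real result is impossible.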
+ for M in ( + [[1, 0], [0, -1]], + [[0, 1], [1, 0]]): + for dt in float, complex: + A = np.array(M, dtype=dt) + A_logm, info = logm(A, disp=False) + assert_(np.issubdtype(A_logm.dtype, np.complexfloating)) + + def test_exactly_singular(self): + A = np.array([[0, 0], [1j, 1j]]) + B = np.asarray([[1, 1], [0, 0]]) + for M in A, A.T, B, B.T: + expected_warning = _matfuncs_inv_ssq.LogmExactlySingularWarning + L, info = assert_warns(expected_warning, logm, M, disp=False) + E = expm(L) + assert_allclose(E, M, atol=1e-14) + + def test_nearly_singular(self): + M = np.array([[1e-100]]) + expected_warning = _matfuncs_inv_ssq.LogmNearlySingularWarning + L, info = assert_warns(expected_warning, logm, M, disp=False) + E = expm(L) + assert_allclose(E, M, atol=1e-14) + + def test_opposite_sign_complex_eigenvalues(self): + # See gh-6113 + E = [[0, 1], [-1, 0]] + L = [[0, np.pi*0.5], [-np.pi*0.5, 0]] + assert_allclose(expm(L), E, atol=1e-14) + assert_allclose(logm(E), L, atol=1e-14) + E = [[1j, 4], [0, -1j]] + L = [[1j*np.pi*0.5, 2*np.pi], [0, -1j*np.pi*0.5]] + assert_allclose(expm(L), E, atol=1e-14) + assert_allclose(logm(E), L, atol=1e-14) + E = [[1j, 0], [0, -1j]] + L = [[1j*np.pi*0.5, 0], [0, -1j*np.pi*0.5]] + assert_allclose(expm(L), E, atol=1e-14) + assert_allclose(logm(E), L, atol=1e-14) + + def test_readonly(self): + n = 5 + a = np.ones((n, n)) + np.identity(n) + a.flags.writeable = False + logm(a) + + +class TestSqrtM: + def test_round_trip_random_float(self): + np.random.seed(1234) + for n in range(1, 6): + M_unscaled = np.random.randn(n, n) + for scale in np.logspace(-4, 4, 9): + M = M_unscaled * scale + M_sqrtm, info = sqrtm(M, disp=False) + M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm) + assert_allclose(M_sqrtm_round_trip, M) + + def test_round_trip_random_complex(self): + np.random.seed(1234) + for n in range(1, 6): + M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n) + for scale in np.logspace(-4, 4, 9): + M = M_unscaled * scale + M_sqrtm, info = sqrtm(M, disp=False) + M_sqrtm_round_trip = M_sqrtm.dot(M_sqrtm) + assert_allclose(M_sqrtm_round_trip, M) + + def test_bad(self): + # See https://web.archive.org/web/20051220232650/http://www.maths.man.ac.uk/~nareports/narep336.ps.gz + e = 2**-5 + se = sqrt(e) + a = array([[1.0,0,0,1], + [0,e,0,0], + [0,0,e,0], + [0,0,0,1]]) + sa = array([[1,0,0,0.5], + [0,se,0,0], + [0,0,se,0], + [0,0,0,1]]) + n = a.shape[0] + assert_array_almost_equal(dot(sa,sa),a) + # Check default sqrtm. + esa = sqrtm(a, disp=False, blocksize=n)[0] + assert_array_almost_equal(dot(esa,esa),a) + # Check sqrtm with 2x2 blocks. + esa = sqrtm(a, disp=False, blocksize=2)[0] + assert_array_almost_equal(dot(esa,esa),a) + + def test_sqrtm_type_preservation_and_conversion(self): + # The sqrtm matrix function should preserve the type of a matrix + # whose eigenvalues are nonnegative with zero imaginary part. + # Test this preservation for variously structured matrices. 
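+        # 'F', 'D' and 'G' are the NumPy dtype characters for complex64,
+        # complex128 and clongdouble.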
+ complex_dtype_chars = ('F', 'D', 'G') + for matrix_as_list in ( + [[1, 0], [0, 1]], + [[1, 0], [1, 1]], + [[2, 1], [1, 1]], + [[2, 3], [1, 2]], + [[1, 1], [1, 1]]): + + # check that the spectrum has the expected properties + W = scipy.linalg.eigvals(matrix_as_list) + assert_(not any(w.imag or w.real < 0 for w in W)) + + # check float type preservation + A = np.array(matrix_as_list, dtype=float) + A_sqrtm, info = sqrtm(A, disp=False) + assert_(A_sqrtm.dtype.char not in complex_dtype_chars) + + # check complex type preservation + A = np.array(matrix_as_list, dtype=complex) + A_sqrtm, info = sqrtm(A, disp=False) + assert_(A_sqrtm.dtype.char in complex_dtype_chars) + + # check float->complex type conversion for the matrix negation + A = -np.array(matrix_as_list, dtype=float) + A_sqrtm, info = sqrtm(A, disp=False) + assert_(A_sqrtm.dtype.char in complex_dtype_chars) + + def test_sqrtm_type_conversion_mixed_sign_or_complex_spectrum(self): + complex_dtype_chars = ('F', 'D', 'G') + for matrix_as_list in ( + [[1, 0], [0, -1]], + [[0, 1], [1, 0]], + [[0, 1, 0], [0, 0, 1], [1, 0, 0]]): + + # check that the spectrum has the expected properties + W = scipy.linalg.eigvals(matrix_as_list) + assert_(any(w.imag or w.real < 0 for w in W)) + + # check complex->complex + A = np.array(matrix_as_list, dtype=complex) + A_sqrtm, info = sqrtm(A, disp=False) + assert_(A_sqrtm.dtype.char in complex_dtype_chars) + + # check float->complex + A = np.array(matrix_as_list, dtype=float) + A_sqrtm, info = sqrtm(A, disp=False) + assert_(A_sqrtm.dtype.char in complex_dtype_chars) + + def test_blocksizes(self): + # Make sure I do not goof up the blocksizes when they do not divide n. + np.random.seed(1234) + for n in range(1, 8): + A = np.random.rand(n, n) + 1j*np.random.randn(n, n) + A_sqrtm_default, info = sqrtm(A, disp=False, blocksize=n) + assert_allclose(A, np.linalg.matrix_power(A_sqrtm_default, 2)) + for blocksize in range(1, 10): + A_sqrtm_new, info = sqrtm(A, disp=False, blocksize=blocksize) + assert_allclose(A_sqrtm_default, A_sqrtm_new) + + def test_al_mohy_higham_2012_experiment_1(self): + # Matrix square root of a tricky upper triangular matrix. + A = _get_al_mohy_higham_2012_experiment_1() + A_sqrtm, info = sqrtm(A, disp=False) + A_round_trip = A_sqrtm.dot(A_sqrtm) + assert_allclose(A_round_trip, A, rtol=1e-5) + assert_allclose(np.tril(A_round_trip), np.tril(A)) + + def test_strict_upper_triangular(self): + # This matrix has no square root. + for dt in int, float: + A = np.array([ + [0, 3, 0, 0], + [0, 0, 3, 0], + [0, 0, 0, 3], + [0, 0, 0, 0]], dtype=dt) + A_sqrtm, info = sqrtm(A, disp=False) + assert_(np.isnan(A_sqrtm).all()) + + def test_weird_matrix(self): + # The square root of matrix B exists. + for dt in int, float: + A = np.array([ + [0, 0, 1], + [0, 0, 0], + [0, 1, 0]], dtype=dt) + B = np.array([ + [0, 1, 0], + [0, 0, 0], + [0, 0, 0]], dtype=dt) + assert_array_equal(B, A.dot(A)) + + # But scipy sqrtm is not clever enough to find it. 
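+            # (Roughly: sqrtm works on the triangular Schur factor, whose
+            # recurrence divides by sums of diagonal square roots; B is
+            # nilpotent, so those sums are all zero and the result degenerates
+            # to NaNs even though A above squares to B.)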
+ B_sqrtm, info = sqrtm(B, disp=False) + assert_(np.isnan(B_sqrtm).all()) + + def test_disp(self): + np.random.seed(1234) + + A = np.random.rand(3, 3) + B = sqrtm(A, disp=True) + assert_allclose(B.dot(B), A) + + def test_opposite_sign_complex_eigenvalues(self): + M = [[2j, 4], [0, -2j]] + R = [[1+1j, 2], [0, 1-1j]] + assert_allclose(np.dot(R, R), M, atol=1e-14) + assert_allclose(sqrtm(M), R, atol=1e-14) + + def test_gh4866(self): + M = np.array([[1, 0, 0, 1], + [0, 0, 0, 0], + [0, 0, 0, 0], + [1, 0, 0, 1]]) + R = np.array([[sqrt(0.5), 0, 0, sqrt(0.5)], + [0, 0, 0, 0], + [0, 0, 0, 0], + [sqrt(0.5), 0, 0, sqrt(0.5)]]) + assert_allclose(np.dot(R, R), M, atol=1e-14) + assert_allclose(sqrtm(M), R, atol=1e-14) + + def test_gh5336(self): + M = np.diag([2, 1, 0]) + R = np.diag([sqrt(2), 1, 0]) + assert_allclose(np.dot(R, R), M, atol=1e-14) + assert_allclose(sqrtm(M), R, atol=1e-14) + + def test_gh7839(self): + M = np.zeros((2, 2)) + R = np.zeros((2, 2)) + assert_allclose(np.dot(R, R), M, atol=1e-14) + assert_allclose(sqrtm(M), R, atol=1e-14) + + @pytest.mark.xfail(reason="failing on macOS after gh-20212") + def test_gh17918(self): + M = np.empty((19, 19)) + M.fill(0.94) + np.fill_diagonal(M, 1) + assert np.isrealobj(sqrtm(M)) + + def test_data_size_preservation_uint_in_float_out(self): + M = np.zeros((10, 10), dtype=np.uint8) + # input bit size is 8, but minimum float bit size is 16 + assert sqrtm(M).dtype == np.float16 + M = np.zeros((10, 10), dtype=np.uint16) + assert sqrtm(M).dtype == np.float16 + M = np.zeros((10, 10), dtype=np.uint32) + assert sqrtm(M).dtype == np.float32 + M = np.zeros((10, 10), dtype=np.uint64) + assert sqrtm(M).dtype == np.float64 + + def test_data_size_preservation_int_in_float_out(self): + M = np.zeros((10, 10), dtype=np.int8) + # input bit size is 8, but minimum float bit size is 16 + assert sqrtm(M).dtype == np.float16 + M = np.zeros((10, 10), dtype=np.int16) + assert sqrtm(M).dtype == np.float16 + M = np.zeros((10, 10), dtype=np.int32) + assert sqrtm(M).dtype == np.float32 + M = np.zeros((10, 10), dtype=np.int64) + assert sqrtm(M).dtype == np.float64 + + def test_data_size_preservation_int_in_comp_out(self): + M = np.array([[2, 4], [0, -2]], dtype=np.int8) + # input bit size is 8, but minimum complex bit size is 64 + assert sqrtm(M).dtype == np.complex64 + M = np.array([[2, 4], [0, -2]], dtype=np.int16) + # input bit size is 16, but minimum complex bit size is 64 + assert sqrtm(M).dtype == np.complex64 + M = np.array([[2, 4], [0, -2]], dtype=np.int32) + assert sqrtm(M).dtype == np.complex64 + M = np.array([[2, 4], [0, -2]], dtype=np.int64) + assert sqrtm(M).dtype == np.complex128 + + def test_data_size_preservation_float_in_float_out(self): + M = np.zeros((10, 10), dtype=np.float16) + assert sqrtm(M).dtype == np.float16 + M = np.zeros((10, 10), dtype=np.float32) + assert sqrtm(M).dtype == np.float32 + M = np.zeros((10, 10), dtype=np.float64) + assert sqrtm(M).dtype == np.float64 + if hasattr(np, 'float128'): + M = np.zeros((10, 10), dtype=np.float128) + assert sqrtm(M).dtype == np.float128 + + def test_data_size_preservation_float_in_comp_out(self): + M = np.array([[2, 4], [0, -2]], dtype=np.float16) + # input bit size is 16, but minimum complex bit size is 64 + assert sqrtm(M).dtype == np.complex64 + M = np.array([[2, 4], [0, -2]], dtype=np.float32) + assert sqrtm(M).dtype == np.complex64 + M = np.array([[2, 4], [0, -2]], dtype=np.float64) + assert sqrtm(M).dtype == np.complex128 + if hasattr(np, 'float128') and hasattr(np, 'complex256'): + M = np.array([[2, 4], [0, 
-2]], dtype=np.float128) + assert sqrtm(M).dtype == np.complex256 + + def test_data_size_preservation_comp_in_comp_out(self): + M = np.array([[2j, 4], [0, -2j]], dtype=np.complex64) + assert sqrtm(M).dtype == np.complex128 + if hasattr(np, 'complex256'): + M = np.array([[2j, 4], [0, -2j]], dtype=np.complex128) + assert sqrtm(M).dtype == np.complex256 + M = np.array([[2j, 4], [0, -2j]], dtype=np.complex256) + assert sqrtm(M).dtype == np.complex256 + + +class TestFractionalMatrixPower: + def test_round_trip_random_complex(self): + np.random.seed(1234) + for p in range(1, 5): + for n in range(1, 5): + M_unscaled = np.random.randn(n, n) + 1j * np.random.randn(n, n) + for scale in np.logspace(-4, 4, 9): + M = M_unscaled * scale + M_root = fractional_matrix_power(M, 1/p) + M_round_trip = np.linalg.matrix_power(M_root, p) + assert_allclose(M_round_trip, M) + + def test_round_trip_random_float(self): + # This test is more annoying because it can hit the branch cut; + # this happens when the matrix has an eigenvalue + # with no imaginary component and with a real negative component, + # and it means that the principal branch does not exist. + np.random.seed(1234) + for p in range(1, 5): + for n in range(1, 5): + M_unscaled = np.random.randn(n, n) + for scale in np.logspace(-4, 4, 9): + M = M_unscaled * scale + M_root = fractional_matrix_power(M, 1/p) + M_round_trip = np.linalg.matrix_power(M_root, p) + assert_allclose(M_round_trip, M) + + def test_larger_abs_fractional_matrix_powers(self): + np.random.seed(1234) + for n in (2, 3, 5): + for i in range(10): + M = np.random.randn(n, n) + 1j * np.random.randn(n, n) + M_one_fifth = fractional_matrix_power(M, 0.2) + # Test the round trip. + M_round_trip = np.linalg.matrix_power(M_one_fifth, 5) + assert_allclose(M, M_round_trip) + # Test a large abs fractional power. + X = fractional_matrix_power(M, -5.4) + Y = np.linalg.matrix_power(M_one_fifth, -27) + assert_allclose(X, Y) + # Test another large abs fractional power. + X = fractional_matrix_power(M, 3.8) + Y = np.linalg.matrix_power(M_one_fifth, 19) + assert_allclose(X, Y) + + def test_random_matrices_and_powers(self): + # Each independent iteration of this fuzz test picks random parameters. + # It tries to hit some edge cases. + np.random.seed(1234) + nsamples = 20 + for i in range(nsamples): + # Sample a matrix size and a random real power. + n = random.randrange(1, 5) + p = np.random.randn() + + # Sample a random real or complex matrix. + matrix_scale = np.exp(random.randrange(-4, 5)) + A = np.random.randn(n, n) + if random.choice((True, False)): + A = A + 1j * np.random.randn(n, n) + A = A * matrix_scale + + # Check a couple of analytically equivalent ways + # to compute the fractional matrix power. + # These can be compared because they both use the principal branch. + A_power = fractional_matrix_power(A, p) + A_logm, info = logm(A, disp=False) + A_power_expm_logm = expm(A_logm * p) + assert_allclose(A_power, A_power_expm_logm) + + def test_al_mohy_higham_2012_experiment_1(self): + # Fractional powers of a tricky upper triangular matrix. + A = _get_al_mohy_higham_2012_experiment_1() + + # Test remainder matrix power. + A_funm_sqrt, info = funm(A, np.sqrt, disp=False) + A_sqrtm, info = sqrtm(A, disp=False) + A_rem_power = _matfuncs_inv_ssq._remainder_matrix_power(A, 0.5) + A_power = fractional_matrix_power(A, 0.5) + assert_allclose(A_rem_power, A_power, rtol=1e-11) + assert_allclose(A_sqrtm, A_power) + assert_allclose(A_sqrtm, A_funm_sqrt) + + # Test more fractional powers. 
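+        # The round trip A -> A**p -> (A**p)**(1/p) below is only checked
+        # loosely (rtol=1e-2); the huge off-diagonal entries limit the
+        # attainable accuracy.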
+ for p in (1/2, 5/3): + A_power = fractional_matrix_power(A, p) + A_round_trip = fractional_matrix_power(A_power, 1/p) + assert_allclose(A_round_trip, A, rtol=1e-2) + assert_allclose(np.tril(A_round_trip, 1), np.tril(A, 1)) + + def test_briggs_helper_function(self): + np.random.seed(1234) + for a in np.random.randn(10) + 1j * np.random.randn(10): + for k in range(5): + x_observed = _matfuncs_inv_ssq._briggs_helper_function(a, k) + x_expected = a ** np.exp2(-k) - 1 + assert_allclose(x_observed, x_expected) + + def test_type_preservation_and_conversion(self): + # The fractional_matrix_power matrix function should preserve + # the type of a matrix whose eigenvalues + # are positive with zero imaginary part. + # Test this preservation for variously structured matrices. + complex_dtype_chars = ('F', 'D', 'G') + for matrix_as_list in ( + [[1, 0], [0, 1]], + [[1, 0], [1, 1]], + [[2, 1], [1, 1]], + [[2, 3], [1, 2]]): + + # check that the spectrum has the expected properties + W = scipy.linalg.eigvals(matrix_as_list) + assert_(not any(w.imag or w.real < 0 for w in W)) + + # Check various positive and negative powers + # with absolute values bigger and smaller than 1. + for p in (-2.4, -0.9, 0.2, 3.3): + + # check float type preservation + A = np.array(matrix_as_list, dtype=float) + A_power = fractional_matrix_power(A, p) + assert_(A_power.dtype.char not in complex_dtype_chars) + + # check complex type preservation + A = np.array(matrix_as_list, dtype=complex) + A_power = fractional_matrix_power(A, p) + assert_(A_power.dtype.char in complex_dtype_chars) + + # check float->complex for the matrix negation + A = -np.array(matrix_as_list, dtype=float) + A_power = fractional_matrix_power(A, p) + assert_(A_power.dtype.char in complex_dtype_chars) + + def test_type_conversion_mixed_sign_or_complex_spectrum(self): + complex_dtype_chars = ('F', 'D', 'G') + for matrix_as_list in ( + [[1, 0], [0, -1]], + [[0, 1], [1, 0]], + [[0, 1, 0], [0, 0, 1], [1, 0, 0]]): + + # check that the spectrum has the expected properties + W = scipy.linalg.eigvals(matrix_as_list) + assert_(any(w.imag or w.real < 0 for w in W)) + + # Check various positive and negative powers + # with absolute values bigger and smaller than 1. + for p in (-2.4, -0.9, 0.2, 3.3): + + # check complex->complex + A = np.array(matrix_as_list, dtype=complex) + A_power = fractional_matrix_power(A, p) + assert_(A_power.dtype.char in complex_dtype_chars) + + # check float->complex + A = np.array(matrix_as_list, dtype=float) + A_power = fractional_matrix_power(A, p) + assert_(A_power.dtype.char in complex_dtype_chars) + + @pytest.mark.xfail(reason='Too unstable across LAPACKs.') + def test_singular(self): + # Negative fractional powers do not work with singular matrices. + for matrix_as_list in ( + [[0, 0], [0, 0]], + [[1, 1], [1, 1]], + [[1, 2], [3, 6]], + [[0, 0, 0], [0, 1, 1], [0, -1, 1]]): + + # Check fractional powers both for float and for complex types. 
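+            # A negative power of a singular matrix would require its
+            # inverse, so an all-NaN result is expected; positive fractional
+            # powers should still round-trip back to A.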
+ for newtype in (float, complex): + A = np.array(matrix_as_list, dtype=newtype) + for p in (-0.7, -0.9, -2.4, -1.3): + A_power = fractional_matrix_power(A, p) + assert_(np.isnan(A_power).all()) + for p in (0.2, 1.43): + A_power = fractional_matrix_power(A, p) + A_round_trip = fractional_matrix_power(A_power, 1/p) + assert_allclose(A_round_trip, A) + + def test_opposite_sign_complex_eigenvalues(self): + M = [[2j, 4], [0, -2j]] + R = [[1+1j, 2], [0, 1-1j]] + assert_allclose(np.dot(R, R), M, atol=1e-14) + assert_allclose(fractional_matrix_power(M, 0.5), R, atol=1e-14) + + +class TestExpM: + def test_zero(self): + a = array([[0.,0],[0,0]]) + assert_array_almost_equal(expm(a),[[1,0],[0,1]]) + + def test_single_elt(self): + elt = expm(1) + assert_allclose(elt, np.array([[np.e]])) + + def test_empty_matrix_input(self): + # handle gh-11082 + A = np.zeros((0, 0)) + result = expm(A) + assert result.size == 0 + + def test_2x2_input(self): + E = np.e + a = array([[1, 4], [1, 1]]) + aa = (E**4 + 1)/(2*E) + bb = (E**4 - 1)/E + assert_allclose(expm(a), array([[aa, bb], [bb/4, aa]])) + assert expm(a.astype(np.complex64)).dtype.char == 'F' + assert expm(a.astype(np.float32)).dtype.char == 'f' + + def test_nx2x2_input(self): + E = np.e + # These are integer matrices with integer eigenvalues + a = np.array([[[1, 4], [1, 1]], + [[1, 3], [1, -1]], + [[1, 3], [4, 5]], + [[1, 3], [5, 3]], + [[4, 5], [-3, -4]]], order='F') + # Exact results are computed symbolically + a_res = np.array([ + [[(E**4+1)/(2*E), (E**4-1)/E], + [(E**4-1)/4/E, (E**4+1)/(2*E)]], + [[1/(4*E**2)+(3*E**2)/4, (3*E**2)/4-3/(4*E**2)], + [E**2/4-1/(4*E**2), 3/(4*E**2)+E**2/4]], + [[3/(4*E)+E**7/4, -3/(8*E)+(3*E**7)/8], + [-1/(2*E)+E**7/2, 1/(4*E)+(3*E**7)/4]], + [[5/(8*E**2)+(3*E**6)/8, -3/(8*E**2)+(3*E**6)/8], + [-5/(8*E**2)+(5*E**6)/8, 3/(8*E**2)+(5*E**6)/8]], + [[-3/(2*E)+(5*E)/2, -5/(2*E)+(5*E)/2], + [3/(2*E)-(3*E)/2, 5/(2*E)-(3*E)/2]] + ]) + assert_allclose(expm(a), a_res) + + def test_readonly(self): + n = 7 + a = np.ones((n, n)) + a.flags.writeable = False + expm(a) + + def test_gh18086(self): + A = np.zeros((400, 400), dtype=float) + rng = np.random.default_rng(100) + i = rng.integers(0, 399, 500) + j = rng.integers(0, 399, 500) + A[i, j] = rng.random(500) + # Problem appears when m = 9 + Am = np.empty((5, 400, 400), dtype=float) + Am[0] = A.copy() + m, s = pick_pade_structure(Am) + assert m == 9 + # Check that result is accurate + first_res = expm(A) + np.testing.assert_array_almost_equal(logm(first_res), A) + # Check that result is consistent + for i in range(5): + next_res = expm(A) + np.testing.assert_array_almost_equal(first_res, next_res) + + +class TestExpmFrechet: + + def test_expm_frechet(self): + # a test of the basic functionality + M = np.array([ + [1, 2, 3, 4], + [5, 6, 7, 8], + [0, 0, 1, 2], + [0, 0, 5, 6], + ], dtype=float) + A = np.array([ + [1, 2], + [5, 6], + ], dtype=float) + E = np.array([ + [3, 4], + [7, 8], + ], dtype=float) + expected_expm = scipy.linalg.expm(A) + expected_frechet = scipy.linalg.expm(M)[:2, 2:] + for kwargs in ({}, {'method':'SPS'}, {'method':'blockEnlarge'}): + observed_expm, observed_frechet = expm_frechet(A, E, **kwargs) + assert_allclose(expected_expm, observed_expm) + assert_allclose(expected_frechet, observed_frechet) + + def test_small_norm_expm_frechet(self): + # methodically test matrices with a range of norms, for better coverage + M_original = np.array([ + [1, 2, 3, 4], + [5, 6, 7, 8], + [0, 0, 1, 2], + [0, 0, 5, 6], + ], dtype=float) + A_original = np.array([ + [1, 2], + [5, 6], + ], 
dtype=float) + E_original = np.array([ + [3, 4], + [7, 8], + ], dtype=float) + A_original_norm_1 = scipy.linalg.norm(A_original, 1) + selected_m_list = [1, 3, 5, 7, 9, 11, 13, 15] + m_neighbor_pairs = zip(selected_m_list[:-1], selected_m_list[1:]) + for ma, mb in m_neighbor_pairs: + ell_a = scipy.linalg._expm_frechet.ell_table_61[ma] + ell_b = scipy.linalg._expm_frechet.ell_table_61[mb] + target_norm_1 = 0.5 * (ell_a + ell_b) + scale = target_norm_1 / A_original_norm_1 + M = scale * M_original + A = scale * A_original + E = scale * E_original + expected_expm = scipy.linalg.expm(A) + expected_frechet = scipy.linalg.expm(M)[:2, 2:] + observed_expm, observed_frechet = expm_frechet(A, E) + assert_allclose(expected_expm, observed_expm) + assert_allclose(expected_frechet, observed_frechet) + + def test_fuzz(self): + # try a bunch of crazy inputs + rfuncs = ( + np.random.uniform, + np.random.normal, + np.random.standard_cauchy, + np.random.exponential) + ntests = 100 + for i in range(ntests): + rfunc = random.choice(rfuncs) + target_norm_1 = random.expovariate(1.0) + n = random.randrange(2, 16) + A_original = rfunc(size=(n,n)) + E_original = rfunc(size=(n,n)) + A_original_norm_1 = scipy.linalg.norm(A_original, 1) + scale = target_norm_1 / A_original_norm_1 + A = scale * A_original + E = scale * E_original + M = np.vstack([ + np.hstack([A, E]), + np.hstack([np.zeros_like(A), A])]) + expected_expm = scipy.linalg.expm(A) + expected_frechet = scipy.linalg.expm(M)[:n, n:] + observed_expm, observed_frechet = expm_frechet(A, E) + assert_allclose(expected_expm, observed_expm, atol=5e-8) + assert_allclose(expected_frechet, observed_frechet, atol=1e-7) + + def test_problematic_matrix(self): + # this test case uncovered a bug which has since been fixed + A = np.array([ + [1.50591997, 1.93537998], + [0.41203263, 0.23443516], + ], dtype=float) + E = np.array([ + [1.87864034, 2.07055038], + [1.34102727, 0.67341123], + ], dtype=float) + scipy.linalg.norm(A, 1) + sps_expm, sps_frechet = expm_frechet( + A, E, method='SPS') + blockEnlarge_expm, blockEnlarge_frechet = expm_frechet( + A, E, method='blockEnlarge') + assert_allclose(sps_expm, blockEnlarge_expm) + assert_allclose(sps_frechet, blockEnlarge_frechet) + + @pytest.mark.slow + @pytest.mark.skip(reason='this test is deliberately slow') + def test_medium_matrix(self): + # profile this to see the speed difference + n = 1000 + A = np.random.exponential(size=(n, n)) + E = np.random.exponential(size=(n, n)) + sps_expm, sps_frechet = expm_frechet( + A, E, method='SPS') + blockEnlarge_expm, blockEnlarge_frechet = expm_frechet( + A, E, method='blockEnlarge') + assert_allclose(sps_expm, blockEnlarge_expm) + assert_allclose(sps_frechet, blockEnlarge_frechet) + + +def _help_expm_cond_search(A, A_norm, X, X_norm, eps, p): + p = np.reshape(p, A.shape) + p_norm = norm(p) + perturbation = eps * p * (A_norm / p_norm) + X_prime = expm(A + perturbation) + scaled_relative_error = norm(X_prime - X) / (X_norm * eps) + return -scaled_relative_error + + +def _normalized_like(A, B): + return A * (scipy.linalg.norm(B) / scipy.linalg.norm(A)) + + +def _relative_error(f, A, perturbation): + X = f(A) + X_prime = f(A + perturbation) + return norm(X_prime - X) / norm(X) + + +class TestExpmConditionNumber: + def test_expm_cond_smoke(self): + np.random.seed(1234) + for n in range(1, 4): + A = np.random.randn(n, n) + kappa = expm_cond(A) + assert_array_less(0, kappa) + + def test_expm_bad_condition_number(self): + A = np.array([ + [-1.128679820, 9.614183771e4, -4.524855739e9, 
2.924969411e14], + [0, -1.201010529, 9.634696872e4, -4.681048289e9], + [0, 0, -1.132893222, 9.532491830e4], + [0, 0, 0, -1.179475332], + ]) + kappa = expm_cond(A) + assert_array_less(1e36, kappa) + + def test_univariate(self): + np.random.seed(12345) + for x in np.linspace(-5, 5, num=11): + A = np.array([[x]]) + assert_allclose(expm_cond(A), abs(x)) + for x in np.logspace(-2, 2, num=11): + A = np.array([[x]]) + assert_allclose(expm_cond(A), abs(x)) + for i in range(10): + A = np.random.randn(1, 1) + assert_allclose(expm_cond(A), np.absolute(A)[0, 0]) + + @pytest.mark.slow + def test_expm_cond_fuzz(self): + np.random.seed(12345) + eps = 1e-5 + nsamples = 10 + for i in range(nsamples): + n = np.random.randint(2, 5) + A = np.random.randn(n, n) + A_norm = scipy.linalg.norm(A) + X = expm(A) + X_norm = scipy.linalg.norm(X) + kappa = expm_cond(A) + + # Look for the small perturbation that gives the greatest + # relative error. + f = functools.partial(_help_expm_cond_search, + A, A_norm, X, X_norm, eps) + guess = np.ones(n*n) + out = minimize(f, guess, method='L-BFGS-B') + xopt = out.x + yopt = f(xopt) + p_best = eps * _normalized_like(np.reshape(xopt, A.shape), A) + p_best_relerr = _relative_error(expm, A, p_best) + assert_allclose(p_best_relerr, -yopt * eps) + + # Check that the identified perturbation indeed gives greater + # relative error than random perturbations with similar norms. + for j in range(5): + p_rand = eps * _normalized_like(np.random.randn(*A.shape), A) + assert_allclose(norm(p_best), norm(p_rand)) + p_rand_relerr = _relative_error(expm, A, p_rand) + assert_array_less(p_rand_relerr, p_best_relerr) + + # The greatest relative error should not be much greater than + # eps times the condition number kappa. + # In the limit as eps approaches zero it should never be greater. 
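+            # expm_cond returns the relative condition number in the
+            # Frobenius norm,
+            #   kappa(A) = lim_{eps->0} sup_{||E|| <= eps*||A||}
+            #              ||expm(A+E) - expm(A)|| / (eps * ||expm(A)||),
+            # so for the finite eps used here the bound is enforced only up
+            # to the small (1 + 2*eps) slack factor below.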
+ assert_array_less(p_best_relerr, (1 + 2*eps) * eps * kappa) + + +class TestKhatriRao: + + def test_basic(self): + a = khatri_rao(array([[1, 2], [3, 4]]), + array([[5, 6], [7, 8]])) + + assert_array_equal(a, array([[5, 12], + [7, 16], + [15, 24], + [21, 32]])) + + b = khatri_rao(np.empty([2, 2]), np.empty([2, 2])) + assert_array_equal(b.shape, (4, 2)) + + def test_number_of_columns_equality(self): + with pytest.raises(ValueError): + a = array([[1, 2, 3], + [4, 5, 6]]) + b = array([[1, 2], + [3, 4]]) + khatri_rao(a, b) + + def test_to_assure_2d_array(self): + with pytest.raises(ValueError): + # both arrays are 1-D + a = array([1, 2, 3]) + b = array([4, 5, 6]) + khatri_rao(a, b) + + with pytest.raises(ValueError): + # first array is 1-D + a = array([1, 2, 3]) + b = array([ + [1, 2, 3], + [4, 5, 6] + ]) + khatri_rao(a, b) + + with pytest.raises(ValueError): + # second array is 1-D + a = array([ + [1, 2, 3], + [7, 8, 9] + ]) + b = array([4, 5, 6]) + khatri_rao(a, b) + + def test_equality_of_two_equations(self): + a = array([[1, 2], [3, 4]]) + b = array([[5, 6], [7, 8]]) + + res1 = khatri_rao(a, b) + res2 = np.vstack([np.kron(a[:, k], b[:, k]) + for k in range(b.shape[1])]).T + + assert_array_equal(res1, res2) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_misc.py b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_misc.py new file mode 100644 index 0000000000000000000000000000000000000000..1c10923e088b796764e3cc01bbda7da8351a85bf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_misc.py @@ -0,0 +1,5 @@ +from scipy.linalg import norm + + +def test_norm(): + assert norm([]) == 0.0 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_procrustes.py b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_procrustes.py new file mode 100644 index 0000000000000000000000000000000000000000..f41fd0e254c7d301d8a2612a6c9d67a26e2c302c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_procrustes.py @@ -0,0 +1,191 @@ +from itertools import product, permutations + +import numpy as np +from numpy.testing import assert_array_less, assert_allclose +from pytest import raises as assert_raises + +from scipy.linalg import inv, eigh, norm +from scipy.linalg import orthogonal_procrustes +from scipy.sparse._sputils import matrix + + +def test_orthogonal_procrustes_ndim_too_large(): + np.random.seed(1234) + A = np.random.randn(3, 4, 5) + B = np.random.randn(3, 4, 5) + assert_raises(ValueError, orthogonal_procrustes, A, B) + + +def test_orthogonal_procrustes_ndim_too_small(): + np.random.seed(1234) + A = np.random.randn(3) + B = np.random.randn(3) + assert_raises(ValueError, orthogonal_procrustes, A, B) + + +def test_orthogonal_procrustes_shape_mismatch(): + np.random.seed(1234) + shapes = ((3, 3), (3, 4), (4, 3), (4, 4)) + for a, b in permutations(shapes, 2): + A = np.random.randn(*a) + B = np.random.randn(*b) + assert_raises(ValueError, orthogonal_procrustes, A, B) + + +def test_orthogonal_procrustes_checkfinite_exception(): + np.random.seed(1234) + m, n = 2, 3 + A_good = np.random.randn(m, n) + B_good = np.random.randn(m, n) + for bad_value in np.inf, -np.inf, np.nan: + A_bad = A_good.copy() + A_bad[1, 2] = bad_value + B_bad = B_good.copy() + B_bad[1, 2] = bad_value + for A, B in ((A_good, B_bad), (A_bad, B_good), (A_bad, B_bad)): + assert_raises(ValueError, orthogonal_procrustes, A, B) + + +def test_orthogonal_procrustes_scale_invariance(): + np.random.seed(1234) + m, 
n = 4, 3 + for i in range(3): + A_orig = np.random.randn(m, n) + B_orig = np.random.randn(m, n) + R_orig, s = orthogonal_procrustes(A_orig, B_orig) + for A_scale in np.square(np.random.randn(3)): + for B_scale in np.square(np.random.randn(3)): + R, s = orthogonal_procrustes(A_orig * A_scale, B_orig * B_scale) + assert_allclose(R, R_orig) + + +def test_orthogonal_procrustes_array_conversion(): + np.random.seed(1234) + for m, n in ((6, 4), (4, 4), (4, 6)): + A_arr = np.random.randn(m, n) + B_arr = np.random.randn(m, n) + As = (A_arr, A_arr.tolist(), matrix(A_arr)) + Bs = (B_arr, B_arr.tolist(), matrix(B_arr)) + R_arr, s = orthogonal_procrustes(A_arr, B_arr) + AR_arr = A_arr.dot(R_arr) + for A, B in product(As, Bs): + R, s = orthogonal_procrustes(A, B) + AR = A_arr.dot(R) + assert_allclose(AR, AR_arr) + + +def test_orthogonal_procrustes(): + np.random.seed(1234) + for m, n in ((6, 4), (4, 4), (4, 6)): + # Sample a random target matrix. + B = np.random.randn(m, n) + # Sample a random orthogonal matrix + # by computing eigh of a sampled symmetric matrix. + X = np.random.randn(n, n) + w, V = eigh(X.T + X) + assert_allclose(inv(V), V.T) + # Compute a matrix with a known orthogonal transformation that gives B. + A = np.dot(B, V.T) + # Check that an orthogonal transformation from A to B can be recovered. + R, s = orthogonal_procrustes(A, B) + assert_allclose(inv(R), R.T) + assert_allclose(A.dot(R), B) + # Create a perturbed input matrix. + A_perturbed = A + 1e-2 * np.random.randn(m, n) + # Check that the orthogonal procrustes function can find an orthogonal + # transformation that is better than the orthogonal transformation + # computed from the original input matrix. + R_prime, s = orthogonal_procrustes(A_perturbed, B) + assert_allclose(inv(R_prime), R_prime.T) + # Compute the naive and optimal transformations of the perturbed input. + naive_approx = A_perturbed.dot(R) + optim_approx = A_perturbed.dot(R_prime) + # Compute the Frobenius norm errors of the matrix approximations. + naive_approx_error = norm(naive_approx - B, ord='fro') + optim_approx_error = norm(optim_approx - B, ord='fro') + # Check that the orthogonal Procrustes approximation is better. + assert_array_less(optim_approx_error, naive_approx_error) + + +def _centered(A): + mu = A.mean(axis=0) + return A - mu, mu + + +def test_orthogonal_procrustes_exact_example(): + # Check a small application. + # It uses translation, scaling, reflection, and rotation. + # + # | + # a b | + # | + # d c | w + # | + # --------+--- x ----- z --- + # | + # | y + # | + # + A_orig = np.array([[-3, 3], [-2, 3], [-2, 2], [-3, 2]], dtype=float) + B_orig = np.array([[3, 2], [1, 0], [3, -2], [5, 0]], dtype=float) + A, A_mu = _centered(A_orig) + B, B_mu = _centered(B_orig) + R, s = orthogonal_procrustes(A, B) + scale = s / np.square(norm(A)) + B_approx = scale * np.dot(A, R) + B_mu + assert_allclose(B_approx, B_orig, atol=1e-8) + + +def test_orthogonal_procrustes_stretched_example(): + # Try again with a target with a stretched y axis. + A_orig = np.array([[-3, 3], [-2, 3], [-2, 2], [-3, 2]], dtype=float) + B_orig = np.array([[3, 40], [1, 0], [3, -40], [5, 0]], dtype=float) + A, A_mu = _centered(A_orig) + B, B_mu = _centered(B_orig) + R, s = orthogonal_procrustes(A, B) + scale = s / np.square(norm(A)) + B_approx = scale * np.dot(A, R) + B_mu + expected = np.array([[3, 21], [-18, 0], [3, -21], [24, 0]], dtype=float) + assert_allclose(B_approx, expected, atol=1e-8) + # Check disparity symmetry. 
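Editorial aside, not part of the vendored test file: the Procrustes tests above (scale invariance, the exact and stretched examples) all rely on orthogonal_procrustes returning a rotation R together with a scale s. The minimal sketch below shows the SVD construction that behaviour is generally understood to follow; the random 5x3 matrices, the names R_manual/s_manual, and the final scale line are illustrative assumptions mirroring the tests, not code from the diff.

import numpy as np
from scipy.linalg import svd, orthogonal_procrustes

rng = np.random.default_rng(0)
A = rng.standard_normal((5, 3))
B = rng.standard_normal((5, 3))

# The nearest orthogonal map from A to B comes from the SVD of A^T B
# (this is the textbook construction; treated here as an assumption).
U, S, Vh = svd(A.T @ B)
R_manual = U @ Vh          # orthogonal matrix minimizing ||A R - B||_F
s_manual = S.sum()         # scale reported alongside R

R, s = orthogonal_procrustes(A, B)
assert np.allclose(A @ R, A @ R_manual)
assert np.isclose(s, s_manual)

# The tests above turn s into a least-squares scaling via s / ||A||_F**2.
scale = s / np.linalg.norm(A) ** 2

The division by the squared Frobenius norm is exactly what the exact-example and stretched-example tests do before reconstructing B_approx.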
+ expected_disparity = 0.4501246882793018 + AB_disparity = np.square(norm(B_approx - B_orig) / norm(B)) + assert_allclose(AB_disparity, expected_disparity) + R, s = orthogonal_procrustes(B, A) + scale = s / np.square(norm(B)) + A_approx = scale * np.dot(B, R) + A_mu + BA_disparity = np.square(norm(A_approx - A_orig) / norm(A)) + assert_allclose(BA_disparity, expected_disparity) + + +def test_orthogonal_procrustes_skbio_example(): + # This transformation is also exact. + # It uses translation, scaling, and reflection. + # + # | + # | a + # | b + # | c d + # --+--------- + # | + # | w + # | + # | x + # | + # | z y + # | + # + A_orig = np.array([[4, -2], [4, -4], [4, -6], [2, -6]], dtype=float) + B_orig = np.array([[1, 3], [1, 2], [1, 1], [2, 1]], dtype=float) + B_standardized = np.array([ + [-0.13363062, 0.6681531], + [-0.13363062, 0.13363062], + [-0.13363062, -0.40089186], + [0.40089186, -0.40089186]]) + A, A_mu = _centered(A_orig) + B, B_mu = _centered(B_orig) + R, s = orthogonal_procrustes(A, B) + scale = s / np.square(norm(A)) + B_approx = scale * np.dot(A, R) + B_mu + assert_allclose(B_approx, B_orig) + assert_allclose(B / norm(B), B_standardized) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_sketches.py b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_sketches.py new file mode 100644 index 0000000000000000000000000000000000000000..f4515e2dbfd1c7955487489a2324cd17b31d6726 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_sketches.py @@ -0,0 +1,118 @@ +"""Tests for _sketches.py.""" + +import numpy as np +from numpy.testing import assert_, assert_equal +from scipy.linalg import clarkson_woodruff_transform +from scipy.linalg._sketches import cwt_matrix +from scipy.sparse import issparse, rand +from scipy.sparse.linalg import norm + + +class TestClarksonWoodruffTransform: + """ + Testing the Clarkson Woodruff Transform + """ + # set seed for generating test matrices + rng = np.random.RandomState(seed=1179103485) + + # Test matrix parameters + n_rows = 2000 + n_cols = 100 + density = 0.1 + + # Sketch matrix dimensions + n_sketch_rows = 200 + + # Seeds to test with + seeds = [1755490010, 934377150, 1391612830, 1752708722, 2008891431, + 1302443994, 1521083269, 1501189312, 1126232505, 1533465685] + + A_dense = rng.randn(n_rows, n_cols) + A_csc = rand( + n_rows, n_cols, density=density, format='csc', random_state=rng, + ) + A_csr = rand( + n_rows, n_cols, density=density, format='csr', random_state=rng, + ) + A_coo = rand( + n_rows, n_cols, density=density, format='coo', random_state=rng, + ) + + # Collect the test matrices + test_matrices = [ + A_dense, A_csc, A_csr, A_coo, + ] + + # Test vector with norm ~1 + x = rng.randn(n_rows, 1) / np.sqrt(n_rows) + + def test_sketch_dimensions(self): + for A in self.test_matrices: + for seed in self.seeds: + sketch = clarkson_woodruff_transform( + A, self.n_sketch_rows, seed=seed + ) + assert_(sketch.shape == (self.n_sketch_rows, self.n_cols)) + + def test_seed_returns_identical_transform_matrix(self): + for A in self.test_matrices: + for seed in self.seeds: + S1 = cwt_matrix( + self.n_sketch_rows, self.n_rows, seed=seed + ).toarray() + S2 = cwt_matrix( + self.n_sketch_rows, self.n_rows, seed=seed + ).toarray() + assert_equal(S1, S2) + + def test_seed_returns_identically(self): + for A in self.test_matrices: + for seed in self.seeds: + sketch1 = clarkson_woodruff_transform( + A, self.n_sketch_rows, seed=seed + ) + sketch2 = clarkson_woodruff_transform( + A, 
self.n_sketch_rows, seed=seed + ) + if issparse(sketch1): + sketch1 = sketch1.toarray() + if issparse(sketch2): + sketch2 = sketch2.toarray() + assert_equal(sketch1, sketch2) + + def test_sketch_preserves_frobenius_norm(self): + # Given the probabilistic nature of the sketches + # we run the test multiple times and check that + # we pass all/almost all the tries. + n_errors = 0 + for A in self.test_matrices: + if issparse(A): + true_norm = norm(A) + else: + true_norm = np.linalg.norm(A) + for seed in self.seeds: + sketch = clarkson_woodruff_transform( + A, self.n_sketch_rows, seed=seed, + ) + if issparse(sketch): + sketch_norm = norm(sketch) + else: + sketch_norm = np.linalg.norm(sketch) + + if np.abs(true_norm - sketch_norm) > 0.1 * true_norm: + n_errors += 1 + assert_(n_errors == 0) + + def test_sketch_preserves_vector_norm(self): + n_errors = 0 + n_sketch_rows = int(np.ceil(2. / (0.01 * 0.5**2))) + true_norm = np.linalg.norm(self.x) + for seed in self.seeds: + sketch = clarkson_woodruff_transform( + self.x, n_sketch_rows, seed=seed, + ) + sketch_norm = np.linalg.norm(sketch) + + if np.abs(true_norm - sketch_norm) > 0.5 * true_norm: + n_errors += 1 + assert_(n_errors == 0) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_solve_toeplitz.py b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_solve_toeplitz.py new file mode 100644 index 0000000000000000000000000000000000000000..ecced19e2d397d5ed0754d9e4f4edf1125156922 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_solve_toeplitz.py @@ -0,0 +1,121 @@ +"""Test functions for linalg._solve_toeplitz module +""" +import numpy as np +from scipy.linalg._solve_toeplitz import levinson +from scipy.linalg import solve, toeplitz, solve_toeplitz +from numpy.testing import assert_equal, assert_allclose + +import pytest +from pytest import raises as assert_raises + + +def test_solve_equivalence(): + # For toeplitz matrices, solve_toeplitz() should be equivalent to solve(). + random = np.random.RandomState(1234) + for n in (1, 2, 3, 10): + c = random.randn(n) + if random.rand() < 0.5: + c = c + 1j * random.randn(n) + r = random.randn(n) + if random.rand() < 0.5: + r = r + 1j * random.randn(n) + y = random.randn(n) + if random.rand() < 0.5: + y = y + 1j * random.randn(n) + + # Check equivalence when both the column and row are provided. + actual = solve_toeplitz((c,r), y) + desired = solve(toeplitz(c, r=r), y) + assert_allclose(actual, desired) + + # Check equivalence when the column is provided but not the row. + actual = solve_toeplitz(c, b=y) + desired = solve(toeplitz(c), y) + assert_allclose(actual, desired) + + +def test_multiple_rhs(): + random = np.random.RandomState(1234) + c = random.randn(4) + r = random.randn(4) + for offset in [0, 1j]: + for yshape in ((4,), (4, 3), (4, 3, 2)): + y = random.randn(*yshape) + offset + actual = solve_toeplitz((c,r), b=y) + desired = solve(toeplitz(c, r=r), y) + assert_equal(actual.shape, yshape) + assert_equal(desired.shape, yshape) + assert_allclose(actual, desired) + + +def test_native_list_arguments(): + c = [1,2,4,7] + r = [1,3,9,12] + y = [5,1,4,2] + actual = solve_toeplitz((c,r), y) + desired = solve(toeplitz(c, r=r), y) + assert_allclose(actual, desired) + + +def test_zero_diag_error(): + # The Levinson-Durbin implementation fails when the diagonal is zero. 
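Brief aside, not part of the vendored file: test_solve_equivalence and test_multiple_rhs above compare solve_toeplitz against a dense solve on the explicitly constructed Toeplitz matrix. The sketch below restates that equivalence on an arbitrary 5x5 system; the sizes and seed are illustrative choices only, and all functions used already appear in the test's imports.

import numpy as np
from scipy.linalg import solve, solve_toeplitz, toeplitz

rng = np.random.default_rng(1234)
c = rng.standard_normal(5)      # first column of the Toeplitz matrix
r = rng.standard_normal(5)      # first row (r[0] is ignored; c[0] is used)
b = rng.standard_normal(5)

x_fast = solve_toeplitz((c, r), b)   # Levinson-Durbin recursion, O(n^2)
x_ref = solve(toeplitz(c, r=r), b)   # dense reference solve
assert np.allclose(x_fast, x_ref)

The Levinson recursion requires every leading principal submatrix to be nonsingular, which is why the zero-diagonal case and the Wikipedia counterexample in the surrounding tests are expected to raise LinAlgError.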
+ random = np.random.RandomState(1234) + n = 4 + c = random.randn(n) + r = random.randn(n) + y = random.randn(n) + c[0] = 0 + assert_raises(np.linalg.LinAlgError, + solve_toeplitz, (c, r), b=y) + + +def test_wikipedia_counterexample(): + # The Levinson-Durbin implementation also fails in other cases. + # This example is from the talk page of the wikipedia article. + random = np.random.RandomState(1234) + c = [2, 2, 1] + y = random.randn(3) + assert_raises(np.linalg.LinAlgError, solve_toeplitz, c, b=y) + + +def test_reflection_coeffs(): + # check that the partial solutions are given by the reflection + # coefficients + + random = np.random.RandomState(1234) + y_d = random.randn(10) + y_z = random.randn(10) + 1j + reflection_coeffs_d = [1] + reflection_coeffs_z = [1] + for i in range(2, 10): + reflection_coeffs_d.append(solve_toeplitz(y_d[:(i-1)], b=y_d[1:i])[-1]) + reflection_coeffs_z.append(solve_toeplitz(y_z[:(i-1)], b=y_z[1:i])[-1]) + + y_d_concat = np.concatenate((y_d[-2:0:-1], y_d[:-1])) + y_z_concat = np.concatenate((y_z[-2:0:-1].conj(), y_z[:-1])) + _, ref_d = levinson(y_d_concat, b=y_d[1:]) + _, ref_z = levinson(y_z_concat, b=y_z[1:]) + + assert_allclose(reflection_coeffs_d, ref_d[:-1]) + assert_allclose(reflection_coeffs_z, ref_z[:-1]) + + +@pytest.mark.xfail(reason='Instability of Levinson iteration') +def test_unstable(): + # this is a "Gaussian Toeplitz matrix", as mentioned in Example 2 of + # I. Gohbert, T. Kailath and V. Olshevsky "Fast Gaussian Elimination with + # Partial Pivoting for Matrices with Displacement Structure" + # Mathematics of Computation, 64, 212 (1995), pp 1557-1576 + # which can be unstable for levinson recursion. + + # other fast toeplitz solvers such as GKO or Burg should be better. + random = np.random.RandomState(1234) + n = 100 + c = 0.9 ** (np.arange(n)**2) + y = random.randn(n) + + solution1 = solve_toeplitz(c, b=y) + solution2 = solve(toeplitz(c), y) + + assert_allclose(solution1, solution2) + diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_solvers.py b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_solvers.py new file mode 100644 index 0000000000000000000000000000000000000000..1be2f0d35bd13c2c760c9011b6c482b61a92ccbc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_solvers.py @@ -0,0 +1,777 @@ +import os +import numpy as np + +from numpy.testing import assert_array_almost_equal, assert_allclose +import pytest +from pytest import raises as assert_raises + +from scipy.linalg import solve_sylvester +from scipy.linalg import solve_continuous_lyapunov, solve_discrete_lyapunov +from scipy.linalg import solve_continuous_are, solve_discrete_are +from scipy.linalg import block_diag, solve, LinAlgError +from scipy.sparse._sputils import matrix + + +def _load_data(name): + """ + Load npz data file under data/ + Returns a copy of the data, rather than keeping the npz file open. + """ + filename = os.path.join(os.path.abspath(os.path.dirname(__file__)), + 'data', name) + with np.load(filename) as f: + return dict(f.items()) + + +class TestSolveLyapunov: + + cases = [ + (np.array([[1, 2], [3, 4]]), + np.array([[9, 10], [11, 12]])), + # a, q all complex. + (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]), + np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])), + # a real; q complex. + (np.array([[1.0, 2.0], [3.0, 5.0]]), + np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])), + # a complex; q real. 
+ (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]), + np.array([[2.0, 2.0], [-1.0, 2.0]])), + # An example from Kitagawa, 1977 + (np.array([[3, 9, 5, 1, 4], [1, 2, 3, 8, 4], [4, 6, 6, 6, 3], + [1, 5, 2, 0, 7], [5, 3, 3, 1, 5]]), + np.array([[2, 4, 1, 0, 1], [4, 1, 0, 2, 0], [1, 0, 3, 0, 3], + [0, 2, 0, 1, 0], [1, 0, 3, 0, 4]])), + # Companion matrix example. a complex; q real; a.shape[0] = 11 + (np.array([[0.100+0.j, 0.091+0.j, 0.082+0.j, 0.073+0.j, 0.064+0.j, + 0.055+0.j, 0.046+0.j, 0.037+0.j, 0.028+0.j, 0.019+0.j, + 0.010+0.j], + [1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, + 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, + 0.000+0.j], + [0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, + 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, + 0.000+0.j], + [0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, + 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, + 0.000+0.j], + [0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, + 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, + 0.000+0.j], + [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, + 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, + 0.000+0.j], + [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, + 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, + 0.000+0.j], + [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, + 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, + 0.000+0.j], + [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, + 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, 0.000+0.j, + 0.000+0.j], + [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, + 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, 0.000+0.j, + 0.000+0.j], + [0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, + 0.000+0.j, 0.000+0.j, 0.000+0.j, 0.000+0.j, 1.000+0.j, + 0.000+0.j]]), + np.eye(11)), + # https://github.com/scipy/scipy/issues/4176 + (matrix([[0, 1], [-1/2, -1]]), + (matrix([0, 3]).T @ matrix([0, 3]).T.T)), + # https://github.com/scipy/scipy/issues/4176 + (matrix([[0, 1], [-1/2, -1]]), + (np.array(matrix([0, 3]).T @ matrix([0, 3]).T.T))), + ] + + def test_continuous_squareness_and_shape(self): + nsq = np.ones((3, 2)) + sq = np.eye(3) + assert_raises(ValueError, solve_continuous_lyapunov, nsq, sq) + assert_raises(ValueError, solve_continuous_lyapunov, sq, nsq) + assert_raises(ValueError, solve_continuous_lyapunov, sq, np.eye(2)) + + def check_continuous_case(self, a, q): + x = solve_continuous_lyapunov(a, q) + assert_array_almost_equal( + np.dot(a, x) + np.dot(x, a.conj().transpose()), q) + + def check_discrete_case(self, a, q, method=None): + x = solve_discrete_lyapunov(a, q, method=method) + assert_array_almost_equal( + np.dot(np.dot(a, x), a.conj().transpose()) - x, -1.0*q) + + def test_cases(self): + for case in self.cases: + self.check_continuous_case(case[0], case[1]) + self.check_discrete_case(case[0], case[1]) + self.check_discrete_case(case[0], case[1], method='direct') + self.check_discrete_case(case[0], case[1], method='bilinear') + + +class TestSolveContinuousAre: + mat6 = _load_data('carex_6_data.npz') + mat15 = _load_data('carex_15_data.npz') + mat18 = _load_data('carex_18_data.npz') + mat19 = _load_data('carex_19_data.npz') + mat20 = _load_data('carex_20_data.npz') + cases = [ + # Carex examples taken from (with default parameters): + # [1] P.BENNER, A.J. LAUB, V. MEHRMANN: 'A Collection of Benchmark + # Examples for the Numerical Solution of Algebraic Riccati + # Equations II: Continuous-Time Case', Tech. Report SPC 95_23, + # Fak. f. 
Mathematik, TU Chemnitz-Zwickau (Germany), 1995. + # + # The format of the data is (a, b, q, r, knownfailure), where + # knownfailure is None if the test passes or a string + # indicating the reason for failure. + # + # Test Case 0: carex #1 + (np.diag([1.], 1), + np.array([[0], [1]]), + block_diag(1., 2.), + 1, + None), + # Test Case 1: carex #2 + (np.array([[4, 3], [-4.5, -3.5]]), + np.array([[1], [-1]]), + np.array([[9, 6], [6, 4.]]), + 1, + None), + # Test Case 2: carex #3 + (np.array([[0, 1, 0, 0], + [0, -1.89, 0.39, -5.53], + [0, -0.034, -2.98, 2.43], + [0.034, -0.0011, -0.99, -0.21]]), + np.array([[0, 0], [0.36, -1.6], [-0.95, -0.032], [0.03, 0]]), + np.array([[2.313, 2.727, 0.688, 0.023], + [2.727, 4.271, 1.148, 0.323], + [0.688, 1.148, 0.313, 0.102], + [0.023, 0.323, 0.102, 0.083]]), + np.eye(2), + None), + # Test Case 3: carex #4 + (np.array([[-0.991, 0.529, 0, 0, 0, 0, 0, 0], + [0.522, -1.051, 0.596, 0, 0, 0, 0, 0], + [0, 0.522, -1.118, 0.596, 0, 0, 0, 0], + [0, 0, 0.522, -1.548, 0.718, 0, 0, 0], + [0, 0, 0, 0.922, -1.64, 0.799, 0, 0], + [0, 0, 0, 0, 0.922, -1.721, 0.901, 0], + [0, 0, 0, 0, 0, 0.922, -1.823, 1.021], + [0, 0, 0, 0, 0, 0, 0.922, -1.943]]), + np.array([[3.84, 4.00, 37.60, 3.08, 2.36, 2.88, 3.08, 3.00], + [-2.88, -3.04, -2.80, -2.32, -3.32, -3.82, -4.12, -3.96]] + ).T * 0.001, + np.array([[1.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.1], + [0.0, 1.0, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0], + [0.0, 0.0, 1.0, 0.0, 0.0, 0.5, 0.0, 0.0], + [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0], + [0.5, 0.1, 0.0, 0.0, 0.1, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.5, 0.0, 0.0, 0.1, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1, 0.0], + [0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1]]), + np.eye(2), + None), + # Test Case 4: carex #5 + (np.array( + [[-4.019, 5.120, 0., 0., -2.082, 0., 0., 0., 0.870], + [-0.346, 0.986, 0., 0., -2.340, 0., 0., 0., 0.970], + [-7.909, 15.407, -4.069, 0., -6.450, 0., 0., 0., 2.680], + [-21.816, 35.606, -0.339, -3.870, -17.800, 0., 0., 0., 7.390], + [-60.196, 98.188, -7.907, 0.340, -53.008, 0., 0., 0., 20.400], + [0, 0, 0, 0, 94.000, -147.200, 0., 53.200, 0.], + [0, 0, 0, 0, 0, 94.000, -147.200, 0, 0], + [0, 0, 0, 0, 0, 12.800, 0.000, -31.600, 0], + [0, 0, 0, 0, 12.800, 0.000, 0.000, 18.800, -31.600]]), + np.array([[0.010, -0.011, -0.151], + [0.003, -0.021, 0.000], + [0.009, -0.059, 0.000], + [0.024, -0.162, 0.000], + [0.068, -0.445, 0.000], + [0.000, 0.000, 0.000], + [0.000, 0.000, 0.000], + [0.000, 0.000, 0.000], + [0.000, 0.000, 0.000]]), + np.eye(9), + np.eye(3), + None), + # Test Case 5: carex #6 + (mat6['A'], mat6['B'], mat6['Q'], mat6['R'], None), + # Test Case 6: carex #7 + (np.array([[1, 0], [0, -2.]]), + np.array([[1e-6], [0]]), + np.ones((2, 2)), + 1., + 'Bad residual accuracy'), + # Test Case 7: carex #8 + (block_diag(-0.1, -0.02), + np.array([[0.100, 0.000], [0.001, 0.010]]), + np.array([[100, 1000], [1000, 10000]]), + np.ones((2, 2)) + block_diag(1e-6, 0), + None), + # Test Case 8: carex #9 + (np.array([[0, 1e6], [0, 0]]), + np.array([[0], [1.]]), + np.eye(2), + 1., + None), + # Test Case 9: carex #10 + (np.array([[1.0000001, 1], [1., 1.0000001]]), + np.eye(2), + np.eye(2), + np.eye(2), + None), + # Test Case 10: carex #11 + (np.array([[3, 1.], [4, 2]]), + np.array([[1], [1]]), + np.array([[-11, -5], [-5, -2.]]), + 1., + None), + # Test Case 11: carex #12 + (np.array([[7000000., 2000000., -0.], + [2000000., 6000000., -2000000.], + [0., -2000000., 5000000.]]) / 3, + np.eye(3), + np.array([[1., -2., -2.], [-2., 1., -2.], [-2., -2., 1.]]).dot( + np.diag([1e-6, 1, 1e6])).dot( + 
np.array([[1., -2., -2.], [-2., 1., -2.], [-2., -2., 1.]])) / 9, + np.eye(3) * 1e6, + 'Bad Residual Accuracy'), + # Test Case 12: carex #13 + (np.array([[0, 0.4, 0, 0], + [0, 0, 0.345, 0], + [0, -0.524e6, -0.465e6, 0.262e6], + [0, 0, 0, -1e6]]), + np.array([[0, 0, 0, 1e6]]).T, + np.diag([1, 0, 1, 0]), + 1., + None), + # Test Case 13: carex #14 + (np.array([[-1e-6, 1, 0, 0], + [-1, -1e-6, 0, 0], + [0, 0, 1e-6, 1], + [0, 0, -1, 1e-6]]), + np.ones((4, 1)), + np.ones((4, 4)), + 1., + None), + # Test Case 14: carex #15 + (mat15['A'], mat15['B'], mat15['Q'], mat15['R'], None), + # Test Case 15: carex #16 + (np.eye(64, 64, k=-1) + np.eye(64, 64)*(-2.) + np.rot90( + block_diag(1, np.zeros((62, 62)), 1)) + np.eye(64, 64, k=1), + np.eye(64), + np.eye(64), + np.eye(64), + None), + # Test Case 16: carex #17 + (np.diag(np.ones((20, )), 1), + np.flipud(np.eye(21, 1)), + np.eye(21, 1) * np.eye(21, 1).T, + 1, + 'Bad Residual Accuracy'), + # Test Case 17: carex #18 + (mat18['A'], mat18['B'], mat18['Q'], mat18['R'], None), + # Test Case 18: carex #19 + (mat19['A'], mat19['B'], mat19['Q'], mat19['R'], + 'Bad Residual Accuracy'), + # Test Case 19: carex #20 + (mat20['A'], mat20['B'], mat20['Q'], mat20['R'], + 'Bad Residual Accuracy') + ] + # Makes the minimum precision requirements customized to the test. + # Here numbers represent the number of decimals that agrees with zero + # matrix when the solution x is plugged in to the equation. + # + # res = array([[8e-3,1e-16],[1e-16,1e-20]]) --> min_decimal[k] = 2 + # + # If the test is failing use "None" for that entry. + # + min_decimal = (14, 12, 13, 14, 11, 6, None, 5, 7, 14, 14, + None, 9, 14, 13, 14, None, 12, None, None) + + @pytest.mark.parametrize("j, case", enumerate(cases)) + def test_solve_continuous_are(self, j, case): + """Checks if 0 = XA + A'X - XB(R)^{-1} B'X + Q is true""" + a, b, q, r, knownfailure = case + if knownfailure: + pytest.xfail(reason=knownfailure) + + dec = self.min_decimal[j] + x = solve_continuous_are(a, b, q, r) + res = x @ a + a.conj().T @ x + q + out_fact = x @ b + res -= out_fact @ solve(np.atleast_2d(r), out_fact.conj().T) + assert_array_almost_equal(res, np.zeros_like(res), decimal=dec) + + +class TestSolveDiscreteAre: + cases = [ + # Darex examples taken from (with default parameters): + # [1] P.BENNER, A.J. LAUB, V. MEHRMANN: 'A Collection of Benchmark + # Examples for the Numerical Solution of Algebraic Riccati + # Equations II: Discrete-Time Case', Tech. Report SPC 95_23, + # Fak. f. Mathematik, TU Chemnitz-Zwickau (Germany), 1995. + # [2] T. GUDMUNDSSON, C. KENNEY, A.J. LAUB: 'Scaling of the + # Discrete-Time Algebraic Riccati Equation to Enhance Stability + # of the Schur Solution Method', IEEE Trans.Aut.Cont., vol.37(4) + # + # The format of the data is (a, b, q, r, knownfailure), where + # knownfailure is None if the test passes or a string + # indicating the reason for failure. 
+ # + # TEST CASE 0 : Complex a; real b, q, r + (np.array([[2, 1-2j], [0, -3j]]), + np.array([[0], [1]]), + np.array([[1, 0], [0, 2]]), + np.array([[1]]), + None), + # TEST CASE 1 :Real a, q, r; complex b + (np.array([[2, 1], [0, -1]]), + np.array([[-2j], [1j]]), + np.array([[1, 0], [0, 2]]), + np.array([[1]]), + None), + # TEST CASE 2 : Real a, b; complex q, r + (np.array([[3, 1], [0, -1]]), + np.array([[1, 2], [1, 3]]), + np.array([[1, 1+1j], [1-1j, 2]]), + np.array([[2, -2j], [2j, 3]]), + None), + # TEST CASE 3 : User-reported gh-2251 (Trac #1732) + (np.array([[0.63399379, 0.54906824, 0.76253406], + [0.5404729, 0.53745766, 0.08731853], + [0.27524045, 0.84922129, 0.4681622]]), + np.array([[0.96861695], [0.05532739], [0.78934047]]), + np.eye(3), + np.eye(1), + None), + # TEST CASE 4 : darex #1 + (np.array([[4, 3], [-4.5, -3.5]]), + np.array([[1], [-1]]), + np.array([[9, 6], [6, 4]]), + np.array([[1]]), + None), + # TEST CASE 5 : darex #2 + (np.array([[0.9512, 0], [0, 0.9048]]), + np.array([[4.877, 4.877], [-1.1895, 3.569]]), + np.array([[0.005, 0], [0, 0.02]]), + np.array([[1/3, 0], [0, 3]]), + None), + # TEST CASE 6 : darex #3 + (np.array([[2, -1], [1, 0]]), + np.array([[1], [0]]), + np.array([[0, 0], [0, 1]]), + np.array([[0]]), + None), + # TEST CASE 7 : darex #4 (skipped the gen. Ric. term S) + (np.array([[0, 1], [0, -1]]), + np.array([[1, 0], [2, 1]]), + np.array([[-4, -4], [-4, 7]]) * (1/11), + np.array([[9, 3], [3, 1]]), + None), + # TEST CASE 8 : darex #5 + (np.array([[0, 1], [0, 0]]), + np.array([[0], [1]]), + np.array([[1, 2], [2, 4]]), + np.array([[1]]), + None), + # TEST CASE 9 : darex #6 + (np.array([[0.998, 0.067, 0, 0], + [-.067, 0.998, 0, 0], + [0, 0, 0.998, 0.153], + [0, 0, -.153, 0.998]]), + np.array([[0.0033, 0.0200], + [0.1000, -.0007], + [0.0400, 0.0073], + [-.0028, 0.1000]]), + np.array([[1.87, 0, 0, -0.244], + [0, 0.744, 0.205, 0], + [0, 0.205, 0.589, 0], + [-0.244, 0, 0, 1.048]]), + np.eye(2), + None), + # TEST CASE 10 : darex #7 + (np.array([[0.984750, -.079903, 0.0009054, -.0010765], + [0.041588, 0.998990, -.0358550, 0.0126840], + [-.546620, 0.044916, -.3299100, 0.1931800], + [2.662400, -.100450, -.9245500, -.2632500]]), + np.array([[0.0037112, 0.0007361], + [-.0870510, 9.3411e-6], + [-1.198440, -4.1378e-4], + [-3.192700, 9.2535e-4]]), + np.eye(4)*1e-2, + np.eye(2), + None), + # TEST CASE 11 : darex #8 + (np.array([[-0.6000000, -2.2000000, -3.6000000, -5.4000180], + [1.0000000, 0.6000000, 0.8000000, 3.3999820], + [0.0000000, 1.0000000, 1.8000000, 3.7999820], + [0.0000000, 0.0000000, 0.0000000, -0.9999820]]), + np.array([[1.0, -1.0, -1.0, -1.0], + [0.0, 1.0, -1.0, -1.0], + [0.0, 0.0, 1.0, -1.0], + [0.0, 0.0, 0.0, 1.0]]), + np.array([[2, 1, 3, 6], + [1, 2, 2, 5], + [3, 2, 6, 11], + [6, 5, 11, 22]]), + np.eye(4), + None), + # TEST CASE 12 : darex #9 + (np.array([[95.4070, 1.9643, 0.3597, 0.0673, 0.0190], + [40.8490, 41.3170, 16.0840, 4.4679, 1.1971], + [12.2170, 26.3260, 36.1490, 15.9300, 12.3830], + [4.1118, 12.8580, 27.2090, 21.4420, 40.9760], + [0.1305, 0.5808, 1.8750, 3.6162, 94.2800]]) * 0.01, + np.array([[0.0434, -0.0122], + [2.6606, -1.0453], + [3.7530, -5.5100], + [3.6076, -6.6000], + [0.4617, -0.9148]]) * 0.01, + np.eye(5), + np.eye(2), + None), + # TEST CASE 13 : darex #10 + (np.kron(np.eye(2), np.diag([1, 1], k=1)), + np.kron(np.eye(2), np.array([[0], [0], [1]])), + np.array([[1, 1, 0, 0, 0, 0], + [1, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 1, -1, 0], + [0, 0, 0, -1, 1, 0], + [0, 0, 0, 0, 0, 0]]), + np.array([[3, 0], [0, 1]]), + None), + # TEST 
CASE 14 : darex #11 + (0.001 * np.array( + [[870.1, 135.0, 11.59, .5014, -37.22, .3484, 0, 4.242, 7.249], + [76.55, 897.4, 12.72, 0.5504, -40.16, .3743, 0, 4.53, 7.499], + [-127.2, 357.5, 817, 1.455, -102.8, .987, 0, 11.85, 18.72], + [-363.5, 633.9, 74.91, 796.6, -273.5, 2.653, 0, 31.72, 48.82], + [-960, 1645.9, -128.9, -5.597, 71.42, 7.108, 0, 84.52, 125.9], + [-664.4, 112.96, -88.89, -3.854, 84.47, 13.6, 0, 144.3, 101.6], + [-410.2, 693, -54.71, -2.371, 66.49, 12.49, .1063, 99.97, 69.67], + [-179.9, 301.7, -23.93, -1.035, 60.59, 22.16, 0, 213.9, 35.54], + [-345.1, 580.4, -45.96, -1.989, 105.6, 19.86, 0, 219.1, 215.2]]), + np.array([[4.7600, -0.5701, -83.6800], + [0.8790, -4.7730, -2.7300], + [1.4820, -13.1200, 8.8760], + [3.8920, -35.1300, 24.8000], + [10.3400, -92.7500, 66.8000], + [7.2030, -61.5900, 38.3400], + [4.4540, -36.8300, 20.2900], + [1.9710, -15.5400, 6.9370], + [3.7730, -30.2800, 14.6900]]) * 0.001, + np.diag([50, 0, 0, 0, 50, 0, 0, 0, 0]), + np.eye(3), + None), + # TEST CASE 15 : darex #12 - numerically least accurate example + (np.array([[0, 1e6], [0, 0]]), + np.array([[0], [1]]), + np.eye(2), + np.array([[1]]), + "Presumed issue with OpenBLAS, see gh-16926"), + # TEST CASE 16 : darex #13 + (np.array([[16, 10, -2], + [10, 13, -8], + [-2, -8, 7]]) * (1/9), + np.eye(3), + 1e6 * np.eye(3), + 1e6 * np.eye(3), + "Issue with OpenBLAS, see gh-16926"), + # TEST CASE 17 : darex #14 + (np.array([[1 - 1/1e8, 0, 0, 0], + [1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 1, 0]]), + np.array([[1e-08], [0], [0], [0]]), + np.diag([0, 0, 0, 1]), + np.array([[0.25]]), + None), + # TEST CASE 18 : darex #15 + (np.eye(100, k=1), + np.flipud(np.eye(100, 1)), + np.eye(100), + np.array([[1]]), + None) + ] + + # Makes the minimum precision requirements customized to the test. + # Here numbers represent the number of decimals that agrees with zero + # matrix when the solution x is plugged in to the equation. + # + # res = array([[8e-3,1e-16],[1e-16,1e-20]]) --> min_decimal[k] = 2 + # + # If the test is failing use "None" for that entry. + # + min_decimal = (12, 14, 13, 14, 13, 16, 18, 14, 14, 13, + 14, 13, 13, 14, 12, 2, 5, 6, 10) + max_tol = [1.5 * 10**-ind for ind in min_decimal] + # relaxed tolerance in gh-18012 after bump to OpenBLAS + max_tol[11] = 2.5e-13 + + @pytest.mark.parametrize("j, case", enumerate(cases)) + def test_solve_discrete_are(self, j, case): + """Checks if X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q) is true""" + a, b, q, r, knownfailure = case + if knownfailure: + pytest.xfail(reason=knownfailure) + + atol = self.max_tol[j] + + x = solve_discrete_are(a, b, q, r) + bH = b.conj().T + xa, xb = x @ a, x @ b + + res = a.conj().T @ xa - x + q + res -= a.conj().T @ xb @ (solve(r + bH @ xb, bH) @ xa) + + # changed from + # assert_array_almost_equal(res, np.zeros_like(res), decimal=dec) + # in gh-18012 as it's easier to relax a tolerance and allclose is + # preferred + assert_allclose(res, np.zeros_like(res), atol=atol) + + def test_infeasible(self): + # An infeasible example taken from https://arxiv.org/abs/1505.04861v1 + A = np.triu(np.ones((3, 3))) + A[0, 1] = -1 + B = np.array([[1, 1, 0], [0, 0, 1]]).T + Q = np.full_like(A, -2) + np.diag([8, -1, -1.9]) + R = np.diag([-10, 0.1]) + assert_raises(LinAlgError, solve_continuous_are, A, B, Q, R) + + +def test_solve_generalized_continuous_are(): + cases = [ + # Two random examples differ by s term + # in the absence of any literature for demanding examples. 
+ (np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01], + [4.617139e-02, 6.948286e-01, 3.444608e-02], + [9.713178e-02, 3.170995e-01, 4.387444e-01]]), + np.array([[3.815585e-01, 1.868726e-01], + [7.655168e-01, 4.897644e-01], + [7.951999e-01, 4.455862e-01]]), + np.eye(3), + np.eye(2), + np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01], + [7.093648e-01, 6.797027e-01, 1.189977e-01], + [7.546867e-01, 6.550980e-01, 4.983641e-01]]), + np.zeros((3, 2)), + None), + (np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01], + [4.617139e-02, 6.948286e-01, 3.444608e-02], + [9.713178e-02, 3.170995e-01, 4.387444e-01]]), + np.array([[3.815585e-01, 1.868726e-01], + [7.655168e-01, 4.897644e-01], + [7.951999e-01, 4.455862e-01]]), + np.eye(3), + np.eye(2), + np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01], + [7.093648e-01, 6.797027e-01, 1.189977e-01], + [7.546867e-01, 6.550980e-01, 4.983641e-01]]), + np.ones((3, 2)), + None) + ] + + min_decimal = (10, 10) + + def _test_factory(case, dec): + """Checks if X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q) is true""" + a, b, q, r, e, s, knownfailure = case + if knownfailure: + pytest.xfail(reason=knownfailure) + + x = solve_continuous_are(a, b, q, r, e, s) + res = a.conj().T.dot(x.dot(e)) + e.conj().T.dot(x.dot(a)) + q + out_fact = e.conj().T.dot(x).dot(b) + s + res -= out_fact.dot(solve(np.atleast_2d(r), out_fact.conj().T)) + assert_array_almost_equal(res, np.zeros_like(res), decimal=dec) + + for ind, case in enumerate(cases): + _test_factory(case, min_decimal[ind]) + + +def test_solve_generalized_discrete_are(): + mat20170120 = _load_data('gendare_20170120_data.npz') + + cases = [ + # Two random examples differ by s term + # in the absence of any literature for demanding examples. + (np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01], + [4.617139e-02, 6.948286e-01, 3.444608e-02], + [9.713178e-02, 3.170995e-01, 4.387444e-01]]), + np.array([[3.815585e-01, 1.868726e-01], + [7.655168e-01, 4.897644e-01], + [7.951999e-01, 4.455862e-01]]), + np.eye(3), + np.eye(2), + np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01], + [7.093648e-01, 6.797027e-01, 1.189977e-01], + [7.546867e-01, 6.550980e-01, 4.983641e-01]]), + np.zeros((3, 2)), + None), + (np.array([[2.769230e-01, 8.234578e-01, 9.502220e-01], + [4.617139e-02, 6.948286e-01, 3.444608e-02], + [9.713178e-02, 3.170995e-01, 4.387444e-01]]), + np.array([[3.815585e-01, 1.868726e-01], + [7.655168e-01, 4.897644e-01], + [7.951999e-01, 4.455862e-01]]), + np.eye(3), + np.eye(2), + np.array([[6.463130e-01, 2.760251e-01, 1.626117e-01], + [7.093648e-01, 6.797027e-01, 1.189977e-01], + [7.546867e-01, 6.550980e-01, 4.983641e-01]]), + np.ones((3, 2)), + "Presumed issue with OpenBLAS, see gh-16926"), + # user-reported (under PR-6616) 20-Jan-2017 + # tests against the case where E is None but S is provided + (mat20170120['A'], + mat20170120['B'], + mat20170120['Q'], + mat20170120['R'], + None, + mat20170120['S'], + None), + ] + + max_atol = (1.5e-11, 1.5e-11, 3.5e-16) + + def _test_factory(case, atol): + """Checks if X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q) is true""" + a, b, q, r, e, s, knownfailure = case + if knownfailure: + pytest.xfail(reason=knownfailure) + + x = solve_discrete_are(a, b, q, r, e, s) + if e is None: + e = np.eye(a.shape[0]) + if s is None: + s = np.zeros_like(b) + res = a.conj().T.dot(x.dot(a)) - e.conj().T.dot(x.dot(e)) + q + res -= (a.conj().T.dot(x.dot(b)) + s).dot( + solve(r+b.conj().T.dot(x.dot(b)), + (b.conj().T.dot(x.dot(a)) + s.conj().T) + ) + ) + # changed from: + # assert_array_almost_equal(res, 
np.zeros_like(res), decimal=dec) + # in gh-17950 because of a Linux 32 bit fail. + assert_allclose(res, np.zeros_like(res), atol=atol) + + for ind, case in enumerate(cases): + _test_factory(case, max_atol[ind]) + + +def test_are_validate_args(): + + def test_square_shape(): + nsq = np.ones((3, 2)) + sq = np.eye(3) + for x in (solve_continuous_are, solve_discrete_are): + assert_raises(ValueError, x, nsq, 1, 1, 1) + assert_raises(ValueError, x, sq, sq, nsq, 1) + assert_raises(ValueError, x, sq, sq, sq, nsq) + assert_raises(ValueError, x, sq, sq, sq, sq, nsq) + + def test_compatible_sizes(): + nsq = np.ones((3, 2)) + sq = np.eye(4) + for x in (solve_continuous_are, solve_discrete_are): + assert_raises(ValueError, x, sq, nsq, 1, 1) + assert_raises(ValueError, x, sq, sq, sq, sq, sq, nsq) + assert_raises(ValueError, x, sq, sq, np.eye(3), sq) + assert_raises(ValueError, x, sq, sq, sq, np.eye(3)) + assert_raises(ValueError, x, sq, sq, sq, sq, np.eye(3)) + + def test_symmetry(): + nsym = np.arange(9).reshape(3, 3) + sym = np.eye(3) + for x in (solve_continuous_are, solve_discrete_are): + assert_raises(ValueError, x, sym, sym, nsym, sym) + assert_raises(ValueError, x, sym, sym, sym, nsym) + + def test_singularity(): + sing = np.full((3, 3), 1e12) + sing[2, 2] -= 1 + sq = np.eye(3) + for x in (solve_continuous_are, solve_discrete_are): + assert_raises(ValueError, x, sq, sq, sq, sq, sing) + + assert_raises(ValueError, solve_continuous_are, sq, sq, sq, sing) + + def test_finiteness(): + nm = np.full((2, 2), np.nan) + sq = np.eye(2) + for x in (solve_continuous_are, solve_discrete_are): + assert_raises(ValueError, x, nm, sq, sq, sq) + assert_raises(ValueError, x, sq, nm, sq, sq) + assert_raises(ValueError, x, sq, sq, nm, sq) + assert_raises(ValueError, x, sq, sq, sq, nm) + assert_raises(ValueError, x, sq, sq, sq, sq, nm) + assert_raises(ValueError, x, sq, sq, sq, sq, sq, nm) + + +class TestSolveSylvester: + + cases = [ + # a, b, c all real. + (np.array([[1, 2], [0, 4]]), + np.array([[5, 6], [0, 8]]), + np.array([[9, 10], [11, 12]])), + # a, b, c all real, 4x4. a and b have non-trival 2x2 blocks in their + # quasi-triangular form. + (np.array([[1.0, 0, 0, 0], + [0, 1.0, 2.0, 0.0], + [0, 0, 3.0, -4], + [0, 0, 2, 5]]), + np.array([[2.0, 0, 0, 1.0], + [0, 1.0, 0.0, 0.0], + [0, 0, 1.0, -1], + [0, 0, 1, 1]]), + np.array([[1.0, 0, 0, 0], + [0, 1.0, 0, 0], + [0, 0, 1.0, 0], + [0, 0, 0, 1.0]])), + # a, b, c all complex. + (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]), + np.array([[-1.0, 2j], [3.0, 4.0]]), + np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])), + # a and b real; c complex. + (np.array([[1.0, 2.0], [3.0, 5.0]]), + np.array([[-1.0, 0], [3.0, 4.0]]), + np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])), + # a and c complex; b real. + (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]), + np.array([[-1.0, 0], [3.0, 4.0]]), + np.array([[2.0-2j, 2.0+2j], [-1.0-1j, 2.0]])), + # a complex; b and c real. 
+ (np.array([[1.0+1j, 2.0], [3.0-4.0j, 5.0]]), + np.array([[-1.0, 0], [3.0, 4.0]]), + np.array([[2.0, 2.0], [-1.0, 2.0]])), + # not square matrices, real + (np.array([[8, 1, 6], [3, 5, 7], [4, 9, 2]]), + np.array([[2, 3], [4, 5]]), + np.array([[1, 2], [3, 4], [5, 6]])), + # not square matrices, complex + (np.array([[8, 1j, 6+2j], [3, 5, 7], [4, 9, 2]]), + np.array([[2, 3], [4, 5-1j]]), + np.array([[1, 2j], [3, 4j], [5j, 6+7j]])), + ] + + def check_case(self, a, b, c): + x = solve_sylvester(a, b, c) + assert_array_almost_equal(np.dot(a, x) + np.dot(x, b), c) + + def test_cases(self): + for case in self.cases: + self.check_case(case[0], case[1], case[2]) + + def test_trivial(self): + a = np.array([[1.0, 0.0], [0.0, 1.0]]) + b = np.array([[1.0]]) + c = np.array([2.0, 2.0]).reshape(-1, 1) + x = solve_sylvester(a, b, c) + assert_array_almost_equal(x, np.array([1.0, 1.0]).reshape(-1, 1)) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_special_matrices.py b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_special_matrices.py new file mode 100644 index 0000000000000000000000000000000000000000..3edc8c176717fffe55268a12b2bdd6e609feb556 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/linalg/tests/test_special_matrices.py @@ -0,0 +1,597 @@ +import pytest +import numpy as np +from numpy import arange, array, eye, copy, sqrt +from numpy.testing import (assert_equal, assert_array_equal, + assert_array_almost_equal, assert_allclose) +from pytest import raises as assert_raises + +from scipy.fft import fft +from scipy.special import comb +from scipy.linalg import (toeplitz, hankel, circulant, hadamard, leslie, dft, + companion, kron, block_diag, + helmert, hilbert, invhilbert, pascal, invpascal, + fiedler, fiedler_companion, eigvals, + convolution_matrix) +from numpy.linalg import cond + + +class TestToeplitz: + + def test_basic(self): + y = toeplitz([1, 2, 3]) + assert_array_equal(y, [[1, 2, 3], [2, 1, 2], [3, 2, 1]]) + y = toeplitz([1, 2, 3], [1, 4, 5]) + assert_array_equal(y, [[1, 4, 5], [2, 1, 4], [3, 2, 1]]) + + def test_complex_01(self): + data = (1.0 + arange(3.0)) * (1.0 + 1.0j) + x = copy(data) + t = toeplitz(x) + # Calling toeplitz should not change x. + assert_array_equal(x, data) + # According to the docstring, x should be the first column of t. 
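Aside for the reader, not part of the vendored file: the TestSolveSylvester cases that end just above (before the special-matrices file begins) all reduce to checking the residual of A X + X B = C. A minimal sketch with arbitrary rectangular shapes, mirroring the non-square cases; the shapes and seed are illustrative assumptions.

import numpy as np
from scipy.linalg import solve_sylvester

rng = np.random.default_rng(7)
a = rng.standard_normal((3, 3))   # left coefficient, (M, M)
b = rng.standard_normal((2, 2))   # right coefficient, (N, N)
c = rng.standard_normal((3, 2))   # right-hand side, (M, N)

x = solve_sylvester(a, b, c)      # Schur-based (Bartels-Stewart style) solve
assert x.shape == (3, 2)
assert np.allclose(a @ x + x @ b, c)

A unique solution exists exactly when a and -b share no eigenvalues, which random draws like these satisfy with probability one; that is the same residual check performed by check_case above.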
+ col0 = t[:, 0] + assert_array_equal(col0, data) + assert_array_equal(t[0, 1:], data[1:].conj()) + + def test_scalar_00(self): + """Scalar arguments still produce a 2D array.""" + t = toeplitz(10) + assert_array_equal(t, [[10]]) + t = toeplitz(10, 20) + assert_array_equal(t, [[10]]) + + def test_scalar_01(self): + c = array([1, 2, 3]) + t = toeplitz(c, 1) + assert_array_equal(t, [[1], [2], [3]]) + + def test_scalar_02(self): + c = array([1, 2, 3]) + t = toeplitz(c, array(1)) + assert_array_equal(t, [[1], [2], [3]]) + + def test_scalar_03(self): + c = array([1, 2, 3]) + t = toeplitz(c, array([1])) + assert_array_equal(t, [[1], [2], [3]]) + + def test_scalar_04(self): + r = array([10, 2, 3]) + t = toeplitz(1, r) + assert_array_equal(t, [[1, 2, 3]]) + + +class TestHankel: + def test_basic(self): + y = hankel([1, 2, 3]) + assert_array_equal(y, [[1, 2, 3], [2, 3, 0], [3, 0, 0]]) + y = hankel([1, 2, 3], [3, 4, 5]) + assert_array_equal(y, [[1, 2, 3], [2, 3, 4], [3, 4, 5]]) + + +class TestCirculant: + def test_basic(self): + y = circulant([1, 2, 3]) + assert_array_equal(y, [[1, 3, 2], [2, 1, 3], [3, 2, 1]]) + + +class TestHadamard: + + def test_basic(self): + + y = hadamard(1) + assert_array_equal(y, [[1]]) + + y = hadamard(2, dtype=float) + assert_array_equal(y, [[1.0, 1.0], [1.0, -1.0]]) + + y = hadamard(4) + assert_array_equal(y, [[1, 1, 1, 1], + [1, -1, 1, -1], + [1, 1, -1, -1], + [1, -1, -1, 1]]) + + assert_raises(ValueError, hadamard, 0) + assert_raises(ValueError, hadamard, 5) + + +class TestLeslie: + + def test_bad_shapes(self): + assert_raises(ValueError, leslie, [[1, 1], [2, 2]], [3, 4, 5]) + assert_raises(ValueError, leslie, [3, 4, 5], [[1, 1], [2, 2]]) + assert_raises(ValueError, leslie, [1, 2], [1, 2]) + assert_raises(ValueError, leslie, [1], []) + + def test_basic(self): + a = leslie([1, 2, 3], [0.25, 0.5]) + expected = array([[1.0, 2.0, 3.0], + [0.25, 0.0, 0.0], + [0.0, 0.5, 0.0]]) + assert_array_equal(a, expected) + + +class TestCompanion: + + def test_bad_shapes(self): + assert_raises(ValueError, companion, [[1, 1], [2, 2]]) + assert_raises(ValueError, companion, [0, 4, 5]) + assert_raises(ValueError, companion, [1]) + assert_raises(ValueError, companion, []) + + def test_basic(self): + c = companion([1, 2, 3]) + expected = array([ + [-2.0, -3.0], + [1.0, 0.0]]) + assert_array_equal(c, expected) + + c = companion([2.0, 5.0, -10.0]) + expected = array([ + [-2.5, 5.0], + [1.0, 0.0]]) + assert_array_equal(c, expected) + + +class TestBlockDiag: + def test_basic(self): + x = block_diag(eye(2), [[1, 2], [3, 4], [5, 6]], [[1, 2, 3]]) + assert_array_equal(x, [[1, 0, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0, 0], + [0, 0, 1, 2, 0, 0, 0], + [0, 0, 3, 4, 0, 0, 0], + [0, 0, 5, 6, 0, 0, 0], + [0, 0, 0, 0, 1, 2, 3]]) + + def test_dtype(self): + x = block_diag([[1.5]]) + assert_equal(x.dtype, float) + + x = block_diag([[True]]) + assert_equal(x.dtype, bool) + + def test_mixed_dtypes(self): + actual = block_diag([[1]], [[1j]]) + desired = np.array([[1, 0], [0, 1j]]) + assert_array_equal(actual, desired) + + def test_scalar_and_1d_args(self): + a = block_diag(1) + assert_equal(a.shape, (1, 1)) + assert_array_equal(a, [[1]]) + + a = block_diag([2, 3], 4) + assert_array_equal(a, [[2, 3, 0], [0, 0, 4]]) + + def test_bad_arg(self): + assert_raises(ValueError, block_diag, [[[1]]]) + + def test_no_args(self): + a = block_diag() + assert_equal(a.ndim, 2) + assert_equal(a.nbytes, 0) + + def test_empty_matrix_arg(self): + # regression test for gh-4596: check the shape of the result + # for empty matrix inputs. 
Empty matrices are no longer ignored + # (gh-4908) it is viewed as a shape (1, 0) matrix. + a = block_diag([[1, 0], [0, 1]], + [], + [[2, 3], [4, 5], [6, 7]]) + assert_array_equal(a, [[1, 0, 0, 0], + [0, 1, 0, 0], + [0, 0, 0, 0], + [0, 0, 2, 3], + [0, 0, 4, 5], + [0, 0, 6, 7]]) + + def test_zerosized_matrix_arg(self): + # test for gh-4908: check the shape of the result for + # zero-sized matrix inputs, i.e. matrices with shape (0,n) or (n,0). + # note that [[]] takes shape (1,0) + a = block_diag([[1, 0], [0, 1]], + [[]], + [[2, 3], [4, 5], [6, 7]], + np.zeros([0, 2], dtype='int32')) + assert_array_equal(a, [[1, 0, 0, 0, 0, 0], + [0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 2, 3, 0, 0], + [0, 0, 4, 5, 0, 0], + [0, 0, 6, 7, 0, 0]]) + + +class TestKron: + + def test_basic(self): + + a = kron(array([[1, 2], [3, 4]]), array([[1, 1, 1]])) + assert_array_equal(a, array([[1, 1, 1, 2, 2, 2], + [3, 3, 3, 4, 4, 4]])) + + m1 = array([[1, 2], [3, 4]]) + m2 = array([[10], [11]]) + a = kron(m1, m2) + expected = array([[10, 20], + [11, 22], + [30, 40], + [33, 44]]) + assert_array_equal(a, expected) + + +class TestHelmert: + + def test_orthogonality(self): + for n in range(1, 7): + H = helmert(n, full=True) + Id = np.eye(n) + assert_allclose(H.dot(H.T), Id, atol=1e-12) + assert_allclose(H.T.dot(H), Id, atol=1e-12) + + def test_subspace(self): + for n in range(2, 7): + H_full = helmert(n, full=True) + H_partial = helmert(n) + for U in H_full[1:, :].T, H_partial.T: + C = np.eye(n) - np.full((n, n), 1 / n) + assert_allclose(U.dot(U.T), C) + assert_allclose(U.T.dot(U), np.eye(n-1), atol=1e-12) + + +class TestHilbert: + + def test_basic(self): + h3 = array([[1.0, 1/2., 1/3.], + [1/2., 1/3., 1/4.], + [1/3., 1/4., 1/5.]]) + assert_array_almost_equal(hilbert(3), h3) + + assert_array_equal(hilbert(1), [[1.0]]) + + h0 = hilbert(0) + assert_equal(h0.shape, (0, 0)) + + +class TestInvHilbert: + + def test_basic(self): + invh1 = array([[1]]) + assert_array_equal(invhilbert(1, exact=True), invh1) + assert_array_equal(invhilbert(1), invh1) + + invh2 = array([[4, -6], + [-6, 12]]) + assert_array_equal(invhilbert(2, exact=True), invh2) + assert_array_almost_equal(invhilbert(2), invh2) + + invh3 = array([[9, -36, 30], + [-36, 192, -180], + [30, -180, 180]]) + assert_array_equal(invhilbert(3, exact=True), invh3) + assert_array_almost_equal(invhilbert(3), invh3) + + invh4 = array([[16, -120, 240, -140], + [-120, 1200, -2700, 1680], + [240, -2700, 6480, -4200], + [-140, 1680, -4200, 2800]]) + assert_array_equal(invhilbert(4, exact=True), invh4) + assert_array_almost_equal(invhilbert(4), invh4) + + invh5 = array([[25, -300, 1050, -1400, 630], + [-300, 4800, -18900, 26880, -12600], + [1050, -18900, 79380, -117600, 56700], + [-1400, 26880, -117600, 179200, -88200], + [630, -12600, 56700, -88200, 44100]]) + assert_array_equal(invhilbert(5, exact=True), invh5) + assert_array_almost_equal(invhilbert(5), invh5) + + invh17 = array([ + [289, -41616, 1976760, -46124400, 629598060, -5540462928, + 33374693352, -143034400080, 446982500250, -1033026222800, + 1774926873720, -2258997839280, 2099709530100, -1384423866000, + 613101997800, -163493866080, 19835652870], + [-41616, 7990272, -426980160, 10627061760, -151103534400, + 1367702848512, -8410422724704, 36616806420480, -115857864064800, + 270465047424000, -468580694662080, 600545887119360, + -561522320049600, 372133135180800, -165537539406000, + 44316454993920, -5395297580640], + [1976760, -426980160, 24337869120, -630981792000, 9228108708000, + -85267724461920, 532660105897920, 
-2348052711713280, + 7504429831470000, -17664748409880000, 30818191841236800, + -39732544853164800, 37341234283298400, -24857330514030000, + 11100752642520000, -2982128117299200, 364182586693200], + [-46124400, 10627061760, -630981792000, 16826181120000, + -251209625940000, 2358021022156800, -14914482965141760, + 66409571644416000, -214015221119700000, 507295338950400000, + -890303319857952000, 1153715376477081600, -1089119333262870000, + 727848632044800000, -326170262829600000, 87894302404608000, + -10763618673376800], + [629598060, -151103534400, 9228108708000, + -251209625940000, 3810012660090000, -36210360321495360, + 231343968720664800, -1038687206500944000, 3370739732635275000, + -8037460526495400000, 14178080368737885600, -18454939322943942000, + 17489975175339030000, -11728977435138600000, 5272370630081100000, + -1424711708039692800, 174908803442373000], + [-5540462928, 1367702848512, -85267724461920, 2358021022156800, + -36210360321495360, 347619459086355456, -2239409617216035264, + 10124803292907663360, -33052510749726468000, + 79217210949138662400, -140362995650505067440, + 183420385176741672960, -174433352415381259200, + 117339159519533952000, -52892422160973595200, + 14328529177999196160, -1763080738699119840], + [33374693352, -8410422724704, 532660105897920, + -14914482965141760, 231343968720664800, -2239409617216035264, + 14527452132196331328, -66072377044391477760, + 216799987176909536400, -521925895055522958000, + 928414062734059661760, -1217424500995626443520, + 1161358898976091015200, -783401860847777371200, + 354015418167362952000, -96120549902411274240, + 11851820521255194480], + [-143034400080, 36616806420480, -2348052711713280, + 66409571644416000, -1038687206500944000, 10124803292907663360, + -66072377044391477760, 302045152202932469760, + -995510145200094810000, 2405996923185123840000, + -4294704507885446054400, 5649058909023744614400, + -5403874060541811254400, 3654352703663101440000, + -1655137020003255360000, 450325202737117593600, + -55630994283442749600], + [446982500250, -115857864064800, 7504429831470000, + -214015221119700000, 3370739732635275000, -33052510749726468000, + 216799987176909536400, -995510145200094810000, + 3293967392206196062500, -7988661659013106500000, + 14303908928401362270000, -18866974090684772052000, + 18093328327706957325000, -12263364009096700500000, + 5565847995255512250000, -1517208935002984080000, + 187754605706619279900], + [-1033026222800, 270465047424000, -17664748409880000, + 507295338950400000, -8037460526495400000, 79217210949138662400, + -521925895055522958000, 2405996923185123840000, + -7988661659013106500000, 19434404971634224000000, + -34894474126569249192000, 46141453390504792320000, + -44349976506971935800000, 30121928988527376000000, + -13697025107665828500000, 3740200989399948902400, + -463591619028689580000], + [1774926873720, -468580694662080, + 30818191841236800, -890303319857952000, 14178080368737885600, + -140362995650505067440, 928414062734059661760, + -4294704507885446054400, 14303908928401362270000, + -34894474126569249192000, 62810053427824648545600, + -83243376594051600326400, 80177044485212743068000, + -54558343880470209780000, 24851882355348879230400, + -6797096028813368678400, 843736746632215035600], + [-2258997839280, 600545887119360, -39732544853164800, + 1153715376477081600, -18454939322943942000, 183420385176741672960, + -1217424500995626443520, 5649058909023744614400, + -18866974090684772052000, 46141453390504792320000, + -83243376594051600326400, 110552468520163390156800, + -106681852579497947388000, 
72720410752415168870400, + -33177973900974346080000, 9087761081682520473600, + -1129631016152221783200], + [2099709530100, -561522320049600, 37341234283298400, + -1089119333262870000, 17489975175339030000, + -174433352415381259200, 1161358898976091015200, + -5403874060541811254400, 18093328327706957325000, + -44349976506971935800000, 80177044485212743068000, + -106681852579497947388000, 103125790826848015808400, + -70409051543137015800000, 32171029219823375700000, + -8824053728865840192000, 1098252376814660067000], + [-1384423866000, 372133135180800, + -24857330514030000, 727848632044800000, -11728977435138600000, + 117339159519533952000, -783401860847777371200, + 3654352703663101440000, -12263364009096700500000, + 30121928988527376000000, -54558343880470209780000, + 72720410752415168870400, -70409051543137015800000, + 48142941226076592000000, -22027500987368499000000, + 6049545098753157120000, -753830033789944188000], + [613101997800, -165537539406000, + 11100752642520000, -326170262829600000, 5272370630081100000, + -52892422160973595200, 354015418167362952000, + -1655137020003255360000, 5565847995255512250000, + -13697025107665828500000, 24851882355348879230400, + -33177973900974346080000, 32171029219823375700000, + -22027500987368499000000, 10091416708498869000000, + -2774765838662800128000, 346146444087219270000], + [-163493866080, 44316454993920, -2982128117299200, + 87894302404608000, -1424711708039692800, + 14328529177999196160, -96120549902411274240, + 450325202737117593600, -1517208935002984080000, + 3740200989399948902400, -6797096028813368678400, + 9087761081682520473600, -8824053728865840192000, + 6049545098753157120000, -2774765838662800128000, + 763806510427609497600, -95382575704033754400], + [19835652870, -5395297580640, 364182586693200, -10763618673376800, + 174908803442373000, -1763080738699119840, 11851820521255194480, + -55630994283442749600, 187754605706619279900, + -463591619028689580000, 843736746632215035600, + -1129631016152221783200, 1098252376814660067000, + -753830033789944188000, 346146444087219270000, + -95382575704033754400, 11922821963004219300] + ]) + assert_array_equal(invhilbert(17, exact=True), invh17) + assert_allclose(invhilbert(17), invh17.astype(float), rtol=1e-12) + + def test_inverse(self): + for n in range(1, 10): + a = hilbert(n) + b = invhilbert(n) + # The Hilbert matrix is increasingly badly conditioned, + # so take that into account in the test + c = cond(a) + assert_allclose(a.dot(b), eye(n), atol=1e-15*c, rtol=1e-15*c) + + +class TestPascal: + + cases = [ + (1, array([[1]]), array([[1]])), + (2, array([[1, 1], + [1, 2]]), + array([[1, 0], + [1, 1]])), + (3, array([[1, 1, 1], + [1, 2, 3], + [1, 3, 6]]), + array([[1, 0, 0], + [1, 1, 0], + [1, 2, 1]])), + (4, array([[1, 1, 1, 1], + [1, 2, 3, 4], + [1, 3, 6, 10], + [1, 4, 10, 20]]), + array([[1, 0, 0, 0], + [1, 1, 0, 0], + [1, 2, 1, 0], + [1, 3, 3, 1]])), + ] + + def check_case(self, n, sym, low): + assert_array_equal(pascal(n), sym) + assert_array_equal(pascal(n, kind='lower'), low) + assert_array_equal(pascal(n, kind='upper'), low.T) + assert_array_almost_equal(pascal(n, exact=False), sym) + assert_array_almost_equal(pascal(n, exact=False, kind='lower'), low) + assert_array_almost_equal(pascal(n, exact=False, kind='upper'), low.T) + + def test_cases(self): + for n, sym, low in self.cases: + self.check_case(n, sym, low) + + def test_big(self): + p = pascal(50) + assert p[-1, -1] == comb(98, 49, exact=True) + + def test_threshold(self): + # Regression test. 
An early version of `pascal` returned an + # array of type np.uint64 for n=35, but that data type is too small + # to hold p[-1, -1]. The second assert_equal below would fail + # because p[-1, -1] overflowed. + p = pascal(34) + assert_equal(2*p.item(-1, -2), p.item(-1, -1), err_msg="n = 34") + p = pascal(35) + assert_equal(2.*p.item(-1, -2), 1.*p.item(-1, -1), err_msg="n = 35") + + +def test_invpascal(): + + def check_invpascal(n, kind, exact): + ip = invpascal(n, kind=kind, exact=exact) + p = pascal(n, kind=kind, exact=exact) + # Matrix-multiply ip and p, and check that we get the identity matrix. + # We can't use the simple expression e = ip.dot(p), because when + # n < 35 and exact is True, p.dtype is np.uint64 and ip.dtype is + # np.int64. The product of those dtypes is np.float64, which loses + # precision when n is greater than 18. Instead we'll cast both to + # object arrays, and then multiply. + e = ip.astype(object).dot(p.astype(object)) + assert_array_equal(e, eye(n), err_msg="n=%d kind=%r exact=%r" % + (n, kind, exact)) + + kinds = ['symmetric', 'lower', 'upper'] + + ns = [1, 2, 5, 18] + for n in ns: + for kind in kinds: + for exact in [True, False]: + check_invpascal(n, kind, exact) + + ns = [19, 34, 35, 50] + for n in ns: + for kind in kinds: + check_invpascal(n, kind, True) + + +def test_dft(): + m = dft(2) + expected = array([[1.0, 1.0], [1.0, -1.0]]) + assert_array_almost_equal(m, expected) + m = dft(2, scale='n') + assert_array_almost_equal(m, expected/2.0) + m = dft(2, scale='sqrtn') + assert_array_almost_equal(m, expected/sqrt(2.0)) + + x = array([0, 1, 2, 3, 4, 5, 0, 1]) + m = dft(8) + mx = m.dot(x) + fx = fft(x) + assert_array_almost_equal(mx, fx) + + +def test_fiedler(): + f = fiedler([]) + assert_equal(f.size, 0) + f = fiedler([123.]) + assert_array_equal(f, np.array([[0.]])) + f = fiedler(np.arange(1, 7)) + des = np.array([[0, 1, 2, 3, 4, 5], + [1, 0, 1, 2, 3, 4], + [2, 1, 0, 1, 2, 3], + [3, 2, 1, 0, 1, 2], + [4, 3, 2, 1, 0, 1], + [5, 4, 3, 2, 1, 0]]) + assert_array_equal(f, des) + + +def test_fiedler_companion(): + fc = fiedler_companion([]) + assert_equal(fc.size, 0) + fc = fiedler_companion([1.]) + assert_equal(fc.size, 0) + fc = fiedler_companion([1., 2.]) + assert_array_equal(fc, np.array([[-2.]])) + fc = fiedler_companion([1e-12, 2., 3.]) + assert_array_almost_equal(fc, companion([1e-12, 2., 3.])) + with assert_raises(ValueError): + fiedler_companion([0, 1, 2]) + fc = fiedler_companion([1., -16., 86., -176., 105.]) + assert_array_almost_equal(eigvals(fc), + np.array([7., 5., 3., 1.])) + + +class TestConvolutionMatrix: + """ + Test convolution_matrix vs. numpy.convolve for various parameters. 
+ """ + + def create_vector(self, n, cpx): + """Make a complex or real test vector of length n.""" + x = np.linspace(-2.5, 2.2, n) + if cpx: + x = x + 1j*np.linspace(-1.5, 3.1, n) + return x + + def test_bad_n(self): + # n must be a positive integer + with pytest.raises(ValueError, match='n must be a positive integer'): + convolution_matrix([1, 2, 3], 0) + + def test_bad_first_arg(self): + # first arg must be a 1d array, otherwise ValueError + with pytest.raises(ValueError, match='one-dimensional'): + convolution_matrix(1, 4) + + def test_empty_first_arg(self): + # first arg must have at least one value + with pytest.raises(ValueError, match=r'len\(a\)'): + convolution_matrix([], 4) + + def test_bad_mode(self): + # mode must be in ('full', 'valid', 'same') + with pytest.raises(ValueError, match='mode.*must be one of'): + convolution_matrix((1, 1), 4, mode='invalid argument') + + @pytest.mark.parametrize('cpx', [False, True]) + @pytest.mark.parametrize('na', [1, 2, 9]) + @pytest.mark.parametrize('nv', [1, 2, 9]) + @pytest.mark.parametrize('mode', [None, 'full', 'valid', 'same']) + def test_against_numpy_convolve(self, cpx, na, nv, mode): + a = self.create_vector(na, cpx) + v = self.create_vector(nv, cpx) + if mode is None: + y1 = np.convolve(v, a) + A = convolution_matrix(a, nv) + else: + y1 = np.convolve(v, a, mode) + A = convolution_matrix(a, nv, mode) + y2 = A @ v + assert_array_almost_equal(y1, y2) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/misc/face.dat b/env-llmeval/lib/python3.10/site-packages/scipy/misc/face.dat new file mode 100644 index 0000000000000000000000000000000000000000..cd6128288bacd3f04131b9e8d13f133e3b6ecd4b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/misc/face.dat @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9d8b0b4d081313e2b485748c770472e5a95ed1738146883d84c7030493e82886 +size 1581821 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/misc/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/misc/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2e196310ddefd47d13e7597110bcb5b8791d72b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/misc/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/misc/tests/__pycache__/test_common.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/misc/tests/__pycache__/test_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7bf1d7b00acf87e8e53eb584e67967d53febfd4f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/misc/tests/__pycache__/test_common.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/misc/tests/__pycache__/test_config.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/misc/tests/__pycache__/test_config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c9ca83fcc5ec5f1bc622906be79fcbdd2b02a69 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/misc/tests/__pycache__/test_config.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/misc/tests/__pycache__/test_doccer.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/misc/tests/__pycache__/test_doccer.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..6a9cb1c4de7c0682b507d435d177ff2201ecca16 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/misc/tests/__pycache__/test_doccer.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/misc/tests/test_common.py b/env-llmeval/lib/python3.10/site-packages/scipy/misc/tests/test_common.py new file mode 100644 index 0000000000000000000000000000000000000000..7a1114390f905cca7f5db2975da1193c5cbf0935 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/misc/tests/test_common.py @@ -0,0 +1,26 @@ +from numpy.testing import assert_equal, assert_almost_equal, suppress_warnings + +from scipy.misc import face, ascent, electrocardiogram + + +def test_face(): + with suppress_warnings() as sup: + sup.filter(category=DeprecationWarning) + assert_equal(face().shape, (768, 1024, 3)) + + +def test_ascent(): + with suppress_warnings() as sup: + sup.filter(category=DeprecationWarning) + assert_equal(ascent().shape, (512, 512)) + + +def test_electrocardiogram(): + with suppress_warnings() as sup: + sup.filter(category=DeprecationWarning) + # Test shape, dtype and stats of signal + ecg = electrocardiogram() + assert ecg.dtype == float + assert_equal(ecg.shape, (108000,)) + assert_almost_equal(ecg.mean(), -0.16510875) + assert_almost_equal(ecg.std(), 0.5992473991177294) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/misc/tests/test_config.py b/env-llmeval/lib/python3.10/site-packages/scipy/misc/tests/test_config.py new file mode 100644 index 0000000000000000000000000000000000000000..b43d3f9f0da39a86b78cb2210cb21d670e5cf978 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/misc/tests/test_config.py @@ -0,0 +1,44 @@ +""" +Check the SciPy config is valid. 
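On a Meson-built SciPy, the dictionary form of the configuration can be inspected directly; a hedged sketch of what the tests below rely on (key names as asserted in REQUIRED_CONFIG_KEYS):

import scipy

# Only meaningful on Meson builds, as the skip condition below also assumes.
config = scipy.show_config(mode="dicts")
print(sorted(config))   # expected to include "Compilers", "Machine Information", "Python Information"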
+""" +import scipy +import pytest +from unittest.mock import patch + +pytestmark = pytest.mark.skipif( + not hasattr(scipy.__config__, "_built_with_meson"), + reason="Requires Meson builds", +) + + +class TestSciPyConfigs: + REQUIRED_CONFIG_KEYS = [ + "Compilers", + "Machine Information", + "Python Information", + ] + + @patch("scipy.__config__._check_pyyaml") + def test_pyyaml_not_found(self, mock_yaml_importer): + mock_yaml_importer.side_effect = ModuleNotFoundError() + with pytest.warns(UserWarning): + scipy.show_config() + + def test_dict_mode(self): + config = scipy.show_config(mode="dicts") + + assert isinstance(config, dict) + assert all([key in config for key in self.REQUIRED_CONFIG_KEYS]), ( + "Required key missing," + " see index of `False` with `REQUIRED_CONFIG_KEYS`" + ) + + def test_invalid_mode(self): + with pytest.raises(AttributeError): + scipy.show_config(mode="foo") + + def test_warn_to_add_tests(self): + assert len(scipy.__config__.DisplayModes) == 2, ( + "New mode detected," + " please add UT if applicable and increment this count" + ) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/misc/tests/test_doccer.py b/env-llmeval/lib/python3.10/site-packages/scipy/misc/tests/test_doccer.py new file mode 100644 index 0000000000000000000000000000000000000000..fa34228ddff73ab16624dcdd2dd6c438ff29027a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/misc/tests/test_doccer.py @@ -0,0 +1,134 @@ +''' Some tests for the documenting decorator and support functions ''' + +import sys +import pytest +from numpy.testing import assert_equal, suppress_warnings + +from scipy._lib import doccer + +# python -OO strips docstrings +DOCSTRINGS_STRIPPED = sys.flags.optimize > 1 + +docstring = \ +"""Docstring + %(strtest1)s + %(strtest2)s + %(strtest3)s +""" +param_doc1 = \ +"""Another test + with some indent""" + +param_doc2 = \ +"""Another test, one line""" + +param_doc3 = \ +""" Another test + with some indent""" + +doc_dict = {'strtest1':param_doc1, + 'strtest2':param_doc2, + 'strtest3':param_doc3} + +filled_docstring = \ +"""Docstring + Another test + with some indent + Another test, one line + Another test + with some indent +""" + + +def test_unindent(): + with suppress_warnings() as sup: + sup.filter(category=DeprecationWarning) + assert_equal(doccer.unindent_string(param_doc1), param_doc1) + assert_equal(doccer.unindent_string(param_doc2), param_doc2) + assert_equal(doccer.unindent_string(param_doc3), param_doc1) + + +def test_unindent_dict(): + with suppress_warnings() as sup: + sup.filter(category=DeprecationWarning) + d2 = doccer.unindent_dict(doc_dict) + assert_equal(d2['strtest1'], doc_dict['strtest1']) + assert_equal(d2['strtest2'], doc_dict['strtest2']) + assert_equal(d2['strtest3'], doc_dict['strtest1']) + + +def test_docformat(): + with suppress_warnings() as sup: + sup.filter(category=DeprecationWarning) + udd = doccer.unindent_dict(doc_dict) + formatted = doccer.docformat(docstring, udd) + assert_equal(formatted, filled_docstring) + single_doc = 'Single line doc %(strtest1)s' + formatted = doccer.docformat(single_doc, doc_dict) + # Note - initial indent of format string does not + # affect subsequent indent of inserted parameter + assert_equal(formatted, """Single line doc Another test + with some indent""") + + +@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstrings stripped") +def test_decorator(): + with suppress_warnings() as sup: + sup.filter(category=DeprecationWarning) + # with unindentation of parameters + decorator = 
doccer.filldoc(doc_dict, True) + + @decorator + def func(): + """ Docstring + %(strtest3)s + """ + assert_equal(func.__doc__, """ Docstring + Another test + with some indent + """) + + # without unindentation of parameters + decorator = doccer.filldoc(doc_dict, False) + + @decorator + def func(): + """ Docstring + %(strtest3)s + """ + assert_equal(func.__doc__, """ Docstring + Another test + with some indent + """) + + +@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstrings stripped") +def test_inherit_docstring_from(): + + with suppress_warnings() as sup: + sup.filter(category=DeprecationWarning) + + class Foo: + def func(self): + '''Do something useful.''' + return + + def func2(self): + '''Something else.''' + + class Bar(Foo): + @doccer.inherit_docstring_from(Foo) + def func(self): + '''%(super)sABC''' + return + + @doccer.inherit_docstring_from(Foo) + def func2(self): + # No docstring. + return + + assert_equal(Bar.func.__doc__, Foo.func.__doc__ + 'ABC') + assert_equal(Bar.func2.__doc__, Foo.func2.__doc__) + bar = Bar() + assert_equal(bar.func.__doc__, Foo.func.__doc__ + 'ABC') + assert_equal(bar.func2.__doc__, Foo.func2.__doc__) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_onenormest.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_onenormest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ef36ce256af9f16c23e1984de7cf00197ea464cc Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_onenormest.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_expm_multiply.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_expm_multiply.py new file mode 100644 index 0000000000000000000000000000000000000000..6bc8d83b75a7944193e8060aeb0f841f3a402b16 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_expm_multiply.py @@ -0,0 +1,810 @@ +"""Compute the action of the matrix exponential.""" +from warnings import warn + +import numpy as np + +import scipy.linalg +import scipy.sparse.linalg +from scipy.linalg._decomp_qr import qr +from scipy.sparse._sputils import is_pydata_spmatrix +from scipy.sparse.linalg import aslinearoperator +from scipy.sparse.linalg._interface import IdentityOperator +from scipy.sparse.linalg._onenormest import onenormest + +__all__ = ['expm_multiply'] + + +def _exact_inf_norm(A): + # A compatibility function which should eventually disappear. + if scipy.sparse.issparse(A): + return max(abs(A).sum(axis=1).flat) + elif is_pydata_spmatrix(A): + return max(abs(A).sum(axis=1)) + else: + return np.linalg.norm(A, np.inf) + + +def _exact_1_norm(A): + # A compatibility function which should eventually disappear. + if scipy.sparse.issparse(A): + return max(abs(A).sum(axis=0).flat) + elif is_pydata_spmatrix(A): + return max(abs(A).sum(axis=0)) + else: + return np.linalg.norm(A, 1) + + +def _trace(A): + # A compatibility function which should eventually disappear. + if is_pydata_spmatrix(A): + return A.to_scipy_sparse().trace() + else: + return A.trace() + + +def traceest(A, m3, seed=None): + """Estimate `np.trace(A)` using `3*m3` matrix-vector products. + + The result is not deterministic. + + Parameters + ---------- + A : LinearOperator + Linear operator whose trace will be estimated. Has to be square. + m3 : int + Number of matrix-vector products divided by 3 used to estimate the + trace. 
+ seed : optional + Seed for `numpy.random.default_rng`. + Can be provided to obtain deterministic results. + + Returns + ------- + trace : LinearOperator.dtype + Estimate of the trace + + Notes + ----- + This is the Hutch++ algorithm given in [1]_. + + References + ---------- + .. [1] Meyer, Raphael A., Cameron Musco, Christopher Musco, and David P. + Woodruff. "Hutch++: Optimal Stochastic Trace Estimation." In Symposium + on Simplicity in Algorithms (SOSA), pp. 142-155. Society for Industrial + and Applied Mathematics, 2021 + https://doi.org/10.1137/1.9781611976496.16 + + """ + rng = np.random.default_rng(seed) + if len(A.shape) != 2 or A.shape[-1] != A.shape[-2]: + raise ValueError("Expected A to be like a square matrix.") + n = A.shape[-1] + S = rng.choice([-1.0, +1.0], [n, m3]) + Q, _ = qr(A.matmat(S), overwrite_a=True, mode='economic') + trQAQ = np.trace(Q.conj().T @ A.matmat(Q)) + G = rng.choice([-1, +1], [n, m3]) + right = G - Q@(Q.conj().T @ G) + trGAG = np.trace(right.conj().T @ A.matmat(right)) + return trQAQ + trGAG/m3 + + +def _ident_like(A): + # A compatibility function which should eventually disappear. + if scipy.sparse.issparse(A): + # Creates a sparse matrix in dia format + out = scipy.sparse.eye(A.shape[0], A.shape[1], dtype=A.dtype) + if isinstance(A, scipy.sparse.spmatrix): + return out.asformat(A.format) + return scipy.sparse.dia_array(out).asformat(A.format) + elif is_pydata_spmatrix(A): + import sparse + return sparse.eye(A.shape[0], A.shape[1], dtype=A.dtype) + elif isinstance(A, scipy.sparse.linalg.LinearOperator): + return IdentityOperator(A.shape, dtype=A.dtype) + else: + return np.eye(A.shape[0], A.shape[1], dtype=A.dtype) + + +def expm_multiply(A, B, start=None, stop=None, num=None, + endpoint=None, traceA=None): + """ + Compute the action of the matrix exponential of A on B. + + Parameters + ---------- + A : transposable linear operator + The operator whose exponential is of interest. + B : ndarray + The matrix or vector to be multiplied by the matrix exponential of A. + start : scalar, optional + The starting time point of the sequence. + stop : scalar, optional + The end time point of the sequence, unless `endpoint` is set to False. + In that case, the sequence consists of all but the last of ``num + 1`` + evenly spaced time points, so that `stop` is excluded. + Note that the step size changes when `endpoint` is False. + num : int, optional + Number of time points to use. + endpoint : bool, optional + If True, `stop` is the last time point. Otherwise, it is not included. + traceA : scalar, optional + Trace of `A`. If not given the trace is estimated for linear operators, + or calculated exactly for sparse matrices. It is used to precondition + `A`, thus an approximate trace is acceptable. + For linear operators, `traceA` should be provided to ensure performance + as the estimation is not guaranteed to be reliable for all cases. + + .. versionadded:: 1.9.0 + + Returns + ------- + expm_A_B : ndarray + The result of the action :math:`e^{t_k A} B`. + + Warns + ----- + UserWarning + If `A` is a linear operator and ``traceA=None`` (default). + + Notes + ----- + The optional arguments defining the sequence of evenly spaced time points + are compatible with the arguments of `numpy.linspace`. + + The output ndarray shape is somewhat complicated so I explain it here. + The ndim of the output could be either 1, 2, or 3. + It would be 1 if you are computing the expm action on a single vector + at a single time point. 
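To make these shape rules concrete, a small hedged sketch (the 2x2 matrix and time grid are arbitrary illustrations):

import numpy as np
from scipy.sparse.linalg import expm_multiply

A = np.array([[0.0, 1.0], [-1.0, 0.0]])
b = np.array([1.0, 0.0])   # single vector
B = np.eye(2)              # matrix with two columns

print(expm_multiply(A, b).shape)                                         # (2,)      ndim 1
print(expm_multiply(A, b, start=0, stop=1, num=4, endpoint=True).shape)  # (4, 2)    ndim 2
print(expm_multiply(A, B, start=0, stop=1, num=4, endpoint=True).shape)  # (4, 2, 2) ndim 3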
+ It would be 2 if you are computing the expm action on a vector + at multiple time points, or if you are computing the expm action + on a matrix at a single time point. + It would be 3 if you want the action on a matrix with multiple + columns at multiple time points. + If multiple time points are requested, expm_A_B[0] will always + be the action of the expm at the first time point, + regardless of whether the action is on a vector or a matrix. + + References + ---------- + .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2011) + "Computing the Action of the Matrix Exponential, + with an Application to Exponential Integrators." + SIAM Journal on Scientific Computing, + 33 (2). pp. 488-511. ISSN 1064-8275 + http://eprints.ma.man.ac.uk/1591/ + + .. [2] Nicholas J. Higham and Awad H. Al-Mohy (2010) + "Computing Matrix Functions." + Acta Numerica, + 19. 159-208. ISSN 0962-4929 + http://eprints.ma.man.ac.uk/1451/ + + Examples + -------- + >>> import numpy as np + >>> from scipy.sparse import csc_matrix + >>> from scipy.sparse.linalg import expm, expm_multiply + >>> A = csc_matrix([[1, 0], [0, 1]]) + >>> A.toarray() + array([[1, 0], + [0, 1]], dtype=int64) + >>> B = np.array([np.exp(-1.), np.exp(-2.)]) + >>> B + array([ 0.36787944, 0.13533528]) + >>> expm_multiply(A, B, start=1, stop=2, num=3, endpoint=True) + array([[ 1. , 0.36787944], + [ 1.64872127, 0.60653066], + [ 2.71828183, 1. ]]) + >>> expm(A).dot(B) # Verify 1st timestep + array([ 1. , 0.36787944]) + >>> expm(1.5*A).dot(B) # Verify 2nd timestep + array([ 1.64872127, 0.60653066]) + >>> expm(2*A).dot(B) # Verify 3rd timestep + array([ 2.71828183, 1. ]) + """ + if all(arg is None for arg in (start, stop, num, endpoint)): + X = _expm_multiply_simple(A, B, traceA=traceA) + else: + X, status = _expm_multiply_interval(A, B, start, stop, num, + endpoint, traceA=traceA) + return X + + +def _expm_multiply_simple(A, B, t=1.0, traceA=None, balance=False): + """ + Compute the action of the matrix exponential at a single time point. + + Parameters + ---------- + A : transposable linear operator + The operator whose exponential is of interest. + B : ndarray + The matrix to be multiplied by the matrix exponential of A. + t : float + A time point. + traceA : scalar, optional + Trace of `A`. If not given the trace is estimated for linear operators, + or calculated exactly for sparse matrices. It is used to precondition + `A`, thus an approximate trace is acceptable + balance : bool + Indicates whether or not to apply balancing. + + Returns + ------- + F : ndarray + :math:`e^{t A} B` + + Notes + ----- + This is algorithm (3.2) in Al-Mohy and Higham (2011). + + """ + if balance: + raise NotImplementedError + if len(A.shape) != 2 or A.shape[0] != A.shape[1]: + raise ValueError('expected A to be like a square matrix') + if A.shape[1] != B.shape[0]: + raise ValueError('shapes of matrices A {} and B {} are incompatible' + .format(A.shape, B.shape)) + ident = _ident_like(A) + is_linear_operator = isinstance(A, scipy.sparse.linalg.LinearOperator) + n = A.shape[0] + if len(B.shape) == 1: + n0 = 1 + elif len(B.shape) == 2: + n0 = B.shape[1] + else: + raise ValueError('expected B to be like a matrix or a vector') + u_d = 2**-53 + tol = u_d + if traceA is None: + if is_linear_operator: + warn("Trace of LinearOperator not available, it will be estimated." 
+ " Provide `traceA` to ensure performance.", stacklevel=3) + # m3=1 is bit arbitrary choice, a more accurate trace (larger m3) might + # speed up exponential calculation, but trace estimation is more costly + traceA = traceest(A, m3=1) if is_linear_operator else _trace(A) + mu = traceA / float(n) + A = A - mu * ident + A_1_norm = onenormest(A) if is_linear_operator else _exact_1_norm(A) + if t*A_1_norm == 0: + m_star, s = 0, 1 + else: + ell = 2 + norm_info = LazyOperatorNormInfo(t*A, A_1_norm=t*A_1_norm, ell=ell) + m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell) + return _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol, balance) + + +def _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol=None, balance=False): + """ + A helper function. + """ + if balance: + raise NotImplementedError + if tol is None: + u_d = 2 ** -53 + tol = u_d + F = B + eta = np.exp(t*mu / float(s)) + for i in range(s): + c1 = _exact_inf_norm(B) + for j in range(m_star): + coeff = t / float(s*(j+1)) + B = coeff * A.dot(B) + c2 = _exact_inf_norm(B) + F = F + B + if c1 + c2 <= tol * _exact_inf_norm(F): + break + c1 = c2 + F = eta * F + B = F + return F + + +# This table helps to compute bounds. +# They seem to have been difficult to calculate, involving symbolic +# manipulation of equations, followed by numerical root finding. +_theta = { + # The first 30 values are from table A.3 of Computing Matrix Functions. + 1: 2.29e-16, + 2: 2.58e-8, + 3: 1.39e-5, + 4: 3.40e-4, + 5: 2.40e-3, + 6: 9.07e-3, + 7: 2.38e-2, + 8: 5.00e-2, + 9: 8.96e-2, + 10: 1.44e-1, + # 11 + 11: 2.14e-1, + 12: 3.00e-1, + 13: 4.00e-1, + 14: 5.14e-1, + 15: 6.41e-1, + 16: 7.81e-1, + 17: 9.31e-1, + 18: 1.09, + 19: 1.26, + 20: 1.44, + # 21 + 21: 1.62, + 22: 1.82, + 23: 2.01, + 24: 2.22, + 25: 2.43, + 26: 2.64, + 27: 2.86, + 28: 3.08, + 29: 3.31, + 30: 3.54, + # The rest are from table 3.1 of + # Computing the Action of the Matrix Exponential. + 35: 4.7, + 40: 6.0, + 45: 7.2, + 50: 8.5, + 55: 9.9, + } + + +def _onenormest_matrix_power(A, p, + t=2, itmax=5, compute_v=False, compute_w=False): + """ + Efficiently estimate the 1-norm of A^p. + + Parameters + ---------- + A : ndarray + Matrix whose 1-norm of a power is to be computed. + p : int + Non-negative integer power. + t : int, optional + A positive parameter controlling the tradeoff between + accuracy versus time and memory usage. + Larger values take longer and use more memory + but give more accurate output. + itmax : int, optional + Use at most this many iterations. + compute_v : bool, optional + Request a norm-maximizing linear operator input vector if True. + compute_w : bool, optional + Request a norm-maximizing linear operator output vector if True. + + Returns + ------- + est : float + An underestimate of the 1-norm of the sparse matrix. + v : ndarray, optional + The vector such that ||Av||_1 == est*||v||_1. + It can be thought of as an input to the linear operator + that gives an output with particularly large norm. + w : ndarray, optional + The vector Av which has relatively large 1-norm. + It can be thought of as an output of the linear operator + that is relatively large in norm compared to the input. + + """ + #XXX Eventually turn this into an API function in the _onenormest module, + #XXX and remove its underscore, + #XXX but wait until expm_multiply goes into scipy. + from scipy.sparse.linalg._onenormest import onenormest + return onenormest(aslinearoperator(A) ** p) + +class LazyOperatorNormInfo: + """ + Information about an operator is lazily computed. 
+ + The information includes the exact 1-norm of the operator, + in addition to estimates of 1-norms of powers of the operator. + This uses the notation of Computing the Action (2011). + This class is specialized enough to probably not be of general interest + outside of this module. + + """ + + def __init__(self, A, A_1_norm=None, ell=2, scale=1): + """ + Provide the operator and some norm-related information. + + Parameters + ---------- + A : linear operator + The operator of interest. + A_1_norm : float, optional + The exact 1-norm of A. + ell : int, optional + A technical parameter controlling norm estimation quality. + scale : int, optional + If specified, return the norms of scale*A instead of A. + + """ + self._A = A + self._A_1_norm = A_1_norm + self._ell = ell + self._d = {} + self._scale = scale + + def set_scale(self,scale): + """ + Set the scale parameter. + """ + self._scale = scale + + def onenorm(self): + """ + Compute the exact 1-norm. + """ + if self._A_1_norm is None: + self._A_1_norm = _exact_1_norm(self._A) + return self._scale*self._A_1_norm + + def d(self, p): + """ + Lazily estimate :math:`d_p(A) ~= || A^p ||^(1/p)` where :math:`||.||` is the 1-norm. + """ + if p not in self._d: + est = _onenormest_matrix_power(self._A, p, self._ell) + self._d[p] = est ** (1.0 / p) + return self._scale*self._d[p] + + def alpha(self, p): + """ + Lazily compute max(d(p), d(p+1)). + """ + return max(self.d(p), self.d(p+1)) + +def _compute_cost_div_m(m, p, norm_info): + """ + A helper function for computing bounds. + + This is equation (3.10). + It measures cost in terms of the number of required matrix products. + + Parameters + ---------- + m : int + A valid key of _theta. + p : int + A matrix power. + norm_info : LazyOperatorNormInfo + Information about 1-norms of related operators. + + Returns + ------- + cost_div_m : int + Required number of matrix products divided by m. + + """ + return int(np.ceil(norm_info.alpha(p) / _theta[m])) + + +def _compute_p_max(m_max): + """ + Compute the largest positive integer p such that p*(p-1) <= m_max + 1. + + Do this in a slightly dumb way, but safe and not too slow. + + Parameters + ---------- + m_max : int + A count related to bounds. + + """ + sqrt_m_max = np.sqrt(m_max) + p_low = int(np.floor(sqrt_m_max)) + p_high = int(np.ceil(sqrt_m_max + 1)) + return max(p for p in range(p_low, p_high+1) if p*(p-1) <= m_max + 1) + + +def _fragment_3_1(norm_info, n0, tol, m_max=55, ell=2): + """ + A helper function for the _expm_multiply_* functions. + + Parameters + ---------- + norm_info : LazyOperatorNormInfo + Information about norms of certain linear operators of interest. + n0 : int + Number of columns in the _expm_multiply_* B matrix. + tol : float + Expected to be + :math:`2^{-24}` for single precision or + :math:`2^{-53}` for double precision. + m_max : int + A value related to a bound. + ell : int + The number of columns used in the 1-norm approximation. + This is usually taken to be small, maybe between 1 and 5. + + Returns + ------- + best_m : int + Related to bounds for error control. + best_s : int + Amount of scaling. + + Notes + ----- + This is code fragment (3.1) in Al-Mohy and Higham (2011). + The discussion of default values for m_max and ell + is given between the definitions of equation (3.11) + and the definition of equation (3.12). 
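As a quick worked check of the p_max definition used above (own arithmetic, not from the SciPy test suite): with the default m_max = 55 the candidates near sqrt(55) ~ 7.4 are p = 7, 8, 9, and since 8*7 = 56 <= 56 while 9*8 = 72 > 56, the result is 8.

from scipy.sparse.linalg._expm_multiply import _compute_p_max

assert _compute_p_max(55) == 8   # 8*7 = 56 <= 55 + 1
assert _compute_p_max(5) == 3    # 3*2 = 6 <= 6, while 4*3 = 12 > 6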
+ + """ + if ell < 1: + raise ValueError('expected ell to be a positive integer') + best_m = None + best_s = None + if _condition_3_13(norm_info.onenorm(), n0, m_max, ell): + for m, theta in _theta.items(): + s = int(np.ceil(norm_info.onenorm() / theta)) + if best_m is None or m * s < best_m * best_s: + best_m = m + best_s = s + else: + # Equation (3.11). + for p in range(2, _compute_p_max(m_max) + 1): + for m in range(p*(p-1)-1, m_max+1): + if m in _theta: + s = _compute_cost_div_m(m, p, norm_info) + if best_m is None or m * s < best_m * best_s: + best_m = m + best_s = s + best_s = max(best_s, 1) + return best_m, best_s + + +def _condition_3_13(A_1_norm, n0, m_max, ell): + """ + A helper function for the _expm_multiply_* functions. + + Parameters + ---------- + A_1_norm : float + The precomputed 1-norm of A. + n0 : int + Number of columns in the _expm_multiply_* B matrix. + m_max : int + A value related to a bound. + ell : int + The number of columns used in the 1-norm approximation. + This is usually taken to be small, maybe between 1 and 5. + + Returns + ------- + value : bool + Indicates whether or not the condition has been met. + + Notes + ----- + This is condition (3.13) in Al-Mohy and Higham (2011). + + """ + + # This is the rhs of equation (3.12). + p_max = _compute_p_max(m_max) + a = 2 * ell * p_max * (p_max + 3) + + # Evaluate the condition (3.13). + b = _theta[m_max] / float(n0 * m_max) + return A_1_norm <= a * b + + +def _expm_multiply_interval(A, B, start=None, stop=None, num=None, + endpoint=None, traceA=None, balance=False, + status_only=False): + """ + Compute the action of the matrix exponential at multiple time points. + + Parameters + ---------- + A : transposable linear operator + The operator whose exponential is of interest. + B : ndarray + The matrix to be multiplied by the matrix exponential of A. + start : scalar, optional + The starting time point of the sequence. + stop : scalar, optional + The end time point of the sequence, unless `endpoint` is set to False. + In that case, the sequence consists of all but the last of ``num + 1`` + evenly spaced time points, so that `stop` is excluded. + Note that the step size changes when `endpoint` is False. + num : int, optional + Number of time points to use. + traceA : scalar, optional + Trace of `A`. If not given the trace is estimated for linear operators, + or calculated exactly for sparse matrices. It is used to precondition + `A`, thus an approximate trace is acceptable + endpoint : bool, optional + If True, `stop` is the last time point. Otherwise, it is not included. + balance : bool + Indicates whether or not to apply balancing. + status_only : bool + A flag that is set to True for some debugging and testing operations. + + Returns + ------- + F : ndarray + :math:`e^{t_k A} B` + status : int + An integer status for testing and debugging. + + Notes + ----- + This is algorithm (5.2) in Al-Mohy and Higham (2011). + + There seems to be a typo, where line 15 of the algorithm should be + moved to line 6.5 (between lines 6 and 7). 
+ + """ + if balance: + raise NotImplementedError + if len(A.shape) != 2 or A.shape[0] != A.shape[1]: + raise ValueError('expected A to be like a square matrix') + if A.shape[1] != B.shape[0]: + raise ValueError('shapes of matrices A {} and B {} are incompatible' + .format(A.shape, B.shape)) + ident = _ident_like(A) + is_linear_operator = isinstance(A, scipy.sparse.linalg.LinearOperator) + n = A.shape[0] + if len(B.shape) == 1: + n0 = 1 + elif len(B.shape) == 2: + n0 = B.shape[1] + else: + raise ValueError('expected B to be like a matrix or a vector') + u_d = 2**-53 + tol = u_d + if traceA is None: + if is_linear_operator: + warn("Trace of LinearOperator not available, it will be estimated." + " Provide `traceA` to ensure performance.", stacklevel=3) + # m3=5 is bit arbitrary choice, a more accurate trace (larger m3) might + # speed up exponential calculation, but trace estimation is also costly + # an educated guess would need to consider the number of time points + traceA = traceest(A, m3=5) if is_linear_operator else _trace(A) + mu = traceA / float(n) + + # Get the linspace samples, attempting to preserve the linspace defaults. + linspace_kwargs = {'retstep': True} + if num is not None: + linspace_kwargs['num'] = num + if endpoint is not None: + linspace_kwargs['endpoint'] = endpoint + samples, step = np.linspace(start, stop, **linspace_kwargs) + + # Convert the linspace output to the notation used by the publication. + nsamples = len(samples) + if nsamples < 2: + raise ValueError('at least two time points are required') + q = nsamples - 1 + h = step + t_0 = samples[0] + t_q = samples[q] + + # Define the output ndarray. + # Use an ndim=3 shape, such that the last two indices + # are the ones that may be involved in level 3 BLAS operations. + X_shape = (nsamples,) + B.shape + X = np.empty(X_shape, dtype=np.result_type(A.dtype, B.dtype, float)) + t = t_q - t_0 + A = A - mu * ident + A_1_norm = onenormest(A) if is_linear_operator else _exact_1_norm(A) + ell = 2 + norm_info = LazyOperatorNormInfo(t*A, A_1_norm=t*A_1_norm, ell=ell) + if t*A_1_norm == 0: + m_star, s = 0, 1 + else: + m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell) + + # Compute the expm action up to the initial time point. + X[0] = _expm_multiply_simple_core(A, B, t_0, mu, m_star, s) + + # Compute the expm action at the rest of the time points. + if q <= s: + if status_only: + return 0 + else: + return _expm_multiply_interval_core_0(A, X, + h, mu, q, norm_info, tol, ell,n0) + elif not (q % s): + if status_only: + return 1 + else: + return _expm_multiply_interval_core_1(A, X, + h, mu, m_star, s, q, tol) + elif (q % s): + if status_only: + return 2 + else: + return _expm_multiply_interval_core_2(A, X, + h, mu, m_star, s, q, tol) + else: + raise Exception('internal error') + + +def _expm_multiply_interval_core_0(A, X, h, mu, q, norm_info, tol, ell, n0): + """ + A helper function, for the case q <= s. + """ + + # Compute the new values of m_star and s which should be applied + # over intervals of size t/q + if norm_info.onenorm() == 0: + m_star, s = 0, 1 + else: + norm_info.set_scale(1./q) + m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell) + norm_info.set_scale(1) + + for k in range(q): + X[k+1] = _expm_multiply_simple_core(A, X[k], h, mu, m_star, s) + return X, 0 + + +def _expm_multiply_interval_core_1(A, X, h, mu, m_star, s, q, tol): + """ + A helper function, for the case q > s and q % s == 0. 
+ """ + d = q // s + input_shape = X.shape[1:] + K_shape = (m_star + 1, ) + input_shape + K = np.empty(K_shape, dtype=X.dtype) + for i in range(s): + Z = X[i*d] + K[0] = Z + high_p = 0 + for k in range(1, d+1): + F = K[0] + c1 = _exact_inf_norm(F) + for p in range(1, m_star+1): + if p > high_p: + K[p] = h * A.dot(K[p-1]) / float(p) + coeff = float(pow(k, p)) + F = F + coeff * K[p] + inf_norm_K_p_1 = _exact_inf_norm(K[p]) + c2 = coeff * inf_norm_K_p_1 + if c1 + c2 <= tol * _exact_inf_norm(F): + break + c1 = c2 + X[k + i*d] = np.exp(k*h*mu) * F + return X, 1 + + +def _expm_multiply_interval_core_2(A, X, h, mu, m_star, s, q, tol): + """ + A helper function, for the case q > s and q % s > 0. + """ + d = q // s + j = q // d + r = q - d * j + input_shape = X.shape[1:] + K_shape = (m_star + 1, ) + input_shape + K = np.empty(K_shape, dtype=X.dtype) + for i in range(j + 1): + Z = X[i*d] + K[0] = Z + high_p = 0 + if i < j: + effective_d = d + else: + effective_d = r + for k in range(1, effective_d+1): + F = K[0] + c1 = _exact_inf_norm(F) + for p in range(1, m_star+1): + if p == high_p + 1: + K[p] = h * A.dot(K[p-1]) / float(p) + high_p = p + coeff = float(pow(k, p)) + F = F + coeff * K[p] + inf_norm_K_p_1 = _exact_inf_norm(K[p]) + c2 = coeff * inf_norm_K_p_1 + if c1 + c2 <= tol * _exact_inf_norm(F): + break + c1 = c2 + X[k + i*d] = np.exp(k*h*mu) * F + return X, 2 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_interface.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_interface.py new file mode 100644 index 0000000000000000000000000000000000000000..7c515167c326e27f4dce21cbfa5c052995afc7da --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_interface.py @@ -0,0 +1,896 @@ +"""Abstract linear algebra library. + +This module defines a class hierarchy that implements a kind of "lazy" +matrix representation, called the ``LinearOperator``. It can be used to do +linear algebra with extremely large sparse or structured matrices, without +representing those explicitly in memory. Such matrices can be added, +multiplied, transposed, etc. + +As a motivating example, suppose you want have a matrix where almost all of +the elements have the value one. The standard sparse matrix representation +skips the storage of zeros, but not ones. By contrast, a LinearOperator is +able to represent such matrices efficiently. First, we need a compact way to +represent an all-ones matrix:: + + >>> import numpy as np + >>> from scipy.sparse.linalg._interface import LinearOperator + >>> class Ones(LinearOperator): + ... def __init__(self, shape): + ... super().__init__(dtype=None, shape=shape) + ... def _matvec(self, x): + ... return np.repeat(x.sum(), self.shape[0]) + +Instances of this class emulate ``np.ones(shape)``, but using a constant +amount of storage, independent of ``shape``. The ``_matvec`` method specifies +how this linear operator multiplies with (operates on) a vector. 
We can now +add this operator to a sparse matrix that stores only offsets from one:: + + >>> from scipy.sparse.linalg._interface import aslinearoperator + >>> from scipy.sparse import csr_matrix + >>> offsets = csr_matrix([[1, 0, 2], [0, -1, 0], [0, 0, 3]]) + >>> A = aslinearoperator(offsets) + Ones(offsets.shape) + >>> A.dot([1, 2, 3]) + array([13, 4, 15]) + +The result is the same as that given by its dense, explicitly-stored +counterpart:: + + >>> (np.ones(A.shape, A.dtype) + offsets.toarray()).dot([1, 2, 3]) + array([13, 4, 15]) + +Several algorithms in the ``scipy.sparse`` library are able to operate on +``LinearOperator`` instances. +""" + +import warnings + +import numpy as np + +from scipy.sparse import issparse +from scipy.sparse._sputils import isshape, isintlike, asmatrix, is_pydata_spmatrix + +__all__ = ['LinearOperator', 'aslinearoperator'] + + +class LinearOperator: + """Common interface for performing matrix vector products + + Many iterative methods (e.g. cg, gmres) do not need to know the + individual entries of a matrix to solve a linear system A*x=b. + Such solvers only require the computation of matrix vector + products, A*v where v is a dense vector. This class serves as + an abstract interface between iterative solvers and matrix-like + objects. + + To construct a concrete LinearOperator, either pass appropriate + callables to the constructor of this class, or subclass it. + + A subclass must implement either one of the methods ``_matvec`` + and ``_matmat``, and the attributes/properties ``shape`` (pair of + integers) and ``dtype`` (may be None). It may call the ``__init__`` + on this class to have these attributes validated. Implementing + ``_matvec`` automatically implements ``_matmat`` (using a naive + algorithm) and vice-versa. + + Optionally, a subclass may implement ``_rmatvec`` or ``_adjoint`` + to implement the Hermitian adjoint (conjugate transpose). As with + ``_matvec`` and ``_matmat``, implementing either ``_rmatvec`` or + ``_adjoint`` implements the other automatically. Implementing + ``_adjoint`` is preferable; ``_rmatvec`` is mostly there for + backwards compatibility. + + Parameters + ---------- + shape : tuple + Matrix dimensions (M, N). + matvec : callable f(v) + Returns returns A * v. + rmatvec : callable f(v) + Returns A^H * v, where A^H is the conjugate transpose of A. + matmat : callable f(V) + Returns A * V, where V is a dense matrix with dimensions (N, K). + dtype : dtype + Data type of the matrix. + rmatmat : callable f(V) + Returns A^H * V, where V is a dense matrix with dimensions (M, K). + + Attributes + ---------- + args : tuple + For linear operators describing products etc. of other linear + operators, the operands of the binary operation. + ndim : int + Number of dimensions (this is always 2) + + See Also + -------- + aslinearoperator : Construct LinearOperators + + Notes + ----- + The user-defined matvec() function must properly handle the case + where v has shape (N,) as well as the (N,1) case. The shape of + the return type is handled internally by LinearOperator. + + LinearOperator instances can also be multiplied, added with each + other and exponentiated, all lazily: the result of these operations + is always a new, composite LinearOperator, that defers linear + operations to the original operators and combines the results. + + More details regarding how to subclass a LinearOperator and several + examples of concrete LinearOperator instances can be found in the + external project `PyLops `_. 
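A minimal sketch of the subclassing route described above, assuming a concrete 2x2 diagonal action and providing _adjoint alongside _matvec (per the note that implementing _adjoint is preferable to _rmatvec):

import numpy as np
from scipy.sparse.linalg import LinearOperator

class Diag2(LinearOperator):
    """y = diag([2, 3]) @ x, with an explicit adjoint."""
    def __init__(self):
        super().__init__(dtype=np.float64, shape=(2, 2))
    def _matvec(self, x):
        return np.array([2.0 * x[0], 3.0 * x[1]])
    def _adjoint(self):
        return self              # a real diagonal operator is self-adjoint

op = Diag2()
print(op.matvec([1.0, 1.0]))     # [2. 3.]
print(op.H.matvec([1.0, 1.0]))   # [2. 3.], via the explicit adjoint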
+ + + Examples + -------- + >>> import numpy as np + >>> from scipy.sparse.linalg import LinearOperator + >>> def mv(v): + ... return np.array([2*v[0], 3*v[1]]) + ... + >>> A = LinearOperator((2,2), matvec=mv) + >>> A + <2x2 _CustomLinearOperator with dtype=float64> + >>> A.matvec(np.ones(2)) + array([ 2., 3.]) + >>> A * np.ones(2) + array([ 2., 3.]) + + """ + + ndim = 2 + # Necessary for right matmul with numpy arrays. + __array_ufunc__ = None + + def __new__(cls, *args, **kwargs): + if cls is LinearOperator: + # Operate as _CustomLinearOperator factory. + return super().__new__(_CustomLinearOperator) + else: + obj = super().__new__(cls) + + if (type(obj)._matvec == LinearOperator._matvec + and type(obj)._matmat == LinearOperator._matmat): + warnings.warn("LinearOperator subclass should implement" + " at least one of _matvec and _matmat.", + category=RuntimeWarning, stacklevel=2) + + return obj + + def __init__(self, dtype, shape): + """Initialize this LinearOperator. + + To be called by subclasses. ``dtype`` may be None; ``shape`` should + be convertible to a length-2 tuple. + """ + if dtype is not None: + dtype = np.dtype(dtype) + + shape = tuple(shape) + if not isshape(shape): + raise ValueError(f"invalid shape {shape!r} (must be 2-d)") + + self.dtype = dtype + self.shape = shape + + def _init_dtype(self): + """Called from subclasses at the end of the __init__ routine. + """ + if self.dtype is None: + v = np.zeros(self.shape[-1]) + self.dtype = np.asarray(self.matvec(v)).dtype + + def _matmat(self, X): + """Default matrix-matrix multiplication handler. + + Falls back on the user-defined _matvec method, so defining that will + define matrix multiplication (though in a very suboptimal way). + """ + + return np.hstack([self.matvec(col.reshape(-1,1)) for col in X.T]) + + def _matvec(self, x): + """Default matrix-vector multiplication handler. + + If self is a linear operator of shape (M, N), then this method will + be called on a shape (N,) or (N, 1) ndarray, and should return a + shape (M,) or (M, 1) ndarray. + + This default implementation falls back on _matmat, so defining that + will define matrix-vector multiplication as well. + """ + return self.matmat(x.reshape(-1, 1)) + + def matvec(self, x): + """Matrix-vector multiplication. + + Performs the operation y=A*x where A is an MxN linear + operator and x is a column vector or 1-d array. + + Parameters + ---------- + x : {matrix, ndarray} + An array with shape (N,) or (N,1). + + Returns + ------- + y : {matrix, ndarray} + A matrix or ndarray with shape (M,) or (M,1) depending + on the type and shape of the x argument. + + Notes + ----- + This matvec wraps the user-specified matvec routine or overridden + _matvec method to ensure that y has the correct shape and type. + + """ + + x = np.asanyarray(x) + + M,N = self.shape + + if x.shape != (N,) and x.shape != (N,1): + raise ValueError('dimension mismatch') + + y = self._matvec(x) + + if isinstance(x, np.matrix): + y = asmatrix(y) + else: + y = np.asarray(y) + + if x.ndim == 1: + y = y.reshape(M) + elif x.ndim == 2: + y = y.reshape(M,1) + else: + raise ValueError('invalid shape returned by user-defined matvec()') + + return y + + def rmatvec(self, x): + """Adjoint matrix-vector multiplication. + + Performs the operation y = A^H * x where A is an MxN linear + operator and x is a column vector or 1-d array. + + Parameters + ---------- + x : {matrix, ndarray} + An array with shape (M,) or (M,1). 
+ + Returns + ------- + y : {matrix, ndarray} + A matrix or ndarray with shape (N,) or (N,1) depending + on the type and shape of the x argument. + + Notes + ----- + This rmatvec wraps the user-specified rmatvec routine or overridden + _rmatvec method to ensure that y has the correct shape and type. + + """ + + x = np.asanyarray(x) + + M,N = self.shape + + if x.shape != (M,) and x.shape != (M,1): + raise ValueError('dimension mismatch') + + y = self._rmatvec(x) + + if isinstance(x, np.matrix): + y = asmatrix(y) + else: + y = np.asarray(y) + + if x.ndim == 1: + y = y.reshape(N) + elif x.ndim == 2: + y = y.reshape(N,1) + else: + raise ValueError('invalid shape returned by user-defined rmatvec()') + + return y + + def _rmatvec(self, x): + """Default implementation of _rmatvec; defers to adjoint.""" + if type(self)._adjoint == LinearOperator._adjoint: + # _adjoint not overridden, prevent infinite recursion + raise NotImplementedError + else: + return self.H.matvec(x) + + def matmat(self, X): + """Matrix-matrix multiplication. + + Performs the operation y=A*X where A is an MxN linear + operator and X dense N*K matrix or ndarray. + + Parameters + ---------- + X : {matrix, ndarray} + An array with shape (N,K). + + Returns + ------- + Y : {matrix, ndarray} + A matrix or ndarray with shape (M,K) depending on + the type of the X argument. + + Notes + ----- + This matmat wraps any user-specified matmat routine or overridden + _matmat method to ensure that y has the correct type. + + """ + if not (issparse(X) or is_pydata_spmatrix(X)): + X = np.asanyarray(X) + + if X.ndim != 2: + raise ValueError(f'expected 2-d ndarray or matrix, not {X.ndim}-d') + + if X.shape[0] != self.shape[1]: + raise ValueError(f'dimension mismatch: {self.shape}, {X.shape}') + + try: + Y = self._matmat(X) + except Exception as e: + if issparse(X) or is_pydata_spmatrix(X): + raise TypeError( + "Unable to multiply a LinearOperator with a sparse matrix." + " Wrap the matrix in aslinearoperator first." + ) from e + raise + + if isinstance(Y, np.matrix): + Y = asmatrix(Y) + + return Y + + def rmatmat(self, X): + """Adjoint matrix-matrix multiplication. + + Performs the operation y = A^H * x where A is an MxN linear + operator and x is a column vector or 1-d array, or 2-d array. + The default implementation defers to the adjoint. + + Parameters + ---------- + X : {matrix, ndarray} + A matrix or 2D array. + + Returns + ------- + Y : {matrix, ndarray} + A matrix or 2D array depending on the type of the input. + + Notes + ----- + This rmatmat wraps the user-specified rmatmat routine. + + """ + if not (issparse(X) or is_pydata_spmatrix(X)): + X = np.asanyarray(X) + + if X.ndim != 2: + raise ValueError('expected 2-d ndarray or matrix, not %d-d' + % X.ndim) + + if X.shape[0] != self.shape[0]: + raise ValueError(f'dimension mismatch: {self.shape}, {X.shape}') + + try: + Y = self._rmatmat(X) + except Exception as e: + if issparse(X) or is_pydata_spmatrix(X): + raise TypeError( + "Unable to multiply a LinearOperator with a sparse matrix." + " Wrap the matrix in aslinearoperator() first." 
+ ) from e + raise + + if isinstance(Y, np.matrix): + Y = asmatrix(Y) + return Y + + def _rmatmat(self, X): + """Default implementation of _rmatmat defers to rmatvec or adjoint.""" + if type(self)._adjoint == LinearOperator._adjoint: + return np.hstack([self.rmatvec(col.reshape(-1, 1)) for col in X.T]) + else: + return self.H.matmat(X) + + def __call__(self, x): + return self*x + + def __mul__(self, x): + return self.dot(x) + + def __truediv__(self, other): + if not np.isscalar(other): + raise ValueError("Can only divide a linear operator by a scalar.") + + return _ScaledLinearOperator(self, 1.0/other) + + def dot(self, x): + """Matrix-matrix or matrix-vector multiplication. + + Parameters + ---------- + x : array_like + 1-d or 2-d array, representing a vector or matrix. + + Returns + ------- + Ax : array + 1-d or 2-d array (depending on the shape of x) that represents + the result of applying this linear operator on x. + + """ + if isinstance(x, LinearOperator): + return _ProductLinearOperator(self, x) + elif np.isscalar(x): + return _ScaledLinearOperator(self, x) + else: + if not issparse(x) and not is_pydata_spmatrix(x): + # Sparse matrices shouldn't be converted to numpy arrays. + x = np.asarray(x) + + if x.ndim == 1 or x.ndim == 2 and x.shape[1] == 1: + return self.matvec(x) + elif x.ndim == 2: + return self.matmat(x) + else: + raise ValueError('expected 1-d or 2-d array or matrix, got %r' + % x) + + def __matmul__(self, other): + if np.isscalar(other): + raise ValueError("Scalar operands are not allowed, " + "use '*' instead") + return self.__mul__(other) + + def __rmatmul__(self, other): + if np.isscalar(other): + raise ValueError("Scalar operands are not allowed, " + "use '*' instead") + return self.__rmul__(other) + + def __rmul__(self, x): + if np.isscalar(x): + return _ScaledLinearOperator(self, x) + else: + return self._rdot(x) + + def _rdot(self, x): + """Matrix-matrix or matrix-vector multiplication from the right. + + Parameters + ---------- + x : array_like + 1-d or 2-d array, representing a vector or matrix. + + Returns + ------- + xA : array + 1-d or 2-d array (depending on the shape of x) that represents + the result of applying this linear operator on x from the right. + + Notes + ----- + This is copied from dot to implement right multiplication. + """ + if isinstance(x, LinearOperator): + return _ProductLinearOperator(x, self) + elif np.isscalar(x): + return _ScaledLinearOperator(self, x) + else: + if not issparse(x) and not is_pydata_spmatrix(x): + # Sparse matrices shouldn't be converted to numpy arrays. + x = np.asarray(x) + + # We use transpose instead of rmatvec/rmatmat to avoid + # unnecessary complex conjugation if possible. + if x.ndim == 1 or x.ndim == 2 and x.shape[0] == 1: + return self.T.matvec(x.T).T + elif x.ndim == 2: + return self.T.matmat(x.T).T + else: + raise ValueError('expected 1-d or 2-d array or matrix, got %r' + % x) + + def __pow__(self, p): + if np.isscalar(p): + return _PowerLinearOperator(self, p) + else: + return NotImplemented + + def __add__(self, x): + if isinstance(x, LinearOperator): + return _SumLinearOperator(self, x) + else: + return NotImplemented + + def __neg__(self): + return _ScaledLinearOperator(self, -1) + + def __sub__(self, x): + return self.__add__(-x) + + def __repr__(self): + M,N = self.shape + if self.dtype is None: + dt = 'unspecified dtype' + else: + dt = 'dtype=' + str(self.dtype) + + return '<%dx%d %s with %s>' % (M, N, self.__class__.__name__, dt) + + def adjoint(self): + """Hermitian adjoint. 
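For example, products, scalar multiples and adjoints all stay lazy until applied; a hedged sketch using operators wrapped from small dense arrays:

import numpy as np
from scipy.sparse.linalg import aslinearoperator

A = aslinearoperator(np.array([[1.0, 2.0], [3.0, 4.0]]))
B = aslinearoperator(np.array([[0.0, 1.0], [1.0, 0.0]]))

C = 2 * (A @ B) + A.H                    # composite LinearOperator; nothing is evaluated yet
print(C.matvec(np.array([1.0, 1.0])))    # [10. 20.]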
+ + Returns the Hermitian adjoint of self, aka the Hermitian + conjugate or Hermitian transpose. For a complex matrix, the + Hermitian adjoint is equal to the conjugate transpose. + + Can be abbreviated self.H instead of self.adjoint(). + + Returns + ------- + A_H : LinearOperator + Hermitian adjoint of self. + """ + return self._adjoint() + + H = property(adjoint) + + def transpose(self): + """Transpose this linear operator. + + Returns a LinearOperator that represents the transpose of this one. + Can be abbreviated self.T instead of self.transpose(). + """ + return self._transpose() + + T = property(transpose) + + def _adjoint(self): + """Default implementation of _adjoint; defers to rmatvec.""" + return _AdjointLinearOperator(self) + + def _transpose(self): + """ Default implementation of _transpose; defers to rmatvec + conj""" + return _TransposedLinearOperator(self) + + +class _CustomLinearOperator(LinearOperator): + """Linear operator defined in terms of user-specified operations.""" + + def __init__(self, shape, matvec, rmatvec=None, matmat=None, + dtype=None, rmatmat=None): + super().__init__(dtype, shape) + + self.args = () + + self.__matvec_impl = matvec + self.__rmatvec_impl = rmatvec + self.__rmatmat_impl = rmatmat + self.__matmat_impl = matmat + + self._init_dtype() + + def _matmat(self, X): + if self.__matmat_impl is not None: + return self.__matmat_impl(X) + else: + return super()._matmat(X) + + def _matvec(self, x): + return self.__matvec_impl(x) + + def _rmatvec(self, x): + func = self.__rmatvec_impl + if func is None: + raise NotImplementedError("rmatvec is not defined") + return self.__rmatvec_impl(x) + + def _rmatmat(self, X): + if self.__rmatmat_impl is not None: + return self.__rmatmat_impl(X) + else: + return super()._rmatmat(X) + + def _adjoint(self): + return _CustomLinearOperator(shape=(self.shape[1], self.shape[0]), + matvec=self.__rmatvec_impl, + rmatvec=self.__matvec_impl, + matmat=self.__rmatmat_impl, + rmatmat=self.__matmat_impl, + dtype=self.dtype) + + +class _AdjointLinearOperator(LinearOperator): + """Adjoint of arbitrary Linear Operator""" + + def __init__(self, A): + shape = (A.shape[1], A.shape[0]) + super().__init__(dtype=A.dtype, shape=shape) + self.A = A + self.args = (A,) + + def _matvec(self, x): + return self.A._rmatvec(x) + + def _rmatvec(self, x): + return self.A._matvec(x) + + def _matmat(self, x): + return self.A._rmatmat(x) + + def _rmatmat(self, x): + return self.A._matmat(x) + +class _TransposedLinearOperator(LinearOperator): + """Transposition of arbitrary Linear Operator""" + + def __init__(self, A): + shape = (A.shape[1], A.shape[0]) + super().__init__(dtype=A.dtype, shape=shape) + self.A = A + self.args = (A,) + + def _matvec(self, x): + # NB. np.conj works also on sparse matrices + return np.conj(self.A._rmatvec(np.conj(x))) + + def _rmatvec(self, x): + return np.conj(self.A._matvec(np.conj(x))) + + def _matmat(self, x): + # NB. 
np.conj works also on sparse matrices + return np.conj(self.A._rmatmat(np.conj(x))) + + def _rmatmat(self, x): + return np.conj(self.A._matmat(np.conj(x))) + +def _get_dtype(operators, dtypes=None): + if dtypes is None: + dtypes = [] + for obj in operators: + if obj is not None and hasattr(obj, 'dtype'): + dtypes.append(obj.dtype) + return np.result_type(*dtypes) + + +class _SumLinearOperator(LinearOperator): + def __init__(self, A, B): + if not isinstance(A, LinearOperator) or \ + not isinstance(B, LinearOperator): + raise ValueError('both operands have to be a LinearOperator') + if A.shape != B.shape: + raise ValueError(f'cannot add {A} and {B}: shape mismatch') + self.args = (A, B) + super().__init__(_get_dtype([A, B]), A.shape) + + def _matvec(self, x): + return self.args[0].matvec(x) + self.args[1].matvec(x) + + def _rmatvec(self, x): + return self.args[0].rmatvec(x) + self.args[1].rmatvec(x) + + def _rmatmat(self, x): + return self.args[0].rmatmat(x) + self.args[1].rmatmat(x) + + def _matmat(self, x): + return self.args[0].matmat(x) + self.args[1].matmat(x) + + def _adjoint(self): + A, B = self.args + return A.H + B.H + + +class _ProductLinearOperator(LinearOperator): + def __init__(self, A, B): + if not isinstance(A, LinearOperator) or \ + not isinstance(B, LinearOperator): + raise ValueError('both operands have to be a LinearOperator') + if A.shape[1] != B.shape[0]: + raise ValueError(f'cannot multiply {A} and {B}: shape mismatch') + super().__init__(_get_dtype([A, B]), + (A.shape[0], B.shape[1])) + self.args = (A, B) + + def _matvec(self, x): + return self.args[0].matvec(self.args[1].matvec(x)) + + def _rmatvec(self, x): + return self.args[1].rmatvec(self.args[0].rmatvec(x)) + + def _rmatmat(self, x): + return self.args[1].rmatmat(self.args[0].rmatmat(x)) + + def _matmat(self, x): + return self.args[0].matmat(self.args[1].matmat(x)) + + def _adjoint(self): + A, B = self.args + return B.H * A.H + + +class _ScaledLinearOperator(LinearOperator): + def __init__(self, A, alpha): + if not isinstance(A, LinearOperator): + raise ValueError('LinearOperator expected as A') + if not np.isscalar(alpha): + raise ValueError('scalar expected as alpha') + if isinstance(A, _ScaledLinearOperator): + A, alpha_original = A.args + # Avoid in-place multiplication so that we don't accidentally mutate + # the original prefactor. 
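+            # For instance, 2 * (3 * A) collapses into a single
+            # _ScaledLinearOperator with prefactor 6 wrapping the original A,
+            # rather than nesting two scaled operators.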
+ alpha = alpha * alpha_original + + dtype = _get_dtype([A], [type(alpha)]) + super().__init__(dtype, A.shape) + self.args = (A, alpha) + + def _matvec(self, x): + return self.args[1] * self.args[0].matvec(x) + + def _rmatvec(self, x): + return np.conj(self.args[1]) * self.args[0].rmatvec(x) + + def _rmatmat(self, x): + return np.conj(self.args[1]) * self.args[0].rmatmat(x) + + def _matmat(self, x): + return self.args[1] * self.args[0].matmat(x) + + def _adjoint(self): + A, alpha = self.args + return A.H * np.conj(alpha) + + +class _PowerLinearOperator(LinearOperator): + def __init__(self, A, p): + if not isinstance(A, LinearOperator): + raise ValueError('LinearOperator expected as A') + if A.shape[0] != A.shape[1]: + raise ValueError('square LinearOperator expected, got %r' % A) + if not isintlike(p) or p < 0: + raise ValueError('non-negative integer expected as p') + + super().__init__(_get_dtype([A]), A.shape) + self.args = (A, p) + + def _power(self, fun, x): + res = np.array(x, copy=True) + for i in range(self.args[1]): + res = fun(res) + return res + + def _matvec(self, x): + return self._power(self.args[0].matvec, x) + + def _rmatvec(self, x): + return self._power(self.args[0].rmatvec, x) + + def _rmatmat(self, x): + return self._power(self.args[0].rmatmat, x) + + def _matmat(self, x): + return self._power(self.args[0].matmat, x) + + def _adjoint(self): + A, p = self.args + return A.H ** p + + +class MatrixLinearOperator(LinearOperator): + def __init__(self, A): + super().__init__(A.dtype, A.shape) + self.A = A + self.__adj = None + self.args = (A,) + + def _matmat(self, X): + return self.A.dot(X) + + def _adjoint(self): + if self.__adj is None: + self.__adj = _AdjointMatrixOperator(self) + return self.__adj + +class _AdjointMatrixOperator(MatrixLinearOperator): + def __init__(self, adjoint): + self.A = adjoint.A.T.conj() + self.__adjoint = adjoint + self.args = (adjoint,) + self.shape = adjoint.shape[1], adjoint.shape[0] + + @property + def dtype(self): + return self.__adjoint.dtype + + def _adjoint(self): + return self.__adjoint + + +class IdentityOperator(LinearOperator): + def __init__(self, shape, dtype=None): + super().__init__(dtype, shape) + + def _matvec(self, x): + return x + + def _rmatvec(self, x): + return x + + def _rmatmat(self, x): + return x + + def _matmat(self, x): + return x + + def _adjoint(self): + return self + + +def aslinearoperator(A): + """Return A as a LinearOperator. + + 'A' may be any of the following types: + - ndarray + - matrix + - sparse matrix (e.g. csr_matrix, lil_matrix, etc.) + - LinearOperator + - An object with .shape and .matvec attributes + + See the LinearOperator documentation for additional information. + + Notes + ----- + If 'A' has no .dtype attribute, the data type is determined by calling + :func:`LinearOperator.matvec()` - set the .dtype attribute to prevent this + call upon the linear operator creation. 
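A minimal sketch of the duck-typed case handled in the final branch below: any object exposing .shape and .matvec can be wrapped, and setting .dtype up front avoids the probing matvec call mentioned above (the class name and values are illustrative):

import numpy as np
from scipy.sparse.linalg import aslinearoperator

class Shift:
    """Cyclic shift of a length-3 vector; duck-typed, not a LinearOperator."""
    shape = (3, 3)
    dtype = np.dtype(float)      # set explicitly so the dtype need not be probed
    def matvec(self, x):
        return np.roll(x, 1)

op = aslinearoperator(Shift())
print(op.matvec([1.0, 2.0, 3.0]))   # [3. 1. 2.]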
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.sparse.linalg import aslinearoperator + >>> M = np.array([[1,2,3],[4,5,6]], dtype=np.int32) + >>> aslinearoperator(M) + <2x3 MatrixLinearOperator with dtype=int32> + """ + if isinstance(A, LinearOperator): + return A + + elif isinstance(A, np.ndarray) or isinstance(A, np.matrix): + if A.ndim > 2: + raise ValueError('array must have ndim <= 2') + A = np.atleast_2d(np.asarray(A)) + return MatrixLinearOperator(A) + + elif issparse(A) or is_pydata_spmatrix(A): + return MatrixLinearOperator(A) + + else: + if hasattr(A, 'shape') and hasattr(A, 'matvec'): + rmatvec = None + rmatmat = None + dtype = None + + if hasattr(A, 'rmatvec'): + rmatvec = A.rmatvec + if hasattr(A, 'rmatmat'): + rmatmat = A.rmatmat + if hasattr(A, 'dtype'): + dtype = A.dtype + return LinearOperator(A.shape, A.matvec, rmatvec=rmatvec, + rmatmat=rmatmat, dtype=dtype) + + else: + raise TypeError('type not understood') diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_matfuncs.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_matfuncs.py new file mode 100644 index 0000000000000000000000000000000000000000..525c97b2cca5555a917cc3bdd7617a50436538a5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_matfuncs.py @@ -0,0 +1,940 @@ +""" +Sparse matrix functions +""" + +# +# Authors: Travis Oliphant, March 2002 +# Anthony Scopatz, August 2012 (Sparse Updates) +# Jake Vanderplas, August 2012 (Sparse Updates) +# + +__all__ = ['expm', 'inv', 'matrix_power'] + +import numpy as np +from scipy.linalg._basic import solve, solve_triangular + +from scipy.sparse._base import issparse +from scipy.sparse.linalg import spsolve +from scipy.sparse._sputils import is_pydata_spmatrix, isintlike + +import scipy.sparse +import scipy.sparse.linalg +from scipy.sparse.linalg._interface import LinearOperator +from scipy.sparse._construct import eye + +from ._expm_multiply import _ident_like, _exact_1_norm as _onenorm + + +UPPER_TRIANGULAR = 'upper_triangular' + + +def inv(A): + """ + Compute the inverse of a sparse matrix + + Parameters + ---------- + A : (M, M) sparse matrix + square matrix to be inverted + + Returns + ------- + Ainv : (M, M) sparse matrix + inverse of `A` + + Notes + ----- + This computes the sparse inverse of `A`. If the inverse of `A` is expected + to be non-sparse, it will likely be faster to convert `A` to dense and use + `scipy.linalg.inv`. + + Examples + -------- + >>> from scipy.sparse import csc_matrix + >>> from scipy.sparse.linalg import inv + >>> A = csc_matrix([[1., 0.], [1., 2.]]) + >>> Ainv = inv(A) + >>> Ainv + <2x2 sparse matrix of type '' + with 3 stored elements in Compressed Sparse Column format> + >>> A.dot(Ainv) + <2x2 sparse matrix of type '' + with 2 stored elements in Compressed Sparse Column format> + >>> A.dot(Ainv).toarray() + array([[ 1., 0.], + [ 0., 1.]]) + + .. versionadded:: 0.12.0 + + """ + # Check input + if not (scipy.sparse.issparse(A) or is_pydata_spmatrix(A)): + raise TypeError('Input must be a sparse matrix') + + # Use sparse direct solver to solve "AX = I" accurately + I = _ident_like(A) + Ainv = spsolve(A, I) + return Ainv + + +def _onenorm_matrix_power_nnm(A, p): + """ + Compute the 1-norm of a non-negative integer power of a non-negative matrix. + + Parameters + ---------- + A : a square ndarray or matrix or sparse matrix + Input matrix with non-negative entries. + p : non-negative integer + The power to which the matrix is to be raised. 
+ + Returns + ------- + out : float + The 1-norm of the matrix power p of A. + + """ + # Check input + if int(p) != p or p < 0: + raise ValueError('expected non-negative integer p') + p = int(p) + if len(A.shape) != 2 or A.shape[0] != A.shape[1]: + raise ValueError('expected A to be like a square matrix') + + # Explicitly make a column vector so that this works when A is a + # numpy matrix (in addition to ndarray and sparse matrix). + v = np.ones((A.shape[0], 1), dtype=float) + M = A.T + for i in range(p): + v = M.dot(v) + return np.max(v) + + +def _is_upper_triangular(A): + # This function could possibly be of wider interest. + if issparse(A): + lower_part = scipy.sparse.tril(A, -1) + # Check structural upper triangularity, + # then coincidental upper triangularity if needed. + return lower_part.nnz == 0 or lower_part.count_nonzero() == 0 + elif is_pydata_spmatrix(A): + import sparse + lower_part = sparse.tril(A, -1) + return lower_part.nnz == 0 + else: + return not np.tril(A, -1).any() + + +def _smart_matrix_product(A, B, alpha=None, structure=None): + """ + A matrix product that knows about sparse and structured matrices. + + Parameters + ---------- + A : 2d ndarray + First matrix. + B : 2d ndarray + Second matrix. + alpha : float + The matrix product will be scaled by this constant. + structure : str, optional + A string describing the structure of both matrices `A` and `B`. + Only `upper_triangular` is currently supported. + + Returns + ------- + M : 2d ndarray + Matrix product of A and B. + + """ + if len(A.shape) != 2: + raise ValueError('expected A to be a rectangular matrix') + if len(B.shape) != 2: + raise ValueError('expected B to be a rectangular matrix') + f = None + if structure == UPPER_TRIANGULAR: + if (not issparse(A) and not issparse(B) + and not is_pydata_spmatrix(A) and not is_pydata_spmatrix(B)): + f, = scipy.linalg.get_blas_funcs(('trmm',), (A, B)) + if f is not None: + if alpha is None: + alpha = 1. + out = f(alpha, A, B) + else: + if alpha is None: + out = A.dot(B) + else: + out = alpha * A.dot(B) + return out + + +class MatrixPowerOperator(LinearOperator): + + def __init__(self, A, p, structure=None): + if A.ndim != 2 or A.shape[0] != A.shape[1]: + raise ValueError('expected A to be like a square matrix') + if p < 0: + raise ValueError('expected p to be a non-negative integer') + self._A = A + self._p = p + self._structure = structure + self.dtype = A.dtype + self.ndim = A.ndim + self.shape = A.shape + + def _matvec(self, x): + for i in range(self._p): + x = self._A.dot(x) + return x + + def _rmatvec(self, x): + A_T = self._A.T + x = x.ravel() + for i in range(self._p): + x = A_T.dot(x) + return x + + def _matmat(self, X): + for i in range(self._p): + X = _smart_matrix_product(self._A, X, structure=self._structure) + return X + + @property + def T(self): + return MatrixPowerOperator(self._A.T, self._p) + + +class ProductOperator(LinearOperator): + """ + For now, this is limited to products of multiple square matrices. 
+ """ + + def __init__(self, *args, **kwargs): + self._structure = kwargs.get('structure', None) + for A in args: + if len(A.shape) != 2 or A.shape[0] != A.shape[1]: + raise ValueError( + 'For now, the ProductOperator implementation is ' + 'limited to the product of multiple square matrices.') + if args: + n = args[0].shape[0] + for A in args: + for d in A.shape: + if d != n: + raise ValueError( + 'The square matrices of the ProductOperator ' + 'must all have the same shape.') + self.shape = (n, n) + self.ndim = len(self.shape) + self.dtype = np.result_type(*[x.dtype for x in args]) + self._operator_sequence = args + + def _matvec(self, x): + for A in reversed(self._operator_sequence): + x = A.dot(x) + return x + + def _rmatvec(self, x): + x = x.ravel() + for A in self._operator_sequence: + x = A.T.dot(x) + return x + + def _matmat(self, X): + for A in reversed(self._operator_sequence): + X = _smart_matrix_product(A, X, structure=self._structure) + return X + + @property + def T(self): + T_args = [A.T for A in reversed(self._operator_sequence)] + return ProductOperator(*T_args) + + +def _onenormest_matrix_power(A, p, + t=2, itmax=5, compute_v=False, compute_w=False, structure=None): + """ + Efficiently estimate the 1-norm of A^p. + + Parameters + ---------- + A : ndarray + Matrix whose 1-norm of a power is to be computed. + p : int + Non-negative integer power. + t : int, optional + A positive parameter controlling the tradeoff between + accuracy versus time and memory usage. + Larger values take longer and use more memory + but give more accurate output. + itmax : int, optional + Use at most this many iterations. + compute_v : bool, optional + Request a norm-maximizing linear operator input vector if True. + compute_w : bool, optional + Request a norm-maximizing linear operator output vector if True. + + Returns + ------- + est : float + An underestimate of the 1-norm of the sparse matrix. + v : ndarray, optional + The vector such that ||Av||_1 == est*||v||_1. + It can be thought of as an input to the linear operator + that gives an output with particularly large norm. + w : ndarray, optional + The vector Av which has relatively large 1-norm. + It can be thought of as an output of the linear operator + that is relatively large in norm compared to the input. + + """ + return scipy.sparse.linalg.onenormest( + MatrixPowerOperator(A, p, structure=structure)) + + +def _onenormest_product(operator_seq, + t=2, itmax=5, compute_v=False, compute_w=False, structure=None): + """ + Efficiently estimate the 1-norm of the matrix product of the args. + + Parameters + ---------- + operator_seq : linear operator sequence + Matrices whose 1-norm of product is to be computed. + t : int, optional + A positive parameter controlling the tradeoff between + accuracy versus time and memory usage. + Larger values take longer and use more memory + but give more accurate output. + itmax : int, optional + Use at most this many iterations. + compute_v : bool, optional + Request a norm-maximizing linear operator input vector if True. + compute_w : bool, optional + Request a norm-maximizing linear operator output vector if True. + structure : str, optional + A string describing the structure of all operators. + Only `upper_triangular` is currently supported. + + Returns + ------- + est : float + An underestimate of the 1-norm of the sparse matrix. + v : ndarray, optional + The vector such that ||Av||_1 == est*||v||_1. 
+ It can be thought of as an input to the linear operator + that gives an output with particularly large norm. + w : ndarray, optional + The vector Av which has relatively large 1-norm. + It can be thought of as an output of the linear operator + that is relatively large in norm compared to the input. + + """ + return scipy.sparse.linalg.onenormest( + ProductOperator(*operator_seq, structure=structure)) + + +class _ExpmPadeHelper: + """ + Help lazily evaluate a matrix exponential. + + The idea is to not do more work than we need for high expm precision, + so we lazily compute matrix powers and store or precompute + other properties of the matrix. + + """ + + def __init__(self, A, structure=None, use_exact_onenorm=False): + """ + Initialize the object. + + Parameters + ---------- + A : a dense or sparse square numpy matrix or ndarray + The matrix to be exponentiated. + structure : str, optional + A string describing the structure of matrix `A`. + Only `upper_triangular` is currently supported. + use_exact_onenorm : bool, optional + If True then only the exact one-norm of matrix powers and products + will be used. Otherwise, the one-norm of powers and products + may initially be estimated. + """ + self.A = A + self._A2 = None + self._A4 = None + self._A6 = None + self._A8 = None + self._A10 = None + self._d4_exact = None + self._d6_exact = None + self._d8_exact = None + self._d10_exact = None + self._d4_approx = None + self._d6_approx = None + self._d8_approx = None + self._d10_approx = None + self.ident = _ident_like(A) + self.structure = structure + self.use_exact_onenorm = use_exact_onenorm + + @property + def A2(self): + if self._A2 is None: + self._A2 = _smart_matrix_product( + self.A, self.A, structure=self.structure) + return self._A2 + + @property + def A4(self): + if self._A4 is None: + self._A4 = _smart_matrix_product( + self.A2, self.A2, structure=self.structure) + return self._A4 + + @property + def A6(self): + if self._A6 is None: + self._A6 = _smart_matrix_product( + self.A4, self.A2, structure=self.structure) + return self._A6 + + @property + def A8(self): + if self._A8 is None: + self._A8 = _smart_matrix_product( + self.A6, self.A2, structure=self.structure) + return self._A8 + + @property + def A10(self): + if self._A10 is None: + self._A10 = _smart_matrix_product( + self.A4, self.A6, structure=self.structure) + return self._A10 + + @property + def d4_tight(self): + if self._d4_exact is None: + self._d4_exact = _onenorm(self.A4)**(1/4.) + return self._d4_exact + + @property + def d6_tight(self): + if self._d6_exact is None: + self._d6_exact = _onenorm(self.A6)**(1/6.) + return self._d6_exact + + @property + def d8_tight(self): + if self._d8_exact is None: + self._d8_exact = _onenorm(self.A8)**(1/8.) + return self._d8_exact + + @property + def d10_tight(self): + if self._d10_exact is None: + self._d10_exact = _onenorm(self.A10)**(1/10.) + return self._d10_exact + + @property + def d4_loose(self): + if self.use_exact_onenorm: + return self.d4_tight + if self._d4_exact is not None: + return self._d4_exact + else: + if self._d4_approx is None: + self._d4_approx = _onenormest_matrix_power(self.A2, 2, + structure=self.structure)**(1/4.) + return self._d4_approx + + @property + def d6_loose(self): + if self.use_exact_onenorm: + return self.d6_tight + if self._d6_exact is not None: + return self._d6_exact + else: + if self._d6_approx is None: + self._d6_approx = _onenormest_matrix_power(self.A2, 3, + structure=self.structure)**(1/6.) 
+ return self._d6_approx + + @property + def d8_loose(self): + if self.use_exact_onenorm: + return self.d8_tight + if self._d8_exact is not None: + return self._d8_exact + else: + if self._d8_approx is None: + self._d8_approx = _onenormest_matrix_power(self.A4, 2, + structure=self.structure)**(1/8.) + return self._d8_approx + + @property + def d10_loose(self): + if self.use_exact_onenorm: + return self.d10_tight + if self._d10_exact is not None: + return self._d10_exact + else: + if self._d10_approx is None: + self._d10_approx = _onenormest_product((self.A4, self.A6), + structure=self.structure)**(1/10.) + return self._d10_approx + + def pade3(self): + b = (120., 60., 12., 1.) + U = _smart_matrix_product(self.A, + b[3]*self.A2 + b[1]*self.ident, + structure=self.structure) + V = b[2]*self.A2 + b[0]*self.ident + return U, V + + def pade5(self): + b = (30240., 15120., 3360., 420., 30., 1.) + U = _smart_matrix_product(self.A, + b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident, + structure=self.structure) + V = b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident + return U, V + + def pade7(self): + b = (17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.) + U = _smart_matrix_product(self.A, + b[7]*self.A6 + b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident, + structure=self.structure) + V = b[6]*self.A6 + b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident + return U, V + + def pade9(self): + b = (17643225600., 8821612800., 2075673600., 302702400., 30270240., + 2162160., 110880., 3960., 90., 1.) + U = _smart_matrix_product(self.A, + (b[9]*self.A8 + b[7]*self.A6 + b[5]*self.A4 + + b[3]*self.A2 + b[1]*self.ident), + structure=self.structure) + V = (b[8]*self.A8 + b[6]*self.A6 + b[4]*self.A4 + + b[2]*self.A2 + b[0]*self.ident) + return U, V + + def pade13_scaled(self, s): + b = (64764752532480000., 32382376266240000., 7771770303897600., + 1187353796428800., 129060195264000., 10559470521600., + 670442572800., 33522128640., 1323241920., 40840800., 960960., + 16380., 182., 1.) + B = self.A * 2**-s + B2 = self.A2 * 2**(-2*s) + B4 = self.A4 * 2**(-4*s) + B6 = self.A6 * 2**(-6*s) + U2 = _smart_matrix_product(B6, + b[13]*B6 + b[11]*B4 + b[9]*B2, + structure=self.structure) + U = _smart_matrix_product(B, + (U2 + b[7]*B6 + b[5]*B4 + + b[3]*B2 + b[1]*self.ident), + structure=self.structure) + V2 = _smart_matrix_product(B6, + b[12]*B6 + b[10]*B4 + b[8]*B2, + structure=self.structure) + V = V2 + b[6]*B6 + b[4]*B4 + b[2]*B2 + b[0]*self.ident + return U, V + + +def expm(A): + """ + Compute the matrix exponential using Pade approximation. + + Parameters + ---------- + A : (M,M) array_like or sparse matrix + 2D Array or Matrix (sparse or dense) to be exponentiated + + Returns + ------- + expA : (M,M) ndarray + Matrix exponential of `A` + + Notes + ----- + This is algorithm (6.1) which is a simplification of algorithm (5.1). + + .. versionadded:: 0.12.0 + + References + ---------- + .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009) + "A New Scaling and Squaring Algorithm for the Matrix Exponential." + SIAM Journal on Matrix Analysis and Applications. + 31 (3). pp. 970-989. ISSN 1095-7162 + + Examples + -------- + >>> from scipy.sparse import csc_matrix + >>> from scipy.sparse.linalg import expm + >>> A = csc_matrix([[1, 0, 0], [0, 2, 0], [0, 0, 3]]) + >>> A.toarray() + array([[1, 0, 0], + [0, 2, 0], + [0, 0, 3]], dtype=int64) + >>> Aexp = expm(A) + >>> Aexp + <3x3 sparse matrix of type '' + with 3 stored elements in Compressed Sparse Column format> + >>> Aexp.toarray() + array([[ 2.71828183, 0. , 0. ], + [ 0. 
, 7.3890561 , 0. ], + [ 0. , 0. , 20.08553692]]) + """ + return _expm(A, use_exact_onenorm='auto') + + +def _expm(A, use_exact_onenorm): + # Core of expm, separated to allow testing exact and approximate + # algorithms. + + # Avoid indiscriminate asarray() to allow sparse or other strange arrays. + if isinstance(A, (list, tuple, np.matrix)): + A = np.asarray(A) + if len(A.shape) != 2 or A.shape[0] != A.shape[1]: + raise ValueError('expected a square matrix') + + # gracefully handle size-0 input, + # carefully handling sparse scenario + if A.shape == (0, 0): + out = np.zeros([0, 0], dtype=A.dtype) + if issparse(A) or is_pydata_spmatrix(A): + return A.__class__(out) + return out + + # Trivial case + if A.shape == (1, 1): + out = [[np.exp(A[0, 0])]] + + # Avoid indiscriminate casting to ndarray to + # allow for sparse or other strange arrays + if issparse(A) or is_pydata_spmatrix(A): + return A.__class__(out) + + return np.array(out) + + # Ensure input is of float type, to avoid integer overflows etc. + if ((isinstance(A, np.ndarray) or issparse(A) or is_pydata_spmatrix(A)) + and not np.issubdtype(A.dtype, np.inexact)): + A = A.astype(float) + + # Detect upper triangularity. + structure = UPPER_TRIANGULAR if _is_upper_triangular(A) else None + + if use_exact_onenorm == "auto": + # Hardcode a matrix order threshold for exact vs. estimated one-norms. + use_exact_onenorm = A.shape[0] < 200 + + # Track functions of A to help compute the matrix exponential. + h = _ExpmPadeHelper( + A, structure=structure, use_exact_onenorm=use_exact_onenorm) + + # Try Pade order 3. + eta_1 = max(h.d4_loose, h.d6_loose) + if eta_1 < 1.495585217958292e-002 and _ell(h.A, 3) == 0: + U, V = h.pade3() + return _solve_P_Q(U, V, structure=structure) + + # Try Pade order 5. + eta_2 = max(h.d4_tight, h.d6_loose) + if eta_2 < 2.539398330063230e-001 and _ell(h.A, 5) == 0: + U, V = h.pade5() + return _solve_P_Q(U, V, structure=structure) + + # Try Pade orders 7 and 9. + eta_3 = max(h.d6_tight, h.d8_loose) + if eta_3 < 9.504178996162932e-001 and _ell(h.A, 7) == 0: + U, V = h.pade7() + return _solve_P_Q(U, V, structure=structure) + if eta_3 < 2.097847961257068e+000 and _ell(h.A, 9) == 0: + U, V = h.pade9() + return _solve_P_Q(U, V, structure=structure) + + # Use Pade order 13. + eta_4 = max(h.d8_loose, h.d10_loose) + eta_5 = min(eta_3, eta_4) + theta_13 = 4.25 + + # Choose smallest s>=0 such that 2**(-s) eta_5 <= theta_13 + if eta_5 == 0: + # Nilpotent special case + s = 0 + else: + s = max(int(np.ceil(np.log2(eta_5 / theta_13))), 0) + s = s + _ell(2**-s * h.A, 13) + U, V = h.pade13_scaled(s) + X = _solve_P_Q(U, V, structure=structure) + if structure == UPPER_TRIANGULAR: + # Invoke Code Fragment 2.1. + X = _fragment_2_1(X, h.A, s) + else: + # X = r_13(A)^(2^s) by repeated squaring. + for i in range(s): + X = X.dot(X) + return X + + +def _solve_P_Q(U, V, structure=None): + """ + A helper function for expm_2009. + + Parameters + ---------- + U : ndarray + Pade numerator. + V : ndarray + Pade denominator. + structure : str, optional + A string describing the structure of both matrices `U` and `V`. + Only `upper_triangular` is currently supported. + + Notes + ----- + The `structure` argument is inspired by similar args + for theano and cvxopt functions. 
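The body that follows is the combination step of the Padé approximant: with numerator ``P = U + V`` and denominator ``Q = -U + V``, solving ``Q @ X = P`` yields ``r_m(A) = (V - U)^{-1}(U + V)``. A minimal dense sketch of that step, reusing the order-3 coefficients from ``pade3`` above and checking against ``scipy.linalg.expm``; the tiny test matrix is arbitrary and chosen so the unscaled order-3 approximant is already accurate:

    import numpy as np
    from scipy.linalg import solve, expm

    # Small, arbitrary test matrix with tiny norm.
    A = np.array([[0.0,   0.01],
                  [-0.01, 0.0]])
    I = np.eye(2)
    A2 = A @ A

    b = (120., 60., 12., 1.)          # the pade3 coefficients used above
    U = A @ (b[3]*A2 + b[1]*I)        # odd part of the numerator
    V = b[2]*A2 + b[0]*I              # even part
    X = solve(-U + V, U + V)          # the dense branch of _solve_P_Q

    assert np.allclose(X, expm(A))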
+ + """ + P = U + V + Q = -U + V + if issparse(U) or is_pydata_spmatrix(U): + return spsolve(Q, P) + elif structure is None: + return solve(Q, P) + elif structure == UPPER_TRIANGULAR: + return solve_triangular(Q, P) + else: + raise ValueError('unsupported matrix structure: ' + str(structure)) + + +def _exp_sinch(a, x): + """ + Stably evaluate exp(a)*sinh(x)/x + + Notes + ----- + The strategy of falling back to a sixth order Taylor expansion + was suggested by the Spallation Neutron Source docs + which was found on the internet by google search. + http://www.ornl.gov/~t6p/resources/xal/javadoc/gov/sns/tools/math/ElementaryFunction.html + The details of the cutoff point and the Horner-like evaluation + was picked without reference to anything in particular. + + Note that sinch is not currently implemented in scipy.special, + whereas the "engineer's" definition of sinc is implemented. + The implementation of sinc involves a scaling factor of pi + that distinguishes it from the "mathematician's" version of sinc. + + """ + + # If x is small then use sixth order Taylor expansion. + # How small is small? I am using the point where the relative error + # of the approximation is less than 1e-14. + # If x is large then directly evaluate sinh(x) / x. + if abs(x) < 0.0135: + x2 = x*x + return np.exp(a) * (1 + (x2/6.)*(1 + (x2/20.)*(1 + (x2/42.)))) + else: + return (np.exp(a + x) - np.exp(a - x)) / (2*x) + + +def _eq_10_42(lam_1, lam_2, t_12): + """ + Equation (10.42) of Functions of Matrices: Theory and Computation. + + Notes + ----- + This is a helper function for _fragment_2_1 of expm_2009. + Equation (10.42) is on page 251 in the section on Schur algorithms. + In particular, section 10.4.3 explains the Schur-Parlett algorithm. + expm([[lam_1, t_12], [0, lam_1]) + = + [[exp(lam_1), t_12*exp((lam_1 + lam_2)/2)*sinch((lam_1 - lam_2)/2)], + [0, exp(lam_2)] + """ + + # The plain formula t_12 * (exp(lam_2) - exp(lam_2)) / (lam_2 - lam_1) + # apparently suffers from cancellation, according to Higham's textbook. + # A nice implementation of sinch, defined as sinh(x)/x, + # will apparently work around the cancellation. + a = 0.5 * (lam_1 + lam_2) + b = 0.5 * (lam_1 - lam_2) + return t_12 * _exp_sinch(a, b) + + +def _fragment_2_1(X, T, s): + """ + A helper function for expm_2009. + + Notes + ----- + The argument X is modified in-place, but this modification is not the same + as the returned value of the function. + This function also takes pains to do things in ways that are compatible + with sparse matrices, for example by avoiding fancy indexing + and by using methods of the matrices whenever possible instead of + using functions of the numpy or scipy libraries themselves. + + """ + # Form X = r_m(2^-s T) + # Replace diag(X) by exp(2^-s diag(T)). + n = X.shape[0] + diag_T = np.ravel(T.diagonal().copy()) + + # Replace diag(X) by exp(2^-s diag(T)). + scale = 2 ** -s + exp_diag = np.exp(scale * diag_T) + for k in range(n): + X[k, k] = exp_diag[k] + + for i in range(s-1, -1, -1): + X = X.dot(X) + + # Replace diag(X) by exp(2^-i diag(T)). + scale = 2 ** -i + exp_diag = np.exp(scale * diag_T) + for k in range(n): + X[k, k] = exp_diag[k] + + # Replace (first) superdiagonal of X by explicit formula + # for superdiagonal of exp(2^-i T) from Eq (10.42) of + # the author's 2008 textbook + # Functions of Matrices: Theory and Computation. 
+ for k in range(n-1): + lam_1 = scale * diag_T[k] + lam_2 = scale * diag_T[k+1] + t_12 = scale * T[k, k+1] + value = _eq_10_42(lam_1, lam_2, t_12) + X[k, k+1] = value + + # Return the updated X matrix. + return X + + +def _ell(A, m): + """ + A helper function for expm_2009. + + Parameters + ---------- + A : linear operator + A linear operator whose norm of power we care about. + m : int + The power of the linear operator + + Returns + ------- + value : int + A value related to a bound. + + """ + if len(A.shape) != 2 or A.shape[0] != A.shape[1]: + raise ValueError('expected A to be like a square matrix') + + # The c_i are explained in (2.2) and (2.6) of the 2005 expm paper. + # They are coefficients of terms of a generating function series expansion. + c_i = {3: 100800., + 5: 10059033600., + 7: 4487938430976000., + 9: 5914384781877411840000., + 13: 113250775606021113483283660800000000. + } + abs_c_recip = c_i[m] + + # This is explained after Eq. (1.2) of the 2009 expm paper. + # It is the "unit roundoff" of IEEE double precision arithmetic. + u = 2**-53 + + # Compute the one-norm of matrix power p of abs(A). + A_abs_onenorm = _onenorm_matrix_power_nnm(abs(A), 2*m + 1) + + # Treat zero norm as a special case. + if not A_abs_onenorm: + return 0 + + alpha = A_abs_onenorm / (_onenorm(A) * abs_c_recip) + log2_alpha_div_u = np.log2(alpha/u) + value = int(np.ceil(log2_alpha_div_u / (2 * m))) + return max(value, 0) + +def matrix_power(A, power): + """ + Raise a square matrix to the integer power, `power`. + + For non-negative integers, ``A**power`` is computed using repeated + matrix multiplications. Negative integers are not supported. + + Parameters + ---------- + A : (M, M) square sparse array or matrix + sparse array that will be raised to power `power` + power : int + Exponent used to raise sparse array `A` + + Returns + ------- + A**power : (M, M) sparse array or matrix + The output matrix will be the same shape as A, and will preserve + the class of A, but the format of the output may be changed. + + Notes + ----- + This uses a recursive implementation of the matrix power. For computing + the matrix power using a reasonably large `power`, this may be less efficient + than computing the product directly, using A @ A @ ... @ A. + This is contingent upon the number of nonzero entries in the matrix. + + .. 
versionadded:: 1.12.0 + + Examples + -------- + >>> from scipy import sparse + >>> A = sparse.csc_array([[0,1,0],[1,0,1],[0,1,0]]) + >>> A.todense() + array([[0, 1, 0], + [1, 0, 1], + [0, 1, 0]]) + >>> (A @ A).todense() + array([[1, 0, 1], + [0, 2, 0], + [1, 0, 1]]) + >>> A2 = sparse.linalg.matrix_power(A, 2) + >>> A2.todense() + array([[1, 0, 1], + [0, 2, 0], + [1, 0, 1]]) + >>> A4 = sparse.linalg.matrix_power(A, 4) + >>> A4.todense() + array([[2, 0, 2], + [0, 4, 0], + [2, 0, 2]]) + + """ + M, N = A.shape + if M != N: + raise TypeError('sparse matrix is not square') + + if isintlike(power): + power = int(power) + if power < 0: + raise ValueError('exponent must be >= 0') + + if power == 0: + return eye(M, dtype=A.dtype) + + if power == 1: + return A.copy() + + tmp = matrix_power(A, power // 2) + if power % 2: + return A @ tmp @ tmp + else: + return tmp @ tmp + else: + raise ValueError("exponent must be an integer") diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_norm.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_norm.py new file mode 100644 index 0000000000000000000000000000000000000000..38f3a6d7a6f84ec315b3177b384eef5c5d93311a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_norm.py @@ -0,0 +1,193 @@ +"""Sparse matrix norms. + +""" +import numpy as np +from scipy.sparse import issparse +from scipy.sparse.linalg import svds +import scipy.sparse as sp + +from numpy import sqrt, abs + +__all__ = ['norm'] + + +def _sparse_frobenius_norm(x): + data = sp._sputils._todata(x) + return np.linalg.norm(data) + + +def norm(x, ord=None, axis=None): + """ + Norm of a sparse matrix + + This function is able to return one of seven different matrix norms, + depending on the value of the ``ord`` parameter. + + Parameters + ---------- + x : a sparse matrix + Input sparse matrix. + ord : {non-zero int, inf, -inf, 'fro'}, optional + Order of the norm (see table under ``Notes``). inf means numpy's + `inf` object. + axis : {int, 2-tuple of ints, None}, optional + If `axis` is an integer, it specifies the axis of `x` along which to + compute the vector norms. If `axis` is a 2-tuple, it specifies the + axes that hold 2-D matrices, and the matrix norms of these matrices + are computed. If `axis` is None then either a vector norm (when `x` + is 1-D) or a matrix norm (when `x` is 2-D) is returned. + + Returns + ------- + n : float or ndarray + + Notes + ----- + Some of the ord are not implemented because some associated functions like, + _multi_svd_norm, are not yet available for sparse matrix. + + This docstring is modified based on numpy.linalg.norm. + https://github.com/numpy/numpy/blob/main/numpy/linalg/linalg.py + + The following norms can be calculated: + + ===== ============================ + ord norm for sparse matrices + ===== ============================ + None Frobenius norm + 'fro' Frobenius norm + inf max(sum(abs(x), axis=1)) + -inf min(sum(abs(x), axis=1)) + 0 abs(x).sum(axis=axis) + 1 max(sum(abs(x), axis=0)) + -1 min(sum(abs(x), axis=0)) + 2 Spectral norm (the largest singular value) + -2 Not implemented + other Not implemented + ===== ============================ + + The Frobenius norm is given by [1]_: + + :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}` + + References + ---------- + .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*, + Baltimore, MD, Johns Hopkins University Press, 1985, pg. 
15 + + Examples + -------- + >>> from scipy.sparse import * + >>> import numpy as np + >>> from scipy.sparse.linalg import norm + >>> a = np.arange(9) - 4 + >>> a + array([-4, -3, -2, -1, 0, 1, 2, 3, 4]) + >>> b = a.reshape((3, 3)) + >>> b + array([[-4, -3, -2], + [-1, 0, 1], + [ 2, 3, 4]]) + + >>> b = csr_matrix(b) + >>> norm(b) + 7.745966692414834 + >>> norm(b, 'fro') + 7.745966692414834 + >>> norm(b, np.inf) + 9 + >>> norm(b, -np.inf) + 2 + >>> norm(b, 1) + 7 + >>> norm(b, -1) + 6 + + The matrix 2-norm or the spectral norm is the largest singular + value, computed approximately and with limitations. + + >>> b = diags([-1, 1], [0, 1], shape=(9, 10)) + >>> norm(b, 2) + 1.9753... + """ + if not issparse(x): + raise TypeError("input is not sparse. use numpy.linalg.norm") + + # Check the default case first and handle it immediately. + if axis is None and ord in (None, 'fro', 'f'): + return _sparse_frobenius_norm(x) + + # Some norms require functions that are not implemented for all types. + x = x.tocsr() + + if axis is None: + axis = (0, 1) + elif not isinstance(axis, tuple): + msg = "'axis' must be None, an integer or a tuple of integers" + try: + int_axis = int(axis) + except TypeError as e: + raise TypeError(msg) from e + if axis != int_axis: + raise TypeError(msg) + axis = (int_axis,) + + nd = 2 + if len(axis) == 2: + row_axis, col_axis = axis + if not (-nd <= row_axis < nd and -nd <= col_axis < nd): + message = f'Invalid axis {axis!r} for an array with shape {x.shape!r}' + raise ValueError(message) + if row_axis % nd == col_axis % nd: + raise ValueError('Duplicate axes given.') + if ord == 2: + # Only solver="lobpcg" supports all numpy dtypes + _, s, _ = svds(x, k=1, solver="lobpcg") + return s[0] + elif ord == -2: + raise NotImplementedError + #return _multi_svd_norm(x, row_axis, col_axis, amin) + elif ord == 1: + return abs(x).sum(axis=row_axis).max(axis=col_axis)[0,0] + elif ord == np.inf: + return abs(x).sum(axis=col_axis).max(axis=row_axis)[0,0] + elif ord == -1: + return abs(x).sum(axis=row_axis).min(axis=col_axis)[0,0] + elif ord == -np.inf: + return abs(x).sum(axis=col_axis).min(axis=row_axis)[0,0] + elif ord in (None, 'f', 'fro'): + # The axis order does not matter for this norm. 
+ return _sparse_frobenius_norm(x) + else: + raise ValueError("Invalid norm order for matrices.") + elif len(axis) == 1: + a, = axis + if not (-nd <= a < nd): + message = f'Invalid axis {axis!r} for an array with shape {x.shape!r}' + raise ValueError(message) + if ord == np.inf: + M = abs(x).max(axis=a) + elif ord == -np.inf: + M = abs(x).min(axis=a) + elif ord == 0: + # Zero norm + M = (x != 0).sum(axis=a) + elif ord == 1: + # special case for speedup + M = abs(x).sum(axis=a) + elif ord in (2, None): + M = sqrt(abs(x).power(2).sum(axis=a)) + else: + try: + ord + 1 + except TypeError as e: + raise ValueError('Invalid norm order for vectors.') from e + M = np.power(abs(x).power(ord).sum(axis=a), 1 / ord) + if hasattr(M, 'toarray'): + return M.toarray().ravel() + elif hasattr(M, 'A'): + return M.A.ravel() + else: + return M.ravel() + else: + raise ValueError("Improper number of dimensions to norm.") diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_onenormest.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_onenormest.py new file mode 100644 index 0000000000000000000000000000000000000000..c3e383aa6370b3ef11b8c2cf57d2cf85da66d02d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_onenormest.py @@ -0,0 +1,467 @@ +"""Sparse block 1-norm estimator. +""" + +import numpy as np +from scipy.sparse.linalg import aslinearoperator + + +__all__ = ['onenormest'] + + +def onenormest(A, t=2, itmax=5, compute_v=False, compute_w=False): + """ + Compute a lower bound of the 1-norm of a sparse matrix. + + Parameters + ---------- + A : ndarray or other linear operator + A linear operator that can be transposed and that can + produce matrix products. + t : int, optional + A positive parameter controlling the tradeoff between + accuracy versus time and memory usage. + Larger values take longer and use more memory + but give more accurate output. + itmax : int, optional + Use at most this many iterations. + compute_v : bool, optional + Request a norm-maximizing linear operator input vector if True. + compute_w : bool, optional + Request a norm-maximizing linear operator output vector if True. + + Returns + ------- + est : float + An underestimate of the 1-norm of the sparse matrix. + v : ndarray, optional + The vector such that ||Av||_1 == est*||v||_1. + It can be thought of as an input to the linear operator + that gives an output with particularly large norm. + w : ndarray, optional + The vector Av which has relatively large 1-norm. + It can be thought of as an output of the linear operator + that is relatively large in norm compared to the input. + + Notes + ----- + This is algorithm 2.4 of [1]. + + In [2] it is described as follows. + "This algorithm typically requires the evaluation of + about 4t matrix-vector products and almost invariably + produces a norm estimate (which is, in fact, a lower + bound on the norm) correct to within a factor 3." + + .. versionadded:: 0.13.0 + + References + ---------- + .. [1] Nicholas J. Higham and Francoise Tisseur (2000), + "A Block Algorithm for Matrix 1-Norm Estimation, + with an Application to 1-Norm Pseudospectra." + SIAM J. Matrix Anal. Appl. Vol. 21, No. 4, pp. 1185-1201. + + .. [2] Awad H. Al-Mohy and Nicholas J. Higham (2009), + "A new scaling and squaring algorithm for the matrix exponential." + SIAM J. Matrix Anal. Appl. Vol. 31, No. 3, pp. 970-989. 
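Because the estimator only needs products with ``A`` and ``A.H``, it can be applied to operators that are never assembled explicitly, which is how the ``expm`` helpers earlier in this patch use it. A small sketch (the random matrix and its size are arbitrary) that estimates the 1-norm of ``M @ M`` through a composed ``LinearOperator``:

    import numpy as np
    from scipy.sparse.linalg import aslinearoperator, onenormest

    rng = np.random.default_rng(0)
    M = rng.standard_normal((200, 200))

    # Composing two operators: each matvec applies M twice, and the
    # product M @ M is never formed as a dense matrix.
    op = aslinearoperator(M) * aslinearoperator(M)

    est = onenormest(op)
    exact = np.linalg.norm(M @ M, 1)
    print(est, exact)   # est is a lower bound, typically within a factor of 3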
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.sparse import csc_matrix + >>> from scipy.sparse.linalg import onenormest + >>> A = csc_matrix([[1., 0., 0.], [5., 8., 2.], [0., -1., 0.]], dtype=float) + >>> A.toarray() + array([[ 1., 0., 0.], + [ 5., 8., 2.], + [ 0., -1., 0.]]) + >>> onenormest(A) + 9.0 + >>> np.linalg.norm(A.toarray(), ord=1) + 9.0 + """ + + # Check the input. + A = aslinearoperator(A) + if A.shape[0] != A.shape[1]: + raise ValueError('expected the operator to act like a square matrix') + + # If the operator size is small compared to t, + # then it is easier to compute the exact norm. + # Otherwise estimate the norm. + n = A.shape[1] + if t >= n: + A_explicit = np.asarray(aslinearoperator(A).matmat(np.identity(n))) + if A_explicit.shape != (n, n): + raise Exception('internal error: ', + 'unexpected shape ' + str(A_explicit.shape)) + col_abs_sums = abs(A_explicit).sum(axis=0) + if col_abs_sums.shape != (n, ): + raise Exception('internal error: ', + 'unexpected shape ' + str(col_abs_sums.shape)) + argmax_j = np.argmax(col_abs_sums) + v = elementary_vector(n, argmax_j) + w = A_explicit[:, argmax_j] + est = col_abs_sums[argmax_j] + else: + est, v, w, nmults, nresamples = _onenormest_core(A, A.H, t, itmax) + + # Report the norm estimate along with some certificates of the estimate. + if compute_v or compute_w: + result = (est,) + if compute_v: + result += (v,) + if compute_w: + result += (w,) + return result + else: + return est + + +def _blocked_elementwise(func): + """ + Decorator for an elementwise function, to apply it blockwise along + first dimension, to avoid excessive memory usage in temporaries. + """ + block_size = 2**20 + + def wrapper(x): + if x.shape[0] < block_size: + return func(x) + else: + y0 = func(x[:block_size]) + y = np.zeros((x.shape[0],) + y0.shape[1:], dtype=y0.dtype) + y[:block_size] = y0 + del y0 + for j in range(block_size, x.shape[0], block_size): + y[j:j+block_size] = func(x[j:j+block_size]) + return y + return wrapper + + +@_blocked_elementwise +def sign_round_up(X): + """ + This should do the right thing for both real and complex matrices. + + From Higham and Tisseur: + "Everything in this section remains valid for complex matrices + provided that sign(A) is redefined as the matrix (aij / |aij|) + (and sign(0) = 1) transposes are replaced by conjugate transposes." + + """ + Y = X.copy() + Y[Y == 0] = 1 + Y /= np.abs(Y) + return Y + + +@_blocked_elementwise +def _max_abs_axis1(X): + return np.max(np.abs(X), axis=1) + + +def _sum_abs_axis0(X): + block_size = 2**20 + r = None + for j in range(0, X.shape[0], block_size): + y = np.sum(np.abs(X[j:j+block_size]), axis=0) + if r is None: + r = y + else: + r += y + return r + + +def elementary_vector(n, i): + v = np.zeros(n, dtype=float) + v[i] = 1 + return v + + +def vectors_are_parallel(v, w): + # Columns are considered parallel when they are equal or negative. + # Entries are required to be in {-1, 1}, + # which guarantees that the magnitudes of the vectors are identical. 
+ if v.ndim != 1 or v.shape != w.shape: + raise ValueError('expected conformant vectors with entries in {-1,1}') + n = v.shape[0] + return np.dot(v, w) == n + + +def every_col_of_X_is_parallel_to_a_col_of_Y(X, Y): + for v in X.T: + if not any(vectors_are_parallel(v, w) for w in Y.T): + return False + return True + + +def column_needs_resampling(i, X, Y=None): + # column i of X needs resampling if either + # it is parallel to a previous column of X or + # it is parallel to a column of Y + n, t = X.shape + v = X[:, i] + if any(vectors_are_parallel(v, X[:, j]) for j in range(i)): + return True + if Y is not None: + if any(vectors_are_parallel(v, w) for w in Y.T): + return True + return False + + +def resample_column(i, X): + X[:, i] = np.random.randint(0, 2, size=X.shape[0])*2 - 1 + + +def less_than_or_close(a, b): + return np.allclose(a, b) or (a < b) + + +def _algorithm_2_2(A, AT, t): + """ + This is Algorithm 2.2. + + Parameters + ---------- + A : ndarray or other linear operator + A linear operator that can produce matrix products. + AT : ndarray or other linear operator + The transpose of A. + t : int, optional + A positive parameter controlling the tradeoff between + accuracy versus time and memory usage. + + Returns + ------- + g : sequence + A non-negative decreasing vector + such that g[j] is a lower bound for the 1-norm + of the column of A of jth largest 1-norm. + The first entry of this vector is therefore a lower bound + on the 1-norm of the linear operator A. + This sequence has length t. + ind : sequence + The ith entry of ind is the index of the column A whose 1-norm + is given by g[i]. + This sequence of indices has length t, and its entries are + chosen from range(n), possibly with repetition, + where n is the order of the operator A. + + Notes + ----- + This algorithm is mainly for testing. + It uses the 'ind' array in a way that is similar to + its usage in algorithm 2.4. This algorithm 2.2 may be easier to test, + so it gives a chance of uncovering bugs related to indexing + which could have propagated less noticeably to algorithm 2.4. + + """ + A_linear_operator = aslinearoperator(A) + AT_linear_operator = aslinearoperator(AT) + n = A_linear_operator.shape[0] + + # Initialize the X block with columns of unit 1-norm. + X = np.ones((n, t)) + if t > 1: + X[:, 1:] = np.random.randint(0, 2, size=(n, t-1))*2 - 1 + X /= float(n) + + # Iteratively improve the lower bounds. + # Track extra things, to assert invariants for debugging. + g_prev = None + h_prev = None + k = 1 + ind = range(t) + while True: + Y = np.asarray(A_linear_operator.matmat(X)) + g = _sum_abs_axis0(Y) + best_j = np.argmax(g) + g.sort() + g = g[::-1] + S = sign_round_up(Y) + Z = np.asarray(AT_linear_operator.matmat(S)) + h = _max_abs_axis1(Z) + + # If this algorithm runs for fewer than two iterations, + # then its return values do not have the properties indicated + # in the description of the algorithm. + # In particular, the entries of g are not 1-norms of any + # column of A until the second iteration. + # Therefore we will require the algorithm to run for at least + # two iterations, even though this requirement is not stated + # in the description of the algorithm. + if k >= 2: + if less_than_or_close(max(h), np.dot(Z[:, best_j], X[:, best_j])): + break + ind = np.argsort(h)[::-1][:t] + h = h[ind] + for j in range(t): + X[:, j] = elementary_vector(n, ind[j]) + + # Check invariant (2.2). 
+ if k >= 2: + if not less_than_or_close(g_prev[0], h_prev[0]): + raise Exception('invariant (2.2) is violated') + if not less_than_or_close(h_prev[0], g[0]): + raise Exception('invariant (2.2) is violated') + + # Check invariant (2.3). + if k >= 3: + for j in range(t): + if not less_than_or_close(g[j], g_prev[j]): + raise Exception('invariant (2.3) is violated') + + # Update for the next iteration. + g_prev = g + h_prev = h + k += 1 + + # Return the lower bounds and the corresponding column indices. + return g, ind + + +def _onenormest_core(A, AT, t, itmax): + """ + Compute a lower bound of the 1-norm of a sparse matrix. + + Parameters + ---------- + A : ndarray or other linear operator + A linear operator that can produce matrix products. + AT : ndarray or other linear operator + The transpose of A. + t : int, optional + A positive parameter controlling the tradeoff between + accuracy versus time and memory usage. + itmax : int, optional + Use at most this many iterations. + + Returns + ------- + est : float + An underestimate of the 1-norm of the sparse matrix. + v : ndarray, optional + The vector such that ||Av||_1 == est*||v||_1. + It can be thought of as an input to the linear operator + that gives an output with particularly large norm. + w : ndarray, optional + The vector Av which has relatively large 1-norm. + It can be thought of as an output of the linear operator + that is relatively large in norm compared to the input. + nmults : int, optional + The number of matrix products that were computed. + nresamples : int, optional + The number of times a parallel column was observed, + necessitating a re-randomization of the column. + + Notes + ----- + This is algorithm 2.4. + + """ + # This function is a more or less direct translation + # of Algorithm 2.4 from the Higham and Tisseur (2000) paper. + A_linear_operator = aslinearoperator(A) + AT_linear_operator = aslinearoperator(AT) + if itmax < 2: + raise ValueError('at least two iterations are required') + if t < 1: + raise ValueError('at least one column is required') + n = A.shape[0] + if t >= n: + raise ValueError('t should be smaller than the order of A') + # Track the number of big*small matrix multiplications + # and the number of resamplings. + nmults = 0 + nresamples = 0 + # "We now explain our choice of starting matrix. We take the first + # column of X to be the vector of 1s [...] This has the advantage that + # for a matrix with nonnegative elements the algorithm converges + # with an exact estimate on the second iteration, and such matrices + # arise in applications [...]" + X = np.ones((n, t), dtype=float) + # "The remaining columns are chosen as rand{-1,1}, + # with a check for and correction of parallel columns, + # exactly as for S in the body of the algorithm." + if t > 1: + for i in range(1, t): + # These are technically initial samples, not resamples, + # so the resampling count is not incremented. + resample_column(i, X) + for i in range(t): + while column_needs_resampling(i, X): + resample_column(i, X) + nresamples += 1 + # "Choose starting matrix X with columns of unit 1-norm." 
+ X /= float(n) + # "indices of used unit vectors e_j" + ind_hist = np.zeros(0, dtype=np.intp) + est_old = 0 + S = np.zeros((n, t), dtype=float) + k = 1 + ind = None + while True: + Y = np.asarray(A_linear_operator.matmat(X)) + nmults += 1 + mags = _sum_abs_axis0(Y) + est = np.max(mags) + best_j = np.argmax(mags) + if est > est_old or k == 2: + if k >= 2: + ind_best = ind[best_j] + w = Y[:, best_j] + # (1) + if k >= 2 and est <= est_old: + est = est_old + break + est_old = est + S_old = S + if k > itmax: + break + S = sign_round_up(Y) + del Y + # (2) + if every_col_of_X_is_parallel_to_a_col_of_Y(S, S_old): + break + if t > 1: + # "Ensure that no column of S is parallel to another column of S + # or to a column of S_old by replacing columns of S by rand{-1,1}." + for i in range(t): + while column_needs_resampling(i, S, S_old): + resample_column(i, S) + nresamples += 1 + del S_old + # (3) + Z = np.asarray(AT_linear_operator.matmat(S)) + nmults += 1 + h = _max_abs_axis1(Z) + del Z + # (4) + if k >= 2 and max(h) == h[ind_best]: + break + # "Sort h so that h_first >= ... >= h_last + # and re-order ind correspondingly." + # + # Later on, we will need at most t+len(ind_hist) largest + # entries, so drop the rest + ind = np.argsort(h)[::-1][:t+len(ind_hist)].copy() + del h + if t > 1: + # (5) + # Break if the most promising t vectors have been visited already. + if np.isin(ind[:t], ind_hist).all(): + break + # Put the most promising unvisited vectors at the front of the list + # and put the visited vectors at the end of the list. + # Preserve the order of the indices induced by the ordering of h. + seen = np.isin(ind, ind_hist) + ind = np.concatenate((ind[~seen], ind[seen])) + for j in range(t): + X[:, j] = elementary_vector(n, ind[j]) + + new_ind = ind[:t][~np.isin(ind[:t], ind_hist)] + ind_hist = np.concatenate((ind_hist, new_ind)) + k += 1 + v = elementary_vector(n, ind_best) + return est, v, w, nmults, nresamples diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_svdp.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_svdp.py new file mode 100644 index 0000000000000000000000000000000000000000..9b85d6c7eefe59c7049f42e0c6ff00331085afa0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_svdp.py @@ -0,0 +1,315 @@ +""" +Python wrapper for PROPACK +-------------------------- + +PROPACK is a collection of Fortran routines for iterative computation +of partial SVDs of large matrices or linear operators. 
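``_svdp`` itself is private; the usual way to reach PROPACK from user code is ``scipy.sparse.linalg.svds`` with ``solver="propack"``. A small sketch of that route (the test matrix is arbitrary, and depending on the SciPy build PROPACK may need to be enabled via the ``SCIPY_USE_PROPACK`` environment variable):

    import numpy as np
    from scipy.sparse import random as sparse_random
    from scipy.sparse.linalg import svds

    rng = np.random.default_rng(0)
    A = sparse_random(100, 60, density=0.05, random_state=rng)

    # Five largest singular triplets of A, computed by the PROPACK backend
    # that this module wraps.
    u, s, vt = svds(A, k=5, solver="propack")
    print(s.shape, u.shape, vt.shape)   # (5,) (100, 5) (5, 60)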
+ +Based on BSD licensed pypropack project: + http://github.com/jakevdp/pypropack + Author: Jake Vanderplas + +PROPACK source is BSD licensed, and available at + http://soi.stanford.edu/~rmunk/PROPACK/ +""" + +__all__ = ['_svdp'] + +import numpy as np + +from scipy._lib._util import check_random_state +from scipy.sparse.linalg import aslinearoperator +from scipy.linalg import LinAlgError + +from ._propack import _spropack # type: ignore[attr-defined] +from ._propack import _dpropack # type: ignore[attr-defined] +from ._propack import _cpropack # type: ignore[attr-defined] +from ._propack import _zpropack # type: ignore[attr-defined] + + +_lansvd_dict = { + 'f': _spropack.slansvd, + 'd': _dpropack.dlansvd, + 'F': _cpropack.clansvd, + 'D': _zpropack.zlansvd, +} + + +_lansvd_irl_dict = { + 'f': _spropack.slansvd_irl, + 'd': _dpropack.dlansvd_irl, + 'F': _cpropack.clansvd_irl, + 'D': _zpropack.zlansvd_irl, +} + +_which_converter = { + 'LM': 'L', + 'SM': 'S', +} + + +class _AProd: + """ + Wrapper class for linear operator + + The call signature of the __call__ method matches the callback of + the PROPACK routines. + """ + def __init__(self, A): + try: + self.A = aslinearoperator(A) + except TypeError: + self.A = aslinearoperator(np.asarray(A)) + + def __call__(self, transa, m, n, x, y, sparm, iparm): + if transa == 'n': + y[:] = self.A.matvec(x) + else: + y[:] = self.A.rmatvec(x) + + @property + def shape(self): + return self.A.shape + + @property + def dtype(self): + try: + return self.A.dtype + except AttributeError: + return self.A.matvec(np.zeros(self.A.shape[1])).dtype + + +def _svdp(A, k, which='LM', irl_mode=True, kmax=None, + compute_u=True, compute_v=True, v0=None, full_output=False, tol=0, + delta=None, eta=None, anorm=0, cgs=False, elr=True, + min_relgap=0.002, shifts=None, maxiter=None, random_state=None): + """ + Compute the singular value decomposition of a linear operator using PROPACK + + Parameters + ---------- + A : array_like, sparse matrix, or LinearOperator + Operator for which SVD will be computed. If `A` is a LinearOperator + object, it must define both ``matvec`` and ``rmatvec`` methods. + k : int + Number of singular values/vectors to compute + which : {"LM", "SM"} + Which singular triplets to compute: + - 'LM': compute triplets corresponding to the `k` largest singular + values + - 'SM': compute triplets corresponding to the `k` smallest singular + values + `which='SM'` requires `irl_mode=True`. Computes largest singular + values by default. + irl_mode : bool, optional + If `True`, then compute SVD using IRL (implicitly restarted Lanczos) + mode. Default is `True`. + kmax : int, optional + Maximal number of iterations / maximal dimension of the Krylov + subspace. Default is ``10 * k``. + compute_u : bool, optional + If `True` (default) then compute left singular vectors, `u`. + compute_v : bool, optional + If `True` (default) then compute right singular vectors, `v`. + tol : float, optional + The desired relative accuracy for computed singular values. + If not specified, it will be set based on machine precision. + v0 : array_like, optional + Starting vector for iterations: must be of length ``A.shape[0]``. + If not specified, PROPACK will generate a starting vector. + full_output : bool, optional + If `True`, then return sigma_bound. Default is `False`. + delta : float, optional + Level of orthogonality to maintain between Lanczos vectors. + Default is set based on machine precision. + eta : float, optional + Orthogonality cutoff. 
During reorthogonalization, vectors with + component larger than `eta` along the Lanczos vector will be purged. + Default is set based on machine precision. + anorm : float, optional + Estimate of ``||A||``. Default is `0`. + cgs : bool, optional + If `True`, reorthogonalization is done using classical Gram-Schmidt. + If `False` (default), it is done using modified Gram-Schmidt. + elr : bool, optional + If `True` (default), then extended local orthogonality is enforced + when obtaining singular vectors. + min_relgap : float, optional + The smallest relative gap allowed between any shift in IRL mode. + Default is `0.002`. Accessed only if ``irl_mode=True``. + shifts : int, optional + Number of shifts per restart in IRL mode. Default is determined + to satisfy ``k <= min(kmax-shifts, m, n)``. Must be + >= 0, but choosing 0 might lead to performance degradation. + Accessed only if ``irl_mode=True``. + maxiter : int, optional + Maximum number of restarts in IRL mode. Default is `1000`. + Accessed only if ``irl_mode=True``. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + Pseudorandom number generator state used to generate resamples. + + If `random_state` is ``None`` (or `np.random`), the + `numpy.random.RandomState` singleton is used. + If `random_state` is an int, a new ``RandomState`` instance is used, + seeded with `random_state`. + If `random_state` is already a ``Generator`` or ``RandomState`` + instance then that instance is used. + + Returns + ------- + u : ndarray + The `k` largest (``which="LM"``) or smallest (``which="SM"``) left + singular vectors, ``shape == (A.shape[0], k)``, returned only if + ``compute_u=True``. + sigma : ndarray + The top `k` singular values, ``shape == (k,)`` + vt : ndarray + The `k` largest (``which="LM"``) or smallest (``which="SM"``) right + singular vectors, ``shape == (k, A.shape[1])``, returned only if + ``compute_v=True``. + sigma_bound : ndarray + The error bounds on the singular values sigma, returned only if + ``full_output=True``. + + """ + random_state = check_random_state(random_state) + + which = which.upper() + if which not in {'LM', 'SM'}: + raise ValueError("`which` must be either 'LM' or 'SM'") + if not irl_mode and which == 'SM': + raise ValueError("`which`='SM' requires irl_mode=True") + + aprod = _AProd(A) + typ = aprod.dtype.char + + try: + lansvd_irl = _lansvd_irl_dict[typ] + lansvd = _lansvd_dict[typ] + except KeyError: + # work with non-supported types using native system precision + if np.iscomplexobj(np.empty(0, dtype=typ)): + typ = np.dtype(complex).char + else: + typ = np.dtype(float).char + lansvd_irl = _lansvd_irl_dict[typ] + lansvd = _lansvd_dict[typ] + + m, n = aprod.shape + if (k < 1) or (k > min(m, n)): + raise ValueError("k must be positive and not greater than m or n") + + if kmax is None: + kmax = 10*k + if maxiter is None: + maxiter = 1000 + + # guard against unnecessarily large kmax + kmax = min(m + 1, n + 1, kmax) + if kmax < k: + raise ValueError( + "kmax must be greater than or equal to k, " + f"but kmax ({kmax}) < k ({k})") + + # convert python args to fortran args + jobu = 'y' if compute_u else 'n' + jobv = 'y' if compute_v else 'n' + + # these will be the output arrays + u = np.zeros((m, kmax + 1), order='F', dtype=typ) + v = np.zeros((n, kmax), order='F', dtype=typ) + + # Specify the starting vector.
if v0 is all zero, PROPACK will generate + # a random starting vector: the random seed cannot be controlled in that + # case, so we'll instead use numpy to generate a random vector + if v0 is None: + u[:, 0] = random_state.uniform(size=m) + if np.iscomplexobj(np.empty(0, dtype=typ)): # complex type + u[:, 0] += 1j * random_state.uniform(size=m) + else: + try: + u[:, 0] = v0 + except ValueError: + raise ValueError(f"v0 must be of length {m}") + + # process options for the fit + if delta is None: + delta = np.sqrt(np.finfo(typ).eps) + if eta is None: + eta = np.finfo(typ).eps ** 0.75 + + if irl_mode: + doption = np.array((delta, eta, anorm, min_relgap), dtype=typ.lower()) + + # validate or find default shifts + if shifts is None: + shifts = kmax - k + if k > min(kmax - shifts, m, n): + raise ValueError('shifts must satisfy ' + 'k <= min(kmax-shifts, m, n)!') + elif shifts < 0: + raise ValueError('shifts must be >= 0!') + + else: + doption = np.array((delta, eta, anorm), dtype=typ.lower()) + + ioption = np.array((int(bool(cgs)), int(bool(elr))), dtype='i') + + # If computing `u` or `v` (left and right singular vectors, + # respectively), `blocksize` controls how large a fraction of the + # work is done via fast BLAS level 3 operations. A larger blocksize + # may lead to faster computation at the expense of greater memory + # consumption. `blocksize` must be ``>= 1``. Choosing blocksize + # of 16, but docs don't specify; it's almost surely a + # power of 2. + blocksize = 16 + + # Determine lwork & liwork: + # the required lengths are specified in the PROPACK documentation + if compute_u or compute_v: + lwork = m + n + 9*kmax + 5*kmax*kmax + 4 + max( + 3*kmax*kmax + 4*kmax + 4, + blocksize*max(m, n)) + liwork = 8*kmax + else: + lwork = m + n + 9*kmax + 2*kmax*kmax + 4 + max(m + n, 4*kmax + 4) + liwork = 2*kmax + 1 + work = np.empty(lwork, dtype=typ.lower()) + iwork = np.empty(liwork, dtype=np.int32) + + # dummy arguments: these are passed to aprod, and not used in this wrapper + dparm = np.empty(1, dtype=typ.lower()) + iparm = np.empty(1, dtype=np.int32) + + if typ.isupper(): + # PROPACK documentation is unclear on the required length of zwork. + # Use the same length Julia's wrapper uses + # see https://github.com/JuliaSmoothOptimizers/PROPACK.jl/ + zwork = np.empty(m + n + 32*m, dtype=typ) + works = work, zwork, iwork + else: + works = work, iwork + + if irl_mode: + u, sigma, bnd, v, info = lansvd_irl(_which_converter[which], jobu, + jobv, m, n, shifts, k, maxiter, + aprod, u, v, tol, *works, doption, + ioption, dparm, iparm) + else: + u, sigma, bnd, v, info = lansvd(jobu, jobv, m, n, k, aprod, u, v, tol, + *works, doption, ioption, dparm, iparm) + + if info > 0: + raise LinAlgError( + f"An invariant subspace of dimension {info} was found.") + elif info < 0: + raise LinAlgError( + f"k={k} singular triplets did not converge within " + f"kmax={kmax} iterations") + + # info == 0: The K largest (or smallest) singular triplets were computed + # successfully! + + return u[:, :k], sigma, v[:, :k].conj().T, bnd diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/interface.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/interface.py new file mode 100644 index 0000000000000000000000000000000000000000..9f4c635911066ef50cd7601ea23cd2a862543dfd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/interface.py @@ -0,0 +1,22 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. 
+# Use the `scipy.sparse.linalg` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'LinearOperator', 'aslinearoperator', + 'isshape', 'isintlike', 'asmatrix', + 'is_pydata_spmatrix', 'MatrixLinearOperator', 'IdentityOperator' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="sparse.linalg", module="interface", + private_modules=["_interface"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/isolve.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/isolve.py new file mode 100644 index 0000000000000000000000000000000000000000..61e5655caf4b7701ddbad814560b98a58e5992f4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/isolve.py @@ -0,0 +1,22 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.sparse.linalg` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'bicg', 'bicgstab', 'cg', 'cgs', 'gcrotmk', 'gmres', + 'lgmres', 'lsmr', 'lsqr', + 'minres', 'qmr', 'tfqmr', 'utils', 'iterative', 'test' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="sparse.linalg", module="isolve", + private_modules=["_isolve"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/matfuncs.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/matfuncs.py new file mode 100644 index 0000000000000000000000000000000000000000..3535a83aaf9854daa23827b314d93551f00ebe36 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/matfuncs.py @@ -0,0 +1,22 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.sparse.linalg` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'expm', 'inv', 'solve', 'solve_triangular', + 'spsolve', 'is_pydata_spmatrix', 'LinearOperator', + 'UPPER_TRIANGULAR', 'MatrixPowerOperator', 'ProductOperator' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="sparse.linalg", module="matfuncs", + private_modules=["_matfuncs"], all=__all__, + attribute=name)
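These shims keep the old import paths alive while funnelling attribute access through ``_sub_module_deprecation``, which is designed to emit a ``DeprecationWarning`` and forward to the private implementation module. A short sketch of both spellings (the exact warning text is SciPy's own and not reproduced here):

    import warnings

    # Deprecated path: resolved via the shim's module-level __getattr__,
    # which should warn and forward to scipy.sparse.linalg._matfuncs.
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        from scipy.sparse.linalg.matfuncs import expm as expm_old
    print([w.category.__name__ for w in caught])   # expect a DeprecationWarning

    # Supported path: import from the public namespace instead.
    from scipy.sparse.linalg import expm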