diff --git a/.gitattributes b/.gitattributes index 69478f42d814cdc77df00567d19da836370d192c..9c360263ef2f7543f94524c98b0ebb7b35a9d6df 100644 --- a/.gitattributes +++ b/.gitattributes @@ -156,3 +156,4 @@ env-llmeval/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcheckpoint.so env-llmeval/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcupti.so.12 filter=lfs diff=lfs merge=lfs -text env-llmeval/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_host.so filter=lfs diff=lfs merge=lfs -text env-llmeval/lib/python3.10/site-packages/tokenizers/tokenizers.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +llmeval-env/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so filter=lfs diff=lfs merge=lfs -text diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/data/bug-1310.npz b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/data/bug-1310.npz new file mode 100644 index 0000000000000000000000000000000000000000..8bddf805c36b29dc449556c27a2b489691f841af --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/data/bug-1310.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d6803c0b398f2704c236f1d1b9e8e5ede06bd165a0abb0f228281abbd455ae9 +size 2648 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/data/estimate_gradients_hang.npy b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/data/estimate_gradients_hang.npy new file mode 100644 index 0000000000000000000000000000000000000000..c5ef8f63f263a476823ddeacf2571551c2fe4690 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/data/estimate_gradients_hang.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:406c10857417ff5ea98d8cd28945c9d0e4f5c24f92a48ad0e8fab955bf2477f1 +size 35680 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/data/gcvspl.npz 
b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/data/gcvspl.npz new file mode 100644 index 0000000000000000000000000000000000000000..50e9348dcca79eae861e67092add93cdb8ff1ca3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/data/gcvspl.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:03ce8155a6cba0c1bf0a2441a10c228191f916dec36cb820723429811296bba8 +size 3138 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98917f621b2934f45d92d945663960a85c7d83ed Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_dia.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_dia.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12b960c0872bd049189919a79fa940ab660a82ed Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_dia.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_sputils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_sputils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b3da11a8c293813d85f8f2c7daf65ec86ab2014 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_sputils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/base.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/base.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..a38fbf4cc0ee5e34a4961dab89db490f405737d4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/base.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/bsr.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/bsr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..40a31e42cd10a3206367ff969e90dd7b5241cddf Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/bsr.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/csr.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/csr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1d854f683d36c1dfc3c4edc52db2ab42d636e2b6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/csr.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/extract.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/extract.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1fe4aab518a3aa7e0cef41e8d354b33e6d86441c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/extract.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/sputils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/sputils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b35719b208519ff5cf5292f3eb329db87b1d26d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/sputils.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_connected_components.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_connected_components.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d982bcde5e193a3a302bc0a836436e88bc95cab2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_connected_components.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_conversions.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_conversions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58f86269750b1793513fe016c8331dde3973a59b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_conversions.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_flow.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_flow.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9cee7dfbaf71766da9b07f89b7ad79c70ae075f3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_flow.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_graph_laplacian.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_graph_laplacian.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9cec3d454cc050d869ff9077cef174a27377f2f0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_graph_laplacian.cpython-310.pyc 
differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_matching.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_matching.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9478d8d3264a5874aa23ec5f6e45f43965077fb0 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_matching.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_pydata_sparse.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_pydata_sparse.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e96ac17e05e8b4f9520863e15131a34955696f7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_pydata_sparse.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_reordering.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_reordering.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d8326045abd536908725f8793f74cf45c24f7ed5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_reordering.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_shortest_path.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_shortest_path.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9cb5fc43d7e95617c8d3e3ec6c05c8115a9161a1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_shortest_path.cpython-310.pyc 
differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_traversal.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_traversal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01dbe49e5ac75ed99d008bdfc8ccc7f6c8dbe7b7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_traversal.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__init__.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0d20b194dcbac5c2f48947d37e6b233edc2baf2b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__init__.py @@ -0,0 +1,146 @@ +""" +Sparse linear algebra (:mod:`scipy.sparse.linalg`) +================================================== + +.. currentmodule:: scipy.sparse.linalg + +Abstract linear operators +------------------------- + +.. autosummary:: + :toctree: generated/ + + LinearOperator -- abstract representation of a linear operator + aslinearoperator -- convert an object to an abstract linear operator + +Matrix Operations +----------------- + +.. autosummary:: + :toctree: generated/ + + inv -- compute the sparse matrix inverse + expm -- compute the sparse matrix exponential + expm_multiply -- compute the product of a matrix exponential and a matrix + matrix_power -- compute the matrix power by raising a matrix to an exponent + +Matrix norms +------------ + +.. autosummary:: + :toctree: generated/ + + norm -- Norm of a sparse matrix + onenormest -- Estimate the 1-norm of a sparse matrix + +Solving linear problems +----------------------- + +Direct methods for linear equation systems: + +.. 
autosummary:: + :toctree: generated/ + + spsolve -- Solve the sparse linear system Ax=b + spsolve_triangular -- Solve sparse linear system Ax=b for a triangular A. + factorized -- Pre-factorize matrix to a function solving a linear system + MatrixRankWarning -- Warning on exactly singular matrices + use_solver -- Select direct solver to use + +Iterative methods for linear equation systems: + +.. autosummary:: + :toctree: generated/ + + bicg -- Use BIConjugate Gradient iteration to solve Ax = b + bicgstab -- Use BIConjugate Gradient STABilized iteration to solve Ax = b + cg -- Use Conjugate Gradient iteration to solve Ax = b + cgs -- Use Conjugate Gradient Squared iteration to solve Ax = b + gmres -- Use Generalized Minimal RESidual iteration to solve Ax = b + lgmres -- Solve a matrix equation using the LGMRES algorithm + minres -- Use MINimum RESidual iteration to solve Ax = b + qmr -- Use Quasi-Minimal Residual iteration to solve Ax = b + gcrotmk -- Solve a matrix equation using the GCROT(m,k) algorithm + tfqmr -- Use Transpose-Free Quasi-Minimal Residual iteration to solve Ax = b + +Iterative methods for least-squares problems: + +.. autosummary:: + :toctree: generated/ + + lsqr -- Find the least-squares solution to a sparse linear equation system + lsmr -- Find the least-squares solution to a sparse linear equation system + +Matrix factorizations +--------------------- + +Eigenvalue problems: + +.. autosummary:: + :toctree: generated/ + + eigs -- Find k eigenvalues and eigenvectors of the square matrix A + eigsh -- Find k eigenvalues and eigenvectors of a symmetric matrix + lobpcg -- Solve symmetric partial eigenproblems with optional preconditioning + +Singular values problems: + +.. autosummary:: + :toctree: generated/ + + svds -- Compute k singular values/vectors for a sparse matrix + +The `svds` function supports the following solvers: + +.. 
toctree:: + + sparse.linalg.svds-arpack + sparse.linalg.svds-lobpcg + sparse.linalg.svds-propack + +Complete or incomplete LU factorizations + +.. autosummary:: + :toctree: generated/ + + splu -- Compute a LU decomposition for a sparse matrix + spilu -- Compute an incomplete LU decomposition for a sparse matrix + SuperLU -- Object representing an LU factorization + +Sparse arrays with structure +---------------------------- + +.. autosummary:: + :toctree: generated/ + + LaplacianNd -- Laplacian on a uniform rectangular grid in ``N`` dimensions + +Exceptions +---------- + +.. autosummary:: + :toctree: generated/ + + ArpackNoConvergence + ArpackError + +""" + +from ._isolve import * +from ._dsolve import * +from ._interface import * +from ._eigen import * +from ._matfuncs import * +from ._onenormest import * +from ._norm import * +from ._expm_multiply import * +from ._special_sparse_arrays import * + +# Deprecated namespaces, to be removed in v2.0.0 +from . import isolve, dsolve, interface, eigen, matfuncs + +__all__ = [s for s in dir() if not s.startswith('_')] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a5cf1ecbca194613886811bbc5bc2a1cf47b6db Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_expm_multiply.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_expm_multiply.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..35af79bc8862f2cacf2a4e5875850508ed1f43e0 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_expm_multiply.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_interface.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_interface.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f06997d451cd687df9ae5468786e8faadd4acf20 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_interface.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_matfuncs.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_matfuncs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ceb53bc3c348514f436e457546ee35699876c4e1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_matfuncs.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_norm.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_norm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..86f3c511f51c9013cf99d117cb16eafdf15a8183 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_norm.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_special_sparse_arrays.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_special_sparse_arrays.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67f813300277af52eb0c8f3a6efa50f936100883 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_special_sparse_arrays.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_svdp.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_svdp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3bcc418f01fabdfc30218a71ee30e327885636f5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_svdp.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/dsolve.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/dsolve.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d764fca51a29c172357ba9109c1c546a7814c13b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/dsolve.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/eigen.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/eigen.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..226fe22e4a37748e3e0220a94e2a2f7356488bd5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/eigen.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/interface.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/interface.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ed77009aacb2e3a6c38525c176fc4f1a6e2d485 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/interface.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/isolve.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/isolve.cpython-310.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..0e74ed99d8090f29d7e61bc77a987b4488623e95 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/isolve.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/matfuncs.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/matfuncs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea9df44d1c043a9f3b8a50740ebdac4350a499d7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/matfuncs.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__init__.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ca1a0f435498acbeb4dc2fbdadf0baac86efcaa1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__init__.py @@ -0,0 +1,71 @@ +""" +Linear Solvers +============== + +The default solver is SuperLU (included in the scipy distribution), +which can solve real or complex linear systems in both single and +double precisions. It is automatically replaced by UMFPACK, if +available. Note that UMFPACK works in double precision only, so +switch it off by:: + + >>> from scipy.sparse.linalg import spsolve, use_solver + >>> use_solver(useUmfpack=False) + +to solve in the single precision. See also use_solver documentation. 
+ +Example session:: + + >>> from scipy.sparse import csc_matrix, spdiags + >>> from numpy import array + >>> + >>> print("Inverting a sparse linear system:") + >>> print("The sparse matrix (constructed from diagonals):") + >>> a = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5) + >>> b = array([1, 2, 3, 4, 5]) + >>> print("Solve: single precision complex:") + >>> use_solver( useUmfpack = False ) + >>> a = a.astype('F') + >>> x = spsolve(a, b) + >>> print(x) + >>> print("Error: ", a@x-b) + >>> + >>> print("Solve: double precision complex:") + >>> use_solver( useUmfpack = True ) + >>> a = a.astype('D') + >>> x = spsolve(a, b) + >>> print(x) + >>> print("Error: ", a@x-b) + >>> + >>> print("Solve: double precision:") + >>> a = a.astype('d') + >>> x = spsolve(a, b) + >>> print(x) + >>> print("Error: ", a@x-b) + >>> + >>> print("Solve: single precision:") + >>> use_solver( useUmfpack = False ) + >>> a = a.astype('f') + >>> x = spsolve(a, b.astype('f')) + >>> print(x) + >>> print("Error: ", a@x-b) + +""" + +#import umfpack +#__doc__ = '\n\n'.join( (__doc__, umfpack.__doc__) ) +#del umfpack + +from .linsolve import * +from ._superlu import SuperLU +from . import _add_newdocs +from . 
import linsolve + +__all__ = [ + 'MatrixRankWarning', 'SuperLU', 'factorized', + 'spilu', 'splu', 'spsolve', + 'spsolve_triangular', 'use_solver' +] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..900cd8a8ef03ddcb5561087c070871e5cee1b4a8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/_add_newdocs.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/_add_newdocs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..54878e8b46169940313b2ca13ee07e58dbbf990f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/_add_newdocs.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/_add_newdocs.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/_add_newdocs.py new file mode 100644 index 0000000000000000000000000000000000000000..e7f7b1d16fd33b4beb5620e0a931a4c800d11cd4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/_add_newdocs.py @@ -0,0 +1,153 @@ +from numpy.lib import add_newdoc + +add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', + """ + LU factorization of a sparse matrix. + + Factorization is represented as:: + + Pr @ A @ Pc = L @ U + + To construct these `SuperLU` objects, call the `splu` and `spilu` + functions. 
+ + Attributes + ---------- + shape + nnz + perm_c + perm_r + L + U + + Methods + ------- + solve + + Notes + ----- + + .. versionadded:: 0.14.0 + + Examples + -------- + The LU decomposition can be used to solve matrix equations. Consider: + + >>> import numpy as np + >>> from scipy.sparse import csc_matrix + >>> from scipy.sparse.linalg import splu + >>> A = csc_matrix([[1,2,0,4], [1,0,0,1], [1,0,2,1], [2,2,1,0.]]) + + This can be solved for a given right-hand side: + + >>> lu = splu(A) + >>> b = np.array([1, 2, 3, 4]) + >>> x = lu.solve(b) + >>> A.dot(x) + array([ 1., 2., 3., 4.]) + + The ``lu`` object also contains an explicit representation of the + decomposition. The permutations are represented as mappings of + indices: + + >>> lu.perm_r + array([2, 1, 3, 0], dtype=int32) # may vary + >>> lu.perm_c + array([0, 1, 3, 2], dtype=int32) # may vary + + The L and U factors are sparse matrices in CSC format: + + >>> lu.L.toarray() + array([[ 1. , 0. , 0. , 0. ], # may vary + [ 0.5, 1. , 0. , 0. ], + [ 0.5, -1. , 1. , 0. ], + [ 0.5, 1. , 0. , 1. ]]) + >>> lu.U.toarray() + array([[ 2. , 2. , 0. , 1. ], # may vary + [ 0. , -1. , 1. , -0.5], + [ 0. , 0. , 5. , -1. ], + [ 0. , 0. , 0. , 2. ]]) + + The permutation matrices can be constructed: + + >>> Pr = csc_matrix((np.ones(4), (lu.perm_r, np.arange(4)))) + >>> Pc = csc_matrix((np.ones(4), (np.arange(4), lu.perm_c))) + + We can reassemble the original matrix: + + >>> (Pr.T @ (lu.L @ lu.U) @ Pc.T).toarray() + array([[ 1., 2., 0., 4.], + [ 1., 0., 0., 1.], + [ 1., 0., 2., 1.], + [ 2., 2., 1., 0.]]) + """) + +add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('solve', + """ + solve(rhs[, trans]) + + Solves linear system of equations with one or several right-hand sides. 
+ + Parameters + ---------- + rhs : ndarray, shape (n,) or (n, k) + Right hand side(s) of equation + trans : {'N', 'T', 'H'}, optional + Type of system to solve:: + + 'N': A @ x == rhs (default) + 'T': A^T @ x == rhs + 'H': A^H @ x == rhs + + i.e., normal, transposed, and hermitian conjugate. + + Returns + ------- + x : ndarray, shape ``rhs.shape`` + Solution vector(s) + """)) + +add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('L', + """ + Lower triangular factor with unit diagonal as a + `scipy.sparse.csc_matrix`. + + .. versionadded:: 0.14.0 + """)) + +add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('U', + """ + Upper triangular factor as a `scipy.sparse.csc_matrix`. + + .. versionadded:: 0.14.0 + """)) + +add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('shape', + """ + Shape of the original matrix as a tuple of ints. + """)) + +add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('nnz', + """ + Number of nonzero elements in the matrix. + """)) + +add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('perm_c', + """ + Permutation Pc represented as an array of indices. + + The column permutation matrix can be reconstructed via: + + >>> Pc = np.zeros((n, n)) + >>> Pc[np.arange(n), perm_c] = 1 + """)) + +add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('perm_r', + """ + Permutation Pr represented as an array of indices. 
+ + The row permutation matrix can be reconstructed via: + + >>> Pr = np.zeros((n, n)) + >>> Pr[perm_r, np.arange(n)] = 1 + """)) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/_superlu.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/_superlu.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..96a77687bf5e9eb05d5e2d58b19a9cce3acd4234 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/_superlu.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/linsolve.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/linsolve.py new file mode 100644 index 0000000000000000000000000000000000000000..e37721c76c133ee3ddbd44a6df03dfd99ee41e06 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/linsolve.py @@ -0,0 +1,746 @@ +from warnings import warn + +import numpy as np +from numpy import asarray +from scipy.sparse import (issparse, + SparseEfficiencyWarning, csc_matrix, csr_matrix) +from scipy.sparse._sputils import is_pydata_spmatrix, convert_pydata_sparse_to_scipy +from scipy.linalg import LinAlgError +import copy + +from . import _superlu + +noScikit = False +try: + import scikits.umfpack as umfpack +except ImportError: + noScikit = True + +useUmfpack = not noScikit + +__all__ = ['use_solver', 'spsolve', 'splu', 'spilu', 'factorized', + 'MatrixRankWarning', 'spsolve_triangular'] + + +class MatrixRankWarning(UserWarning): + pass + + +def use_solver(**kwargs): + """ + Select default sparse direct solver to be used. + + Parameters + ---------- + useUmfpack : bool, optional + Use UMFPACK [1]_, [2]_, [3]_, [4]_. over SuperLU. Has effect only + if ``scikits.umfpack`` is installed. 
Default: True + assumeSortedIndices : bool, optional + Allow UMFPACK to skip the step of sorting indices for a CSR/CSC matrix. + Has effect only if useUmfpack is True and ``scikits.umfpack`` is + installed. Default: False + + Notes + ----- + The default sparse solver is UMFPACK when available + (``scikits.umfpack`` is installed). This can be changed by passing + useUmfpack = False, which then causes the always present SuperLU + based solver to be used. + + UMFPACK requires a CSR/CSC matrix to have sorted column/row indices. If + sure that the matrix fulfills this, pass ``assumeSortedIndices=True`` + to gain some speed. + + References + ---------- + .. [1] T. A. Davis, Algorithm 832: UMFPACK - an unsymmetric-pattern + multifrontal method with a column pre-ordering strategy, ACM + Trans. on Mathematical Software, 30(2), 2004, pp. 196--199. + https://dl.acm.org/doi/abs/10.1145/992200.992206 + + .. [2] T. A. Davis, A column pre-ordering strategy for the + unsymmetric-pattern multifrontal method, ACM Trans. + on Mathematical Software, 30(2), 2004, pp. 165--195. + https://dl.acm.org/doi/abs/10.1145/992200.992205 + + .. [3] T. A. Davis and I. S. Duff, A combined unifrontal/multifrontal + method for unsymmetric sparse matrices, ACM Trans. on + Mathematical Software, 25(1), 1999, pp. 1--19. + https://doi.org/10.1145/305658.287640 + + .. [4] T. A. Davis and I. S. Duff, An unsymmetric-pattern multifrontal + method for sparse LU factorization, SIAM J. Matrix Analysis and + Computations, 18(1), 1997, pp. 140--158. + https://doi.org/10.1137/S0895479894246905T. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.sparse.linalg import use_solver, spsolve + >>> from scipy.sparse import csc_matrix + >>> R = np.random.randn(5, 5) + >>> A = csc_matrix(R) + >>> b = np.random.randn(5) + >>> use_solver(useUmfpack=False) # enforce superLU over UMFPACK + >>> x = spsolve(A, b) + >>> np.allclose(A.dot(x), b) + True + >>> use_solver(useUmfpack=True) # reset umfPack usage to default + """ + if 'useUmfpack' in kwargs: + globals()['useUmfpack'] = kwargs['useUmfpack'] + if useUmfpack and 'assumeSortedIndices' in kwargs: + umfpack.configure(assumeSortedIndices=kwargs['assumeSortedIndices']) + +def _get_umf_family(A): + """Get umfpack family string given the sparse matrix dtype.""" + _families = { + (np.float64, np.int32): 'di', + (np.complex128, np.int32): 'zi', + (np.float64, np.int64): 'dl', + (np.complex128, np.int64): 'zl' + } + + # A.dtype.name can only be "float64" or + # "complex128" in control flow + f_type = getattr(np, A.dtype.name) + # control flow may allow for more index + # types to get through here + i_type = getattr(np, A.indices.dtype.name) + + try: + family = _families[(f_type, i_type)] + + except KeyError as e: + msg = ('only float64 or complex128 matrices with int32 or int64 ' + f'indices are supported! (got: matrix: {f_type}, indices: {i_type})') + raise ValueError(msg) from e + + # See gh-8278. Considered converting only if + # A.shape[0]*A.shape[1] > np.iinfo(np.int32).max, + # but that didn't always fix the issue. 
+ family = family[0] + "l" + A_new = copy.copy(A) + A_new.indptr = np.asarray(A.indptr, dtype=np.int64) + A_new.indices = np.asarray(A.indices, dtype=np.int64) + + return family, A_new + +def _safe_downcast_indices(A): + # check for safe downcasting + max_value = np.iinfo(np.intc).max + + if A.indptr[-1] > max_value: # indptr[-1] is max b/c indptr always sorted + raise ValueError("indptr values too large for SuperLU") + + if max(*A.shape) > max_value: # only check large enough arrays + if np.any(A.indices > max_value): + raise ValueError("indices values too large for SuperLU") + + indices = A.indices.astype(np.intc, copy=False) + indptr = A.indptr.astype(np.intc, copy=False) + return indices, indptr + +def spsolve(A, b, permc_spec=None, use_umfpack=True): + """Solve the sparse linear system Ax=b, where b may be a vector or a matrix. + + Parameters + ---------- + A : ndarray or sparse matrix + The square matrix A will be converted into CSC or CSR form + b : ndarray or sparse matrix + The matrix or vector representing the right hand side of the equation. + If a vector, b.shape must be (n,) or (n, 1). + permc_spec : str, optional + How to permute the columns of the matrix for sparsity preservation. + (default: 'COLAMD') + + - ``NATURAL``: natural ordering. + - ``MMD_ATA``: minimum degree ordering on the structure of A^T A. + - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A. + - ``COLAMD``: approximate minimum degree column ordering [1]_, [2]_. + + use_umfpack : bool, optional + if True (default) then use UMFPACK for the solution [3]_, [4]_, [5]_, + [6]_ . This is only referenced if b is a vector and + ``scikits.umfpack`` is installed. + + Returns + ------- + x : ndarray or sparse matrix + the solution of the sparse linear equation. 
+ If b is a vector, then x is a vector of size A.shape[1] + If b is a matrix, then x is a matrix of size (A.shape[1], b.shape[1]) + + Notes + ----- + For solving the matrix expression AX = B, this solver assumes the resulting + matrix X is sparse, as is often the case for very sparse inputs. If the + resulting X is dense, the construction of this sparse result will be + relatively expensive. In that case, consider converting A to a dense + matrix and using scipy.linalg.solve or its variants. + + References + ---------- + .. [1] T. A. Davis, J. R. Gilbert, S. Larimore, E. Ng, Algorithm 836: + COLAMD, an approximate column minimum degree ordering algorithm, + ACM Trans. on Mathematical Software, 30(3), 2004, pp. 377--380. + :doi:`10.1145/1024074.1024080` + + .. [2] T. A. Davis, J. R. Gilbert, S. Larimore, E. Ng, A column approximate + minimum degree ordering algorithm, ACM Trans. on Mathematical + Software, 30(3), 2004, pp. 353--376. :doi:`10.1145/1024074.1024079` + + .. [3] T. A. Davis, Algorithm 832: UMFPACK - an unsymmetric-pattern + multifrontal method with a column pre-ordering strategy, ACM + Trans. on Mathematical Software, 30(2), 2004, pp. 196--199. + https://dl.acm.org/doi/abs/10.1145/992200.992206 + + .. [4] T. A. Davis, A column pre-ordering strategy for the + unsymmetric-pattern multifrontal method, ACM Trans. + on Mathematical Software, 30(2), 2004, pp. 165--195. + https://dl.acm.org/doi/abs/10.1145/992200.992205 + + .. [5] T. A. Davis and I. S. Duff, A combined unifrontal/multifrontal + method for unsymmetric sparse matrices, ACM Trans. on + Mathematical Software, 25(1), 1999, pp. 1--19. + https://doi.org/10.1145/305658.287640 + + .. [6] T. A. Davis and I. S. Duff, An unsymmetric-pattern multifrontal + method for sparse LU factorization, SIAM J. Matrix Analysis and + Computations, 18(1), 1997, pp. 140--158. + https://doi.org/10.1137/S0895479894246905T. 
+ + + Examples + -------- + >>> import numpy as np + >>> from scipy.sparse import csc_matrix + >>> from scipy.sparse.linalg import spsolve + >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float) + >>> B = csc_matrix([[2, 0], [-1, 0], [2, 0]], dtype=float) + >>> x = spsolve(A, B) + >>> np.allclose(A.dot(x).toarray(), B.toarray()) + True + """ + is_pydata_sparse = is_pydata_spmatrix(b) + pydata_sparse_cls = b.__class__ if is_pydata_sparse else None + A = convert_pydata_sparse_to_scipy(A) + b = convert_pydata_sparse_to_scipy(b) + + if not (issparse(A) and A.format in ("csc", "csr")): + A = csc_matrix(A) + warn('spsolve requires A be CSC or CSR matrix format', + SparseEfficiencyWarning, stacklevel=2) + + # b is a vector only if b have shape (n,) or (n, 1) + b_is_sparse = issparse(b) + if not b_is_sparse: + b = asarray(b) + b_is_vector = ((b.ndim == 1) or (b.ndim == 2 and b.shape[1] == 1)) + + # sum duplicates for non-canonical format + A.sum_duplicates() + A = A._asfptype() # upcast to a floating point format + result_dtype = np.promote_types(A.dtype, b.dtype) + if A.dtype != result_dtype: + A = A.astype(result_dtype) + if b.dtype != result_dtype: + b = b.astype(result_dtype) + + # validate input shapes + M, N = A.shape + if (M != N): + raise ValueError(f"matrix must be square (has shape {(M, N)})") + + if M != b.shape[0]: + raise ValueError(f"matrix - rhs dimension mismatch ({A.shape} - {b.shape[0]})") + + use_umfpack = use_umfpack and useUmfpack + + if b_is_vector and use_umfpack: + if b_is_sparse: + b_vec = b.toarray() + else: + b_vec = b + b_vec = asarray(b_vec, dtype=A.dtype).ravel() + + if noScikit: + raise RuntimeError('Scikits.umfpack not installed.') + + if A.dtype.char not in 'dD': + raise ValueError("convert matrix data to double, please, using" + " .astype(), or set linsolve.useUmfpack = False") + + umf_family, A = _get_umf_family(A) + umf = umfpack.UmfpackContext(umf_family) + x = umf.linsolve(umfpack.UMFPACK_A, A, b_vec, + 
autoTranspose=True) + else: + if b_is_vector and b_is_sparse: + b = b.toarray() + b_is_sparse = False + + if not b_is_sparse: + if A.format == "csc": + flag = 1 # CSC format + else: + flag = 0 # CSR format + + indices = A.indices.astype(np.intc, copy=False) + indptr = A.indptr.astype(np.intc, copy=False) + options = dict(ColPerm=permc_spec) + x, info = _superlu.gssv(N, A.nnz, A.data, indices, indptr, + b, flag, options=options) + if info != 0: + warn("Matrix is exactly singular", MatrixRankWarning, stacklevel=2) + x.fill(np.nan) + if b_is_vector: + x = x.ravel() + else: + # b is sparse + Afactsolve = factorized(A) + + if not (b.format == "csc" or is_pydata_spmatrix(b)): + warn('spsolve is more efficient when sparse b ' + 'is in the CSC matrix format', + SparseEfficiencyWarning, stacklevel=2) + b = csc_matrix(b) + + # Create a sparse output matrix by repeatedly applying + # the sparse factorization to solve columns of b. + data_segs = [] + row_segs = [] + col_segs = [] + for j in range(b.shape[1]): + # TODO: replace this with + # bj = b[:, j].toarray().ravel() + # once 1D sparse arrays are supported. + # That is a slightly faster code path. + bj = b[:, [j]].toarray().ravel() + xj = Afactsolve(bj) + w = np.flatnonzero(xj) + segment_length = w.shape[0] + row_segs.append(w) + col_segs.append(np.full(segment_length, j, dtype=int)) + data_segs.append(np.asarray(xj[w], dtype=A.dtype)) + sparse_data = np.concatenate(data_segs) + sparse_row = np.concatenate(row_segs) + sparse_col = np.concatenate(col_segs) + x = A.__class__((sparse_data, (sparse_row, sparse_col)), + shape=b.shape, dtype=A.dtype) + + if is_pydata_sparse: + x = pydata_sparse_cls.from_scipy_sparse(x) + + return x + + +def splu(A, permc_spec=None, diag_pivot_thresh=None, + relax=None, panel_size=None, options=dict()): + """ + Compute the LU decomposition of a sparse, square matrix. + + Parameters + ---------- + A : sparse matrix + Sparse matrix to factorize. Most efficient when provided in CSC + format. 
Other formats will be converted to CSC before factorization. + permc_spec : str, optional + How to permute the columns of the matrix for sparsity preservation. + (default: 'COLAMD') + + - ``NATURAL``: natural ordering. + - ``MMD_ATA``: minimum degree ordering on the structure of A^T A. + - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A. + - ``COLAMD``: approximate minimum degree column ordering + + diag_pivot_thresh : float, optional + Threshold used for a diagonal entry to be an acceptable pivot. + See SuperLU user's guide for details [1]_ + relax : int, optional + Expert option for customizing the degree of relaxing supernodes. + See SuperLU user's guide for details [1]_ + panel_size : int, optional + Expert option for customizing the panel size. + See SuperLU user's guide for details [1]_ + options : dict, optional + Dictionary containing additional expert options to SuperLU. + See SuperLU user guide [1]_ (section 2.4 on the 'Options' argument) + for more details. For example, you can specify + ``options=dict(Equil=False, IterRefine='SINGLE'))`` + to turn equilibration off and perform a single iterative refinement. + + Returns + ------- + invA : scipy.sparse.linalg.SuperLU + Object, which has a ``solve`` method. + + See also + -------- + spilu : incomplete LU decomposition + + Notes + ----- + This function uses the SuperLU library. + + References + ---------- + .. [1] SuperLU https://portal.nersc.gov/project/sparse/superlu/ + + Examples + -------- + >>> import numpy as np + >>> from scipy.sparse import csc_matrix + >>> from scipy.sparse.linalg import splu + >>> A = csc_matrix([[1., 0., 0.], [5., 0., 2.], [0., -1., 0.]], dtype=float) + >>> B = splu(A) + >>> x = np.array([1., 2., 3.], dtype=float) + >>> B.solve(x) + array([ 1. , -3. 
, -1.5]) + >>> A.dot(B.solve(x)) + array([ 1., 2., 3.]) + >>> B.solve(A.dot(x)) + array([ 1., 2., 3.]) + """ + + if is_pydata_spmatrix(A): + def csc_construct_func(*a, cls=type(A)): + return cls.from_scipy_sparse(csc_matrix(*a)) + A = A.to_scipy_sparse().tocsc() + else: + csc_construct_func = csc_matrix + + if not (issparse(A) and A.format == "csc"): + A = csc_matrix(A) + warn('splu converted its input to CSC format', + SparseEfficiencyWarning, stacklevel=2) + + # sum duplicates for non-canonical format + A.sum_duplicates() + A = A._asfptype() # upcast to a floating point format + + M, N = A.shape + if (M != N): + raise ValueError("can only factor square matrices") # is this true? + + indices, indptr = _safe_downcast_indices(A) + + _options = dict(DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec, + PanelSize=panel_size, Relax=relax) + if options is not None: + _options.update(options) + + # Ensure that no column permutations are applied + if (_options["ColPerm"] == "NATURAL"): + _options["SymmetricMode"] = True + + return _superlu.gstrf(N, A.nnz, A.data, indices, indptr, + csc_construct_func=csc_construct_func, + ilu=False, options=_options) + + +def spilu(A, drop_tol=None, fill_factor=None, drop_rule=None, permc_spec=None, + diag_pivot_thresh=None, relax=None, panel_size=None, options=None): + """ + Compute an incomplete LU decomposition for a sparse, square matrix. + + The resulting object is an approximation to the inverse of `A`. + + Parameters + ---------- + A : (N, N) array_like + Sparse matrix to factorize. Most efficient when provided in CSC format. + Other formats will be converted to CSC before factorization. + drop_tol : float, optional + Drop tolerance (0 <= tol <= 1) for an incomplete LU decomposition. + (default: 1e-4) + fill_factor : float, optional + Specifies the fill ratio upper bound (>= 1.0) for ILU. (default: 10) + drop_rule : str, optional + Comma-separated string of drop rules to use. 
+ Available rules: ``basic``, ``prows``, ``column``, ``area``, + ``secondary``, ``dynamic``, ``interp``. (Default: ``basic,area``) + + See SuperLU documentation for details. + + Remaining other options + Same as for `splu` + + Returns + ------- + invA_approx : scipy.sparse.linalg.SuperLU + Object, which has a ``solve`` method. + + See also + -------- + splu : complete LU decomposition + + Notes + ----- + To improve the better approximation to the inverse, you may need to + increase `fill_factor` AND decrease `drop_tol`. + + This function uses the SuperLU library. + + Examples + -------- + >>> import numpy as np + >>> from scipy.sparse import csc_matrix + >>> from scipy.sparse.linalg import spilu + >>> A = csc_matrix([[1., 0., 0.], [5., 0., 2.], [0., -1., 0.]], dtype=float) + >>> B = spilu(A) + >>> x = np.array([1., 2., 3.], dtype=float) + >>> B.solve(x) + array([ 1. , -3. , -1.5]) + >>> A.dot(B.solve(x)) + array([ 1., 2., 3.]) + >>> B.solve(A.dot(x)) + array([ 1., 2., 3.]) + """ + + if is_pydata_spmatrix(A): + def csc_construct_func(*a, cls=type(A)): + return cls.from_scipy_sparse(csc_matrix(*a)) + A = A.to_scipy_sparse().tocsc() + else: + csc_construct_func = csc_matrix + + if not (issparse(A) and A.format == "csc"): + A = csc_matrix(A) + warn('spilu converted its input to CSC format', + SparseEfficiencyWarning, stacklevel=2) + + # sum duplicates for non-canonical format + A.sum_duplicates() + A = A._asfptype() # upcast to a floating point format + + M, N = A.shape + if (M != N): + raise ValueError("can only factor square matrices") # is this true? 
+ + indices, indptr = _safe_downcast_indices(A) + + _options = dict(ILU_DropRule=drop_rule, ILU_DropTol=drop_tol, + ILU_FillFactor=fill_factor, + DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec, + PanelSize=panel_size, Relax=relax) + if options is not None: + _options.update(options) + + # Ensure that no column permutations are applied + if (_options["ColPerm"] == "NATURAL"): + _options["SymmetricMode"] = True + + return _superlu.gstrf(N, A.nnz, A.data, indices, indptr, + csc_construct_func=csc_construct_func, + ilu=True, options=_options) + + +def factorized(A): + """ + Return a function for solving a sparse linear system, with A pre-factorized. + + Parameters + ---------- + A : (N, N) array_like + Input. A in CSC format is most efficient. A CSR format matrix will + be converted to CSC before factorization. + + Returns + ------- + solve : callable + To solve the linear system of equations given in `A`, the `solve` + callable should be passed an ndarray of shape (N,). + + Examples + -------- + >>> import numpy as np + >>> from scipy.sparse.linalg import factorized + >>> from scipy.sparse import csc_matrix + >>> A = np.array([[ 3. , 2. , -1. ], + ... [ 2. , -2. , 4. ], + ... [-1. , 0.5, -1. ]]) + >>> solve = factorized(csc_matrix(A)) # Makes LU decomposition. + >>> rhs1 = np.array([1, -2, 0]) + >>> solve(rhs1) # Uses the LU factors. 
+ array([ 1., -2., -2.]) + + """ + if is_pydata_spmatrix(A): + A = A.to_scipy_sparse().tocsc() + + if useUmfpack: + if noScikit: + raise RuntimeError('Scikits.umfpack not installed.') + + if not (issparse(A) and A.format == "csc"): + A = csc_matrix(A) + warn('splu converted its input to CSC format', + SparseEfficiencyWarning, stacklevel=2) + + A = A._asfptype() # upcast to a floating point format + + if A.dtype.char not in 'dD': + raise ValueError("convert matrix data to double, please, using" + " .astype(), or set linsolve.useUmfpack = False") + + umf_family, A = _get_umf_family(A) + umf = umfpack.UmfpackContext(umf_family) + + # Make LU decomposition. + umf.numeric(A) + + def solve(b): + with np.errstate(divide="ignore", invalid="ignore"): + # Ignoring warnings with numpy >= 1.23.0, see gh-16523 + result = umf.solve(umfpack.UMFPACK_A, A, b, autoTranspose=True) + + return result + + return solve + else: + return splu(A).solve + + +def spsolve_triangular(A, b, lower=True, overwrite_A=False, overwrite_b=False, + unit_diagonal=False): + """ + Solve the equation ``A x = b`` for `x`, assuming A is a triangular matrix. + + Parameters + ---------- + A : (M, M) sparse matrix + A sparse square triangular matrix. Should be in CSR format. + b : (M,) or (M, N) array_like + Right-hand side matrix in ``A x = b`` + lower : bool, optional + Whether `A` is a lower or upper triangular matrix. + Default is lower triangular matrix. + overwrite_A : bool, optional + Allow changing `A`. The indices of `A` are going to be sorted and zero + entries are going to be removed. + Enabling gives a performance gain. Default is False. + overwrite_b : bool, optional + Allow overwriting data in `b`. + Enabling gives a performance gain. Default is False. + If `overwrite_b` is True, it should be ensured that + `b` has an appropriate dtype to be able to store the result. + unit_diagonal : bool, optional + If True, diagonal elements of `a` are assumed to be 1 and will not be + referenced. + + .. 
versionadded:: 1.4.0 + + Returns + ------- + x : (M,) or (M, N) ndarray + Solution to the system ``A x = b``. Shape of return matches shape + of `b`. + + Raises + ------ + LinAlgError + If `A` is singular or not triangular. + ValueError + If shape of `A` or shape of `b` do not match the requirements. + + Notes + ----- + .. versionadded:: 0.19.0 + + Examples + -------- + >>> import numpy as np + >>> from scipy.sparse import csr_matrix + >>> from scipy.sparse.linalg import spsolve_triangular + >>> A = csr_matrix([[3, 0, 0], [1, -1, 0], [2, 0, 1]], dtype=float) + >>> B = np.array([[2, 0], [-1, 0], [2, 0]], dtype=float) + >>> x = spsolve_triangular(A, B) + >>> np.allclose(A.dot(x), B) + True + """ + + if is_pydata_spmatrix(A): + A = A.to_scipy_sparse().tocsr() + + # Check the input for correct type and format. + if not (issparse(A) and A.format == "csr"): + warn('CSR matrix format is required. Converting to CSR matrix.', + SparseEfficiencyWarning, stacklevel=2) + A = csr_matrix(A) + elif not overwrite_A: + A = A.copy() + + if A.shape[0] != A.shape[1]: + raise ValueError( + f'A must be a square matrix but its shape is {A.shape}.') + + # sum duplicates for non-canonical format + A.sum_duplicates() + + b = np.asanyarray(b) + + if b.ndim not in [1, 2]: + raise ValueError( + f'b must have 1 or 2 dims but its shape is {b.shape}.') + if A.shape[0] != b.shape[0]: + raise ValueError( + 'The size of the dimensions of A must be equal to ' + 'the size of the first dimension of b but the shape of A is ' + f'{A.shape} and the shape of b is {b.shape}.' + ) + + # Init x as (a copy of) b. + x_dtype = np.result_type(A.data, b, np.float64) + if overwrite_b: + if np.can_cast(b.dtype, x_dtype, casting='same_kind'): + x = b + else: + raise ValueError( + f'Cannot overwrite b (dtype {b.dtype}) with result ' + f'of type {x_dtype}.' + ) + else: + x = b.astype(x_dtype, copy=True) + + # Choose forward or backward order. 
+ if lower: + row_indices = range(len(b)) + else: + row_indices = range(len(b) - 1, -1, -1) + + # Fill x iteratively. + for i in row_indices: + + # Get indices for i-th row. + indptr_start = A.indptr[i] + indptr_stop = A.indptr[i + 1] + + if lower: + A_diagonal_index_row_i = indptr_stop - 1 + A_off_diagonal_indices_row_i = slice(indptr_start, indptr_stop - 1) + else: + A_diagonal_index_row_i = indptr_start + A_off_diagonal_indices_row_i = slice(indptr_start + 1, indptr_stop) + + # Check regularity and triangularity of A. + if not unit_diagonal and (indptr_stop <= indptr_start + or A.indices[A_diagonal_index_row_i] < i): + raise LinAlgError( + f'A is singular: diagonal {i} is zero.') + if not unit_diagonal and A.indices[A_diagonal_index_row_i] > i: + raise LinAlgError( + 'A is not triangular: A[{}, {}] is nonzero.' + ''.format(i, A.indices[A_diagonal_index_row_i])) + + # Incorporate off-diagonal entries. + A_column_indices_in_row_i = A.indices[A_off_diagonal_indices_row_i] + A_values_in_row_i = A.data[A_off_diagonal_indices_row_i] + x[i] -= np.dot(x[A_column_indices_in_row_i].T, A_values_in_row_i) + + # Compute i-th entry of x. 
+ if not unit_diagonal: + x[i] /= A.data[A_diagonal_index_row_i] + + return x diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f6d2a90add9acb0a0352f4932768df310cd9258d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/__pycache__/test_linsolve.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/__pycache__/test_linsolve.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84c25e77546774d068144a16382deb62cf79c816 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/__pycache__/test_linsolve.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/test_linsolve.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/test_linsolve.py new file mode 100644 index 0000000000000000000000000000000000000000..f1684b562ff20812a4280e3bdbac2f56086c5b1d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/test_linsolve.py @@ -0,0 +1,805 @@ +import sys +import threading + +import numpy as np +from numpy import array, finfo, arange, eye, all, unique, ones, dot +import numpy.random as random +from numpy.testing 
import ( + assert_array_almost_equal, assert_almost_equal, + assert_equal, assert_array_equal, assert_, assert_allclose, + assert_warns, suppress_warnings) +import pytest +from pytest import raises as assert_raises + +import scipy.linalg +from scipy.linalg import norm, inv +from scipy.sparse import (spdiags, SparseEfficiencyWarning, csc_matrix, + csr_matrix, identity, issparse, dok_matrix, lil_matrix, bsr_matrix) +from scipy.sparse.linalg import SuperLU +from scipy.sparse.linalg._dsolve import (spsolve, use_solver, splu, spilu, + MatrixRankWarning, _superlu, spsolve_triangular, factorized) +import scipy.sparse + +from scipy._lib._testutils import check_free_memory +from scipy._lib._util import ComplexWarning + + +sup_sparse_efficiency = suppress_warnings() +sup_sparse_efficiency.filter(SparseEfficiencyWarning) + +# scikits.umfpack is not a SciPy dependency but it is optionally used in +# dsolve, so check whether it's available +try: + import scikits.umfpack as umfpack + has_umfpack = True +except ImportError: + has_umfpack = False + +def toarray(a): + if issparse(a): + return a.toarray() + else: + return a + + +def setup_bug_8278(): + N = 2 ** 6 + h = 1/N + Ah1D = scipy.sparse.diags([-1, 2, -1], [-1, 0, 1], + shape=(N-1, N-1))/(h**2) + eyeN = scipy.sparse.eye(N - 1) + A = (scipy.sparse.kron(eyeN, scipy.sparse.kron(eyeN, Ah1D)) + + scipy.sparse.kron(eyeN, scipy.sparse.kron(Ah1D, eyeN)) + + scipy.sparse.kron(Ah1D, scipy.sparse.kron(eyeN, eyeN))) + b = np.random.rand((N-1)**3) + return A, b + + +class TestFactorized: + def setup_method(self): + n = 5 + d = arange(n) + 1 + self.n = n + self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n).tocsc() + random.seed(1234) + + def _check_singular(self): + A = csc_matrix((5,5), dtype='d') + b = ones(5) + assert_array_almost_equal(0. 
* b, factorized(A)(b)) + + def _check_non_singular(self): + # Make a diagonal dominant, to make sure it is not singular + n = 5 + a = csc_matrix(random.rand(n, n)) + b = ones(n) + + expected = splu(a).solve(b) + assert_array_almost_equal(factorized(a)(b), expected) + + def test_singular_without_umfpack(self): + use_solver(useUmfpack=False) + with assert_raises(RuntimeError, match="Factor is exactly singular"): + self._check_singular() + + @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") + def test_singular_with_umfpack(self): + use_solver(useUmfpack=True) + with suppress_warnings() as sup: + sup.filter(RuntimeWarning, "divide by zero encountered in double_scalars") + assert_warns(umfpack.UmfpackWarning, self._check_singular) + + def test_non_singular_without_umfpack(self): + use_solver(useUmfpack=False) + self._check_non_singular() + + @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") + def test_non_singular_with_umfpack(self): + use_solver(useUmfpack=True) + self._check_non_singular() + + def test_cannot_factorize_nonsquare_matrix_without_umfpack(self): + use_solver(useUmfpack=False) + msg = "can only factor square matrices" + with assert_raises(ValueError, match=msg): + factorized(self.A[:, :4]) + + @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") + def test_factorizes_nonsquare_matrix_with_umfpack(self): + use_solver(useUmfpack=True) + # does not raise + factorized(self.A[:,:4]) + + def test_call_with_incorrectly_sized_matrix_without_umfpack(self): + use_solver(useUmfpack=False) + solve = factorized(self.A) + b = random.rand(4) + B = random.rand(4, 3) + BB = random.rand(self.n, 3, 9) + + with assert_raises(ValueError, match="is of incompatible size"): + solve(b) + with assert_raises(ValueError, match="is of incompatible size"): + solve(B) + with assert_raises(ValueError, + match="object too deep for desired array"): + solve(BB) + + @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") + 
def test_call_with_incorrectly_sized_matrix_with_umfpack(self): + use_solver(useUmfpack=True) + solve = factorized(self.A) + b = random.rand(4) + B = random.rand(4, 3) + BB = random.rand(self.n, 3, 9) + + # does not raise + solve(b) + msg = "object too deep for desired array" + with assert_raises(ValueError, match=msg): + solve(B) + with assert_raises(ValueError, match=msg): + solve(BB) + + def test_call_with_cast_to_complex_without_umfpack(self): + use_solver(useUmfpack=False) + solve = factorized(self.A) + b = random.rand(4) + for t in [np.complex64, np.complex128]: + with assert_raises(TypeError, match="Cannot cast array data"): + solve(b.astype(t)) + + @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") + def test_call_with_cast_to_complex_with_umfpack(self): + use_solver(useUmfpack=True) + solve = factorized(self.A) + b = random.rand(4) + for t in [np.complex64, np.complex128]: + assert_warns(ComplexWarning, solve, b.astype(t)) + + @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") + def test_assume_sorted_indices_flag(self): + # a sparse matrix with unsorted indices + unsorted_inds = np.array([2, 0, 1, 0]) + data = np.array([10, 16, 5, 0.4]) + indptr = np.array([0, 1, 2, 4]) + A = csc_matrix((data, unsorted_inds, indptr), (3, 3)) + b = ones(3) + + # should raise when incorrectly assuming indices are sorted + use_solver(useUmfpack=True, assumeSortedIndices=True) + with assert_raises(RuntimeError, + match="UMFPACK_ERROR_invalid_matrix"): + factorized(A) + + # should sort indices and succeed when not assuming indices are sorted + use_solver(useUmfpack=True, assumeSortedIndices=False) + expected = splu(A.copy()).solve(b) + + assert_equal(A.has_sorted_indices, 0) + assert_array_almost_equal(factorized(A)(b), expected) + + @pytest.mark.slow + @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") + def test_bug_8278(self): + check_free_memory(8000) + use_solver(useUmfpack=True) + A, b = setup_bug_8278() + A = 
A.tocsc() + f = factorized(A) + x = f(b) + assert_array_almost_equal(A @ x, b) + + +class TestLinsolve: + def setup_method(self): + use_solver(useUmfpack=False) + + def test_singular(self): + A = csc_matrix((5,5), dtype='d') + b = array([1, 2, 3, 4, 5],dtype='d') + with suppress_warnings() as sup: + sup.filter(MatrixRankWarning, "Matrix is exactly singular") + x = spsolve(A, b) + assert_(not np.isfinite(x).any()) + + def test_singular_gh_3312(self): + # "Bad" test case that leads SuperLU to call LAPACK with invalid + # arguments. Check that it fails moderately gracefully. + ij = np.array([(17, 0), (17, 6), (17, 12), (10, 13)], dtype=np.int32) + v = np.array([0.284213, 0.94933781, 0.15767017, 0.38797296]) + A = csc_matrix((v, ij.T), shape=(20, 20)) + b = np.arange(20) + + try: + # should either raise a runtime error or return value + # appropriate for singular input (which yields the warning) + with suppress_warnings() as sup: + sup.filter(MatrixRankWarning, "Matrix is exactly singular") + x = spsolve(A, b) + assert not np.isfinite(x).any() + except RuntimeError: + pass + + @pytest.mark.parametrize('format', ['csc', 'csr']) + @pytest.mark.parametrize('idx_dtype', [np.int32, np.int64]) + def test_twodiags(self, format: str, idx_dtype: np.dtype): + A = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5, + format=format) + b = array([1, 2, 3, 4, 5]) + + # condition number of A + cond_A = norm(A.toarray(), 2) * norm(inv(A.toarray()), 2) + + for t in ['f','d','F','D']: + eps = finfo(t).eps # floating point epsilon + b = b.astype(t) + Asp = A.astype(t) + Asp.indices = Asp.indices.astype(idx_dtype, copy=False) + Asp.indptr = Asp.indptr.astype(idx_dtype, copy=False) + + x = spsolve(Asp, b) + assert_(norm(b - Asp@x) < 10 * cond_A * eps) + + def test_bvector_smoketest(self): + Adense = array([[0., 1., 1.], + [1., 0., 1.], + [0., 0., 1.]]) + As = csc_matrix(Adense) + random.seed(1234) + x = random.randn(3) + b = As@x + x2 = spsolve(As, b) + + 
assert_array_almost_equal(x, x2) + + def test_bmatrix_smoketest(self): + Adense = array([[0., 1., 1.], + [1., 0., 1.], + [0., 0., 1.]]) + As = csc_matrix(Adense) + random.seed(1234) + x = random.randn(3, 4) + Bdense = As.dot(x) + Bs = csc_matrix(Bdense) + x2 = spsolve(As, Bs) + assert_array_almost_equal(x, x2.toarray()) + + @sup_sparse_efficiency + def test_non_square(self): + # A is not square. + A = ones((3, 4)) + b = ones((4, 1)) + assert_raises(ValueError, spsolve, A, b) + # A2 and b2 have incompatible shapes. + A2 = csc_matrix(eye(3)) + b2 = array([1.0, 2.0]) + assert_raises(ValueError, spsolve, A2, b2) + + @sup_sparse_efficiency + def test_example_comparison(self): + row = array([0,0,1,2,2,2]) + col = array([0,2,2,0,1,2]) + data = array([1,2,3,-4,5,6]) + sM = csr_matrix((data,(row,col)), shape=(3,3), dtype=float) + M = sM.toarray() + + row = array([0,0,1,1,0,0]) + col = array([0,2,1,1,0,0]) + data = array([1,1,1,1,1,1]) + sN = csr_matrix((data, (row,col)), shape=(3,3), dtype=float) + N = sN.toarray() + + sX = spsolve(sM, sN) + X = scipy.linalg.solve(M, N) + + assert_array_almost_equal(X, sX.toarray()) + + @sup_sparse_efficiency + @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") + def test_shape_compatibility(self): + use_solver(useUmfpack=True) + A = csc_matrix([[1., 0], [0, 2]]) + bs = [ + [1, 6], + array([1, 6]), + [[1], [6]], + array([[1], [6]]), + csc_matrix([[1], [6]]), + csr_matrix([[1], [6]]), + dok_matrix([[1], [6]]), + bsr_matrix([[1], [6]]), + array([[1., 2., 3.], [6., 8., 10.]]), + csc_matrix([[1., 2., 3.], [6., 8., 10.]]), + csr_matrix([[1., 2., 3.], [6., 8., 10.]]), + dok_matrix([[1., 2., 3.], [6., 8., 10.]]), + bsr_matrix([[1., 2., 3.], [6., 8., 10.]]), + ] + + for b in bs: + x = np.linalg.solve(A.toarray(), toarray(b)) + for spmattype in [csc_matrix, csr_matrix, dok_matrix, lil_matrix]: + x1 = spsolve(spmattype(A), b, use_umfpack=True) + x2 = spsolve(spmattype(A), b, use_umfpack=False) + + # check solution + if x.ndim == 2 
and x.shape[1] == 1: + # interprets also these as "vectors" + x = x.ravel() + + assert_array_almost_equal(toarray(x1), x, + err_msg=repr((b, spmattype, 1))) + assert_array_almost_equal(toarray(x2), x, + err_msg=repr((b, spmattype, 2))) + + # dense vs. sparse output ("vectors" are always dense) + if issparse(b) and x.ndim > 1: + assert_(issparse(x1), repr((b, spmattype, 1))) + assert_(issparse(x2), repr((b, spmattype, 2))) + else: + assert_(isinstance(x1, np.ndarray), repr((b, spmattype, 1))) + assert_(isinstance(x2, np.ndarray), repr((b, spmattype, 2))) + + # check output shape + if x.ndim == 1: + # "vector" + assert_equal(x1.shape, (A.shape[1],)) + assert_equal(x2.shape, (A.shape[1],)) + else: + # "matrix" + assert_equal(x1.shape, x.shape) + assert_equal(x2.shape, x.shape) + + A = csc_matrix((3, 3)) + b = csc_matrix((1, 3)) + assert_raises(ValueError, spsolve, A, b) + + @sup_sparse_efficiency + def test_ndarray_support(self): + A = array([[1., 2.], [2., 0.]]) + x = array([[1., 1.], [0.5, -0.5]]) + b = array([[2., 0.], [2., 2.]]) + + assert_array_almost_equal(x, spsolve(A, b)) + + def test_gssv_badinput(self): + N = 10 + d = arange(N) + 1.0 + A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), N, N) + + for spmatrix in (csc_matrix, csr_matrix): + A = spmatrix(A) + b = np.arange(N) + + def not_c_contig(x): + return x.repeat(2)[::2] + + def not_1dim(x): + return x[:,None] + + def bad_type(x): + return x.astype(bool) + + def too_short(x): + return x[:-1] + + badops = [not_c_contig, not_1dim, bad_type, too_short] + + for badop in badops: + msg = f"{spmatrix!r} {badop!r}" + # Not C-contiguous + assert_raises((ValueError, TypeError), _superlu.gssv, + N, A.nnz, badop(A.data), A.indices, A.indptr, + b, int(spmatrix == csc_matrix), err_msg=msg) + assert_raises((ValueError, TypeError), _superlu.gssv, + N, A.nnz, A.data, badop(A.indices), A.indptr, + b, int(spmatrix == csc_matrix), err_msg=msg) + assert_raises((ValueError, TypeError), _superlu.gssv, + N, A.nnz, A.data, A.indices, 
badop(A.indptr), + b, int(spmatrix == csc_matrix), err_msg=msg) + + def test_sparsity_preservation(self): + ident = csc_matrix([ + [1, 0, 0], + [0, 1, 0], + [0, 0, 1]]) + b = csc_matrix([ + [0, 1], + [1, 0], + [0, 0]]) + x = spsolve(ident, b) + assert_equal(ident.nnz, 3) + assert_equal(b.nnz, 2) + assert_equal(x.nnz, 2) + assert_allclose(x.A, b.A, atol=1e-12, rtol=1e-12) + + def test_dtype_cast(self): + A_real = scipy.sparse.csr_matrix([[1, 2, 0], + [0, 0, 3], + [4, 0, 5]]) + A_complex = scipy.sparse.csr_matrix([[1, 2, 0], + [0, 0, 3], + [4, 0, 5 + 1j]]) + b_real = np.array([1,1,1]) + b_complex = np.array([1,1,1]) + 1j*np.array([1,1,1]) + x = spsolve(A_real, b_real) + assert_(np.issubdtype(x.dtype, np.floating)) + x = spsolve(A_real, b_complex) + assert_(np.issubdtype(x.dtype, np.complexfloating)) + x = spsolve(A_complex, b_real) + assert_(np.issubdtype(x.dtype, np.complexfloating)) + x = spsolve(A_complex, b_complex) + assert_(np.issubdtype(x.dtype, np.complexfloating)) + + @pytest.mark.slow + @pytest.mark.skipif(not has_umfpack, reason="umfpack not available") + def test_bug_8278(self): + check_free_memory(8000) + use_solver(useUmfpack=True) + A, b = setup_bug_8278() + x = spsolve(A, b) + assert_array_almost_equal(A @ x, b) + + +class TestSplu: + def setup_method(self): + use_solver(useUmfpack=False) + n = 40 + d = arange(n) + 1 + self.n = n + self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n, format='csc') + random.seed(1234) + + def _smoketest(self, spxlu, check, dtype, idx_dtype): + if np.issubdtype(dtype, np.complexfloating): + A = self.A + 1j*self.A.T + else: + A = self.A + + A = A.astype(dtype) + A.indices = A.indices.astype(idx_dtype, copy=False) + A.indptr = A.indptr.astype(idx_dtype, copy=False) + lu = spxlu(A) + + rng = random.RandomState(1234) + + # Input shapes + for k in [None, 1, 2, self.n, self.n+2]: + msg = f"k={k!r}" + + if k is None: + b = rng.rand(self.n) + else: + b = rng.rand(self.n, k) + + if np.issubdtype(dtype, np.complexfloating): + b 
= b + 1j*rng.rand(*b.shape) + b = b.astype(dtype) + + x = lu.solve(b) + check(A, b, x, msg) + + x = lu.solve(b, 'T') + check(A.T, b, x, msg) + + x = lu.solve(b, 'H') + check(A.T.conj(), b, x, msg) + + @sup_sparse_efficiency + def test_splu_smoketest(self): + self._internal_test_splu_smoketest() + + def _internal_test_splu_smoketest(self): + # Check that splu works at all + def check(A, b, x, msg=""): + eps = np.finfo(A.dtype).eps + r = A @ x + assert_(abs(r - b).max() < 1e3*eps, msg) + + for dtype in [np.float32, np.float64, np.complex64, np.complex128]: + for idx_dtype in [np.int32, np.int64]: + self._smoketest(splu, check, dtype, idx_dtype) + + @sup_sparse_efficiency + def test_spilu_smoketest(self): + self._internal_test_spilu_smoketest() + + def _internal_test_spilu_smoketest(self): + errors = [] + + def check(A, b, x, msg=""): + r = A @ x + err = abs(r - b).max() + assert_(err < 1e-2, msg) + if b.dtype in (np.float64, np.complex128): + errors.append(err) + + for dtype in [np.float32, np.float64, np.complex64, np.complex128]: + for idx_dtype in [np.int32, np.int64]: + self._smoketest(spilu, check, dtype, idx_dtype) + + assert_(max(errors) > 1e-5) + + @sup_sparse_efficiency + def test_spilu_drop_rule(self): + # Test passing in the drop_rule argument to spilu. + A = identity(2) + + rules = [ + b'basic,area'.decode('ascii'), # unicode + b'basic,area', # ascii + [b'basic', b'area'.decode('ascii')] + ] + for rule in rules: + # Argument should be accepted + assert_(isinstance(spilu(A, drop_rule=rule), SuperLU)) + + def test_splu_nnz0(self): + A = csc_matrix((5,5), dtype='d') + assert_raises(RuntimeError, splu, A) + + def test_spilu_nnz0(self): + A = csc_matrix((5,5), dtype='d') + assert_raises(RuntimeError, spilu, A) + + def test_splu_basic(self): + # Test basic splu functionality. 
+ n = 30 + rng = random.RandomState(12) + a = rng.rand(n, n) + a[a < 0.95] = 0 + # First test with a singular matrix + a[:, 0] = 0 + a_ = csc_matrix(a) + # Matrix is exactly singular + assert_raises(RuntimeError, splu, a_) + + # Make a diagonal dominant, to make sure it is not singular + a += 4*eye(n) + a_ = csc_matrix(a) + lu = splu(a_) + b = ones(n) + x = lu.solve(b) + assert_almost_equal(dot(a, x), b) + + def test_splu_perm(self): + # Test the permutation vectors exposed by splu. + n = 30 + a = random.random((n, n)) + a[a < 0.95] = 0 + # Make a diagonal dominant, to make sure it is not singular + a += 4*eye(n) + a_ = csc_matrix(a) + lu = splu(a_) + # Check that the permutation indices do belong to [0, n-1]. + for perm in (lu.perm_r, lu.perm_c): + assert_(all(perm > -1)) + assert_(all(perm < n)) + assert_equal(len(unique(perm)), len(perm)) + + # Now make a symmetric, and test that the two permutation vectors are + # the same + # Note: a += a.T relies on undefined behavior. + a = a + a.T + a_ = csc_matrix(a) + lu = splu(a_) + assert_array_equal(lu.perm_r, lu.perm_c) + + @pytest.mark.parametrize("splu_fun, rtol", [(splu, 1e-7), (spilu, 1e-1)]) + def test_natural_permc(self, splu_fun, rtol): + # Test that the "NATURAL" permc_spec does not permute the matrix + np.random.seed(42) + n = 500 + p = 0.01 + A = scipy.sparse.random(n, n, p) + x = np.random.rand(n) + # Make A diagonal dominant to make sure it is not singular + A += (n+1)*scipy.sparse.identity(n) + A_ = csc_matrix(A) + b = A_ @ x + + # without permc_spec, permutation is not identity + lu = splu_fun(A_) + assert_(np.any(lu.perm_c != np.arange(n))) + + # with permc_spec="NATURAL", permutation is identity + lu = splu_fun(A_, permc_spec="NATURAL") + assert_array_equal(lu.perm_c, np.arange(n)) + + # Also, lu decomposition is valid + x2 = lu.solve(b) + assert_allclose(x, x2, rtol=rtol) + + @pytest.mark.skipif(not hasattr(sys, 'getrefcount'), reason="no sys.getrefcount") + def test_lu_refcount(self): + # Test that 
we are keeping track of the reference count with splu. + n = 30 + a = random.random((n, n)) + a[a < 0.95] = 0 + # Make a diagonal dominant, to make sure it is not singular + a += 4*eye(n) + a_ = csc_matrix(a) + lu = splu(a_) + + # And now test that we don't have a refcount bug + rc = sys.getrefcount(lu) + for attr in ('perm_r', 'perm_c'): + perm = getattr(lu, attr) + assert_equal(sys.getrefcount(lu), rc + 1) + del perm + assert_equal(sys.getrefcount(lu), rc) + + def test_bad_inputs(self): + A = self.A.tocsc() + + assert_raises(ValueError, splu, A[:,:4]) + assert_raises(ValueError, spilu, A[:,:4]) + + for lu in [splu(A), spilu(A)]: + b = random.rand(42) + B = random.rand(42, 3) + BB = random.rand(self.n, 3, 9) + assert_raises(ValueError, lu.solve, b) + assert_raises(ValueError, lu.solve, B) + assert_raises(ValueError, lu.solve, BB) + assert_raises(TypeError, lu.solve, + b.astype(np.complex64)) + assert_raises(TypeError, lu.solve, + b.astype(np.complex128)) + + @sup_sparse_efficiency + def test_superlu_dlamch_i386_nan(self): + # SuperLU 4.3 calls some functions returning floats without + # declaring them. On i386@linux call convention, this fails to + # clear floating point registers after call. As a result, NaN + # can appear in the next floating point operation made. + # + # Here's a test case that triggered the issue. 
+ n = 8 + d = np.arange(n) + 1 + A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n) + A = A.astype(np.float32) + spilu(A) + A = A + 1j*A + B = A.A + assert_(not np.isnan(B).any()) + + @sup_sparse_efficiency + def test_lu_attr(self): + + def check(dtype, complex_2=False): + A = self.A.astype(dtype) + + if complex_2: + A = A + 1j*A.T + + n = A.shape[0] + lu = splu(A) + + # Check that the decomposition is as advertised + + Pc = np.zeros((n, n)) + Pc[np.arange(n), lu.perm_c] = 1 + + Pr = np.zeros((n, n)) + Pr[lu.perm_r, np.arange(n)] = 1 + + Ad = A.toarray() + lhs = Pr.dot(Ad).dot(Pc) + rhs = (lu.L @ lu.U).toarray() + + eps = np.finfo(dtype).eps + + assert_allclose(lhs, rhs, atol=100*eps) + + check(np.float32) + check(np.float64) + check(np.complex64) + check(np.complex128) + check(np.complex64, True) + check(np.complex128, True) + + @pytest.mark.slow + @sup_sparse_efficiency + def test_threads_parallel(self): + oks = [] + + def worker(): + try: + self.test_splu_basic() + self._internal_test_splu_smoketest() + self._internal_test_spilu_smoketest() + oks.append(True) + except Exception: + pass + + threads = [threading.Thread(target=worker) + for k in range(20)] + for t in threads: + t.start() + for t in threads: + t.join() + + assert_equal(len(oks), 20) + + +class TestSpsolveTriangular: + def setup_method(self): + use_solver(useUmfpack=False) + + def test_zero_diagonal(self): + n = 5 + rng = np.random.default_rng(43876432987) + A = rng.standard_normal((n, n)) + b = np.arange(n) + A = scipy.sparse.tril(A, k=0, format='csr') + + x = spsolve_triangular(A, b, unit_diagonal=True, lower=True) + + A.setdiag(1) + assert_allclose(A.dot(x), b) + + # Regression test from gh-15199 + A = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0]], dtype=np.float64) + b = np.array([1., 2., 3.]) + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, "CSR matrix format is") + spsolve_triangular(A, b, unit_diagonal=True) + + def test_singular(self): + n = 5 + A = csr_matrix((n, n)) + b = 
np.arange(n) + for lower in (True, False): + assert_raises(scipy.linalg.LinAlgError, + spsolve_triangular, A, b, lower=lower) + + @sup_sparse_efficiency + def test_bad_shape(self): + # A is not square. + A = np.zeros((3, 4)) + b = ones((4, 1)) + assert_raises(ValueError, spsolve_triangular, A, b) + # A2 and b2 have incompatible shapes. + A2 = csr_matrix(eye(3)) + b2 = array([1.0, 2.0]) + assert_raises(ValueError, spsolve_triangular, A2, b2) + + @sup_sparse_efficiency + def test_input_types(self): + A = array([[1., 0.], [1., 2.]]) + b = array([[2., 0.], [2., 2.]]) + for matrix_type in (array, csc_matrix, csr_matrix): + x = spsolve_triangular(matrix_type(A), b, lower=True) + assert_array_almost_equal(A.dot(x), b) + + @pytest.mark.slow + @pytest.mark.timeout(120) # prerelease_deps_coverage_64bit_blas job + @sup_sparse_efficiency + def test_random(self): + def random_triangle_matrix(n, lower=True): + A = scipy.sparse.random(n, n, density=0.1, format='coo') + if lower: + A = scipy.sparse.tril(A) + else: + A = scipy.sparse.triu(A) + A = A.tocsr(copy=False) + for i in range(n): + A[i, i] = np.random.rand() + 1 + return A + + np.random.seed(1234) + for lower in (True, False): + for n in (10, 10**2, 10**3): + A = random_triangle_matrix(n, lower=lower) + for m in (1, 10): + for b in (np.random.rand(n, m), + np.random.randint(-9, 9, (n, m)), + np.random.randint(-9, 9, (n, m)) + + np.random.randint(-9, 9, (n, m)) * 1j): + x = spsolve_triangular(A, b, lower=lower) + assert_array_almost_equal(A.dot(x), b) + x = spsolve_triangular(A, b, lower=lower, + unit_diagonal=True) + A.setdiag(1) + assert_array_almost_equal(A.dot(x), b) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_special_sparse_arrays.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_special_sparse_arrays.py new file mode 100644 index 0000000000000000000000000000000000000000..e80ae3c288114e124d34efd7c7f62c43e9d02bea --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_special_sparse_arrays.py @@ -0,0 +1,948 @@ +import numpy as np +from scipy.sparse.linalg import LinearOperator +from scipy.sparse import kron, eye, dia_array + +__all__ = ['LaplacianNd'] +# Sakurai and Mikota classes are intended for tests and benchmarks +# and explicitly not included in the public API of this module. + + +class LaplacianNd(LinearOperator): + """ + The grid Laplacian in ``N`` dimensions and its eigenvalues/eigenvectors. + + Construct Laplacian on a uniform rectangular grid in `N` dimensions + and output its eigenvalues and eigenvectors. + The Laplacian ``L`` is square, negative definite, real symmetric array + with signed integer entries and zeros otherwise. + + Parameters + ---------- + grid_shape : tuple + A tuple of integers of length ``N`` (corresponding to the dimension of + the Lapacian), where each entry gives the size of that dimension. The + Laplacian matrix is square of the size ``np.prod(grid_shape)``. + boundary_conditions : {'neumann', 'dirichlet', 'periodic'}, optional + The type of the boundary conditions on the boundaries of the grid. + Valid values are ``'dirichlet'`` or ``'neumann'``(default) or + ``'periodic'``. + dtype : dtype + Numerical type of the array. Default is ``np.int8``. + + Methods + ------- + toarray() + Construct a dense array from Laplacian data + tosparse() + Construct a sparse array from Laplacian data + eigenvalues(m=None) + Construct a 1D array of `m` largest (smallest in absolute value) + eigenvalues of the Laplacian matrix in ascending order. + eigenvectors(m=None): + Construct the array with columns made of `m` eigenvectors (``float``) + of the ``Nd`` Laplacian corresponding to the `m` ordered eigenvalues. + + .. 
versionadded:: 1.12.0 + + Notes + ----- + Compared to the MATLAB/Octave implementation [1] of 1-, 2-, and 3-D + Laplacian, this code allows the arbitrary N-D case and the matrix-free + callable option, but is currently limited to pure Dirichlet, Neumann or + Periodic boundary conditions only. + + The Laplacian matrix of a graph (`scipy.sparse.csgraph.laplacian`) of a + rectangular grid corresponds to the negative Laplacian with the Neumann + conditions, i.e., ``boundary_conditions = 'neumann'``. + + All eigenvalues and eigenvectors of the discrete Laplacian operator for + an ``N``-dimensional regular grid of shape `grid_shape` with the grid + step size ``h=1`` are analytically known [2]. + + References + ---------- + .. [1] https://github.com/lobpcg/blopex/blob/master/blopex_\ +tools/matlab/laplacian/laplacian.m + .. [2] "Eigenvalues and eigenvectors of the second derivative", Wikipedia + https://en.wikipedia.org/wiki/Eigenvalues_and_eigenvectors_\ +of_the_second_derivative + + Examples + -------- + >>> import numpy as np + >>> from scipy.sparse.linalg import LaplacianNd + >>> from scipy.sparse import diags, csgraph + >>> from scipy.linalg import eigvalsh + + The one-dimensional Laplacian demonstrated below for pure Neumann boundary + conditions on a regular grid with ``n=6`` grid points is exactly the + negative graph Laplacian for the undirected linear graph with ``n`` + vertices using the sparse adjacency matrix ``G`` represented by the + famous tri-diagonal matrix: + + >>> n = 6 + >>> G = diags(np.ones(n - 1), 1, format='csr') + >>> Lf = csgraph.laplacian(G, symmetrized=True, form='function') + >>> grid_shape = (n, ) + >>> lap = LaplacianNd(grid_shape, boundary_conditions='neumann') + >>> np.array_equal(lap.matmat(np.eye(n)), -Lf(np.eye(n))) + True + + Since all matrix entries of the Laplacian are integers, ``'int8'`` is + the default dtype for storing matrix representations. 
+ + >>> lap.tosparse() + <6x6 sparse array of type '' + with 16 stored elements (3 diagonals) in DIAgonal format> + >>> lap.toarray() + array([[-1, 1, 0, 0, 0, 0], + [ 1, -2, 1, 0, 0, 0], + [ 0, 1, -2, 1, 0, 0], + [ 0, 0, 1, -2, 1, 0], + [ 0, 0, 0, 1, -2, 1], + [ 0, 0, 0, 0, 1, -1]], dtype=int8) + >>> np.array_equal(lap.matmat(np.eye(n)), lap.toarray()) + True + >>> np.array_equal(lap.tosparse().toarray(), lap.toarray()) + True + + Any number of extreme eigenvalues and/or eigenvectors can be computed. + + >>> lap = LaplacianNd(grid_shape, boundary_conditions='periodic') + >>> lap.eigenvalues() + array([-4., -3., -3., -1., -1., 0.]) + >>> lap.eigenvalues()[-2:] + array([-1., 0.]) + >>> lap.eigenvalues(2) + array([-1., 0.]) + >>> lap.eigenvectors(1) + array([[0.40824829], + [0.40824829], + [0.40824829], + [0.40824829], + [0.40824829], + [0.40824829]]) + >>> lap.eigenvectors(2) + array([[ 0.5 , 0.40824829], + [ 0. , 0.40824829], + [-0.5 , 0.40824829], + [-0.5 , 0.40824829], + [ 0. , 0.40824829], + [ 0.5 , 0.40824829]]) + >>> lap.eigenvectors() + array([[ 0.40824829, 0.28867513, 0.28867513, 0.5 , 0.5 , + 0.40824829], + [-0.40824829, -0.57735027, -0.57735027, 0. , 0. , + 0.40824829], + [ 0.40824829, 0.28867513, 0.28867513, -0.5 , -0.5 , + 0.40824829], + [-0.40824829, 0.28867513, 0.28867513, -0.5 , -0.5 , + 0.40824829], + [ 0.40824829, -0.57735027, -0.57735027, 0. , 0. , + 0.40824829], + [-0.40824829, 0.28867513, 0.28867513, 0.5 , 0.5 , + 0.40824829]]) + + The two-dimensional Laplacian is illustrated on a regular grid with + ``grid_shape = (2, 3)`` points in each dimension. 
+ + >>> grid_shape = (2, 3) + >>> n = np.prod(grid_shape) + + Numeration of grid points is as follows: + + >>> np.arange(n).reshape(grid_shape + (-1,)) + array([[[0], + [1], + [2]], + + [[3], + [4], + [5]]]) + + Each of the boundary conditions ``'dirichlet'``, ``'periodic'``, and + ``'neumann'`` is illustrated separately; with ``'dirichlet'`` + + >>> lap = LaplacianNd(grid_shape, boundary_conditions='dirichlet') + >>> lap.tosparse() + <6x6 sparse array of type '' + with 20 stored elements in Compressed Sparse Row format> + >>> lap.toarray() + array([[-4, 1, 0, 1, 0, 0], + [ 1, -4, 1, 0, 1, 0], + [ 0, 1, -4, 0, 0, 1], + [ 1, 0, 0, -4, 1, 0], + [ 0, 1, 0, 1, -4, 1], + [ 0, 0, 1, 0, 1, -4]], dtype=int8) + >>> np.array_equal(lap.matmat(np.eye(n)), lap.toarray()) + True + >>> np.array_equal(lap.tosparse().toarray(), lap.toarray()) + True + >>> lap.eigenvalues() + array([-6.41421356, -5. , -4.41421356, -3.58578644, -3. , + -1.58578644]) + >>> eigvals = eigvalsh(lap.toarray().astype(np.float64)) + >>> np.allclose(lap.eigenvalues(), eigvals) + True + >>> np.allclose(lap.toarray() @ lap.eigenvectors(), + ... lap.eigenvectors() @ np.diag(lap.eigenvalues())) + True + + with ``'periodic'`` + + >>> lap = LaplacianNd(grid_shape, boundary_conditions='periodic') + >>> lap.tosparse() + <6x6 sparse array of type '' + with 24 stored elements in Compressed Sparse Row format> + >>> lap.toarray() + array([[-4, 1, 1, 2, 0, 0], + [ 1, -4, 1, 0, 2, 0], + [ 1, 1, -4, 0, 0, 2], + [ 2, 0, 0, -4, 1, 1], + [ 0, 2, 0, 1, -4, 1], + [ 0, 0, 2, 1, 1, -4]], dtype=int8) + >>> np.array_equal(lap.matmat(np.eye(n)), lap.toarray()) + True + >>> np.array_equal(lap.tosparse().toarray(), lap.toarray()) + True + >>> lap.eigenvalues() + array([-7., -7., -4., -3., -3., 0.]) + >>> eigvals = eigvalsh(lap.toarray().astype(np.float64)) + >>> np.allclose(lap.eigenvalues(), eigvals) + True + >>> np.allclose(lap.toarray() @ lap.eigenvectors(), + ... 
lap.eigenvectors() @ np.diag(lap.eigenvalues())) + True + + and with ``'neumann'`` + + >>> lap = LaplacianNd(grid_shape, boundary_conditions='neumann') + >>> lap.tosparse() + <6x6 sparse array of type '' + with 20 stored elements in Compressed Sparse Row format> + >>> lap.toarray() + array([[-2, 1, 0, 1, 0, 0], + [ 1, -3, 1, 0, 1, 0], + [ 0, 1, -2, 0, 0, 1], + [ 1, 0, 0, -2, 1, 0], + [ 0, 1, 0, 1, -3, 1], + [ 0, 0, 1, 0, 1, -2]]) + >>> np.array_equal(lap.matmat(np.eye(n)), lap.toarray()) + True + >>> np.array_equal(lap.tosparse().toarray(), lap.toarray()) + True + >>> lap.eigenvalues() + array([-5., -3., -3., -2., -1., 0.]) + >>> eigvals = eigvalsh(lap.toarray().astype(np.float64)) + >>> np.allclose(lap.eigenvalues(), eigvals) + True + >>> np.allclose(lap.toarray() @ lap.eigenvectors(), + ... lap.eigenvectors() @ np.diag(lap.eigenvalues())) + True + + """ + + def __init__(self, grid_shape, *, + boundary_conditions='neumann', + dtype=np.int8): + + if boundary_conditions not in ('dirichlet', 'neumann', 'periodic'): + raise ValueError( + f"Unknown value {boundary_conditions!r} is given for " + "'boundary_conditions' parameter. The valid options are " + "'dirichlet', 'periodic', and 'neumann' (default)." + ) + + self.grid_shape = grid_shape + self.boundary_conditions = boundary_conditions + # LaplacianNd folds all dimensions in `grid_shape` into a single one + N = np.prod(grid_shape) + super().__init__(dtype=dtype, shape=(N, N)) + + def _eigenvalue_ordering(self, m): + """Compute `m` largest eigenvalues in each of the ``N`` directions, + i.e., up to ``m * N`` total, order them and return `m` largest. 
+ """ + grid_shape = self.grid_shape + if m is None: + indices = np.indices(grid_shape) + Leig = np.zeros(grid_shape) + else: + grid_shape_min = min(grid_shape, + tuple(np.ones_like(grid_shape) * m)) + indices = np.indices(grid_shape_min) + Leig = np.zeros(grid_shape_min) + + for j, n in zip(indices, grid_shape): + if self.boundary_conditions == 'dirichlet': + Leig += -4 * np.sin(np.pi * (j + 1) / (2 * (n + 1))) ** 2 + elif self.boundary_conditions == 'neumann': + Leig += -4 * np.sin(np.pi * j / (2 * n)) ** 2 + else: # boundary_conditions == 'periodic' + Leig += -4 * np.sin(np.pi * np.floor((j + 1) / 2) / n) ** 2 + + Leig_ravel = Leig.ravel() + ind = np.argsort(Leig_ravel) + eigenvalues = Leig_ravel[ind] + if m is not None: + eigenvalues = eigenvalues[-m:] + ind = ind[-m:] + + return eigenvalues, ind + + def eigenvalues(self, m=None): + """Return the requested number of eigenvalues. + + Parameters + ---------- + m : int, optional + The positive number of smallest eigenvalues to return. + If not provided, then all eigenvalues will be returned. + + Returns + ------- + eigenvalues : float array + The requested `m` smallest or all eigenvalues, in ascending order. + """ + eigenvalues, _ = self._eigenvalue_ordering(m) + return eigenvalues + + def _ev1d(self, j, n): + """Return 1 eigenvector in 1d with index `j` + and number of grid points `n` where ``j < n``. + """ + if self.boundary_conditions == 'dirichlet': + i = np.pi * (np.arange(n) + 1) / (n + 1) + ev = np.sqrt(2. / (n + 1.)) * np.sin(i * (j + 1)) + elif self.boundary_conditions == 'neumann': + i = np.pi * (np.arange(n) + 0.5) / n + ev = np.sqrt((1. if j == 0 else 2.) / n) * np.cos(i * j) + else: # boundary_conditions == 'periodic' + if j == 0: + ev = np.sqrt(1. / n) * np.ones(n) + elif j + 1 == n and n % 2 == 0: + ev = np.sqrt(1. / n) * np.tile([1, -1], n//2) + else: + i = 2. * np.pi * (np.arange(n) + 0.5) / n + ev = np.sqrt(2. 
/ n) * np.cos(i * np.floor((j + 1) / 2)) + # make small values exact zeros correcting round-off errors + # due to symmetry of eigenvectors the exact 0. is correct + ev[np.abs(ev) < np.finfo(np.float64).eps] = 0. + return ev + + def _one_eve(self, k): + """Return 1 eigenvector in Nd with multi-index `j` + as a tensor product of the corresponding 1d eigenvectors. + """ + phi = [self._ev1d(j, n) for j, n in zip(k, self.grid_shape)] + result = phi[0] + for phi in phi[1:]: + result = np.tensordot(result, phi, axes=0) + return np.asarray(result).ravel() + + def eigenvectors(self, m=None): + """Return the requested number of eigenvectors for ordered eigenvalues. + + Parameters + ---------- + m : int, optional + The positive number of eigenvectors to return. If not provided, + then all eigenvectors will be returned. + + Returns + ------- + eigenvectors : float array + An array with columns made of the requested `m` or all eigenvectors. + The columns are ordered according to the `m` ordered eigenvalues. + """ + _, ind = self._eigenvalue_ordering(m) + if m is None: + grid_shape_min = self.grid_shape + else: + grid_shape_min = min(self.grid_shape, + tuple(np.ones_like(self.grid_shape) * m)) + + N_indices = np.unravel_index(ind, grid_shape_min) + N_indices = [tuple(x) for x in zip(*N_indices)] + eigenvectors_list = [self._one_eve(k) for k in N_indices] + return np.column_stack(eigenvectors_list) + + def toarray(self): + """ + Converts the Laplacian data to a dense array. + + Returns + ------- + L : ndarray + The shape is ``(N, N)`` where ``N = np.prod(grid_shape)``. 
+ + """ + grid_shape = self.grid_shape + n = np.prod(grid_shape) + L = np.zeros([n, n], dtype=np.int8) + # Scratch arrays + L_i = np.empty_like(L) + Ltemp = np.empty_like(L) + + for ind, dim in enumerate(grid_shape): + # Start zeroing out L_i + L_i[:] = 0 + # Allocate the top left corner with the kernel of L_i + # Einsum returns writable view of arrays + np.einsum("ii->i", L_i[:dim, :dim])[:] = -2 + np.einsum("ii->i", L_i[: dim - 1, 1:dim])[:] = 1 + np.einsum("ii->i", L_i[1:dim, : dim - 1])[:] = 1 + + if self.boundary_conditions == 'neumann': + L_i[0, 0] = -1 + L_i[dim - 1, dim - 1] = -1 + elif self.boundary_conditions == 'periodic': + if dim > 1: + L_i[0, dim - 1] += 1 + L_i[dim - 1, 0] += 1 + else: + L_i[0, 0] += 1 + + # kron is too slow for large matrices hence the next two tricks + # 1- kron(eye, mat) is block_diag(mat, mat, ...) + # 2- kron(mat, eye) can be performed by 4d stride trick + + # 1- + new_dim = dim + # for block_diag we tile the top left portion on the diagonal + if ind > 0: + tiles = np.prod(grid_shape[:ind]) + for j in range(1, tiles): + L_i[j*dim:(j+1)*dim, j*dim:(j+1)*dim] = L_i[:dim, :dim] + new_dim += dim + # 2- + # we need the keep L_i, but reset the array + Ltemp[:new_dim, :new_dim] = L_i[:new_dim, :new_dim] + tiles = int(np.prod(grid_shape[ind+1:])) + # Zero out the top left, the rest is already 0 + L_i[:new_dim, :new_dim] = 0 + idx = [x for x in range(tiles)] + L_i.reshape( + (new_dim, tiles, + new_dim, tiles) + )[:, idx, :, idx] = Ltemp[:new_dim, :new_dim] + + L += L_i + + return L.astype(self.dtype) + + def tosparse(self): + """ + Constructs a sparse array from the Laplacian data. The returned sparse + array format is dependent on the selected boundary conditions. + + Returns + ------- + L : scipy.sparse.sparray + The shape is ``(N, N)`` where ``N = np.prod(grid_shape)``. 
+ + """ + N = len(self.grid_shape) + p = np.prod(self.grid_shape) + L = dia_array((p, p), dtype=np.int8) + + for i in range(N): + dim = self.grid_shape[i] + data = np.ones([3, dim], dtype=np.int8) + data[1, :] *= -2 + + if self.boundary_conditions == 'neumann': + data[1, 0] = -1 + data[1, -1] = -1 + + L_i = dia_array((data, [-1, 0, 1]), shape=(dim, dim), + dtype=np.int8 + ) + + if self.boundary_conditions == 'periodic': + t = dia_array((dim, dim), dtype=np.int8) + t.setdiag([1], k=-dim+1) + t.setdiag([1], k=dim-1) + L_i += t + + for j in range(i): + L_i = kron(eye(self.grid_shape[j], dtype=np.int8), L_i) + for j in range(i + 1, N): + L_i = kron(L_i, eye(self.grid_shape[j], dtype=np.int8)) + L += L_i + return L.astype(self.dtype) + + def _matvec(self, x): + grid_shape = self.grid_shape + N = len(grid_shape) + X = x.reshape(grid_shape + (-1,)) + Y = -2 * N * X + for i in range(N): + Y += np.roll(X, 1, axis=i) + Y += np.roll(X, -1, axis=i) + if self.boundary_conditions in ('neumann', 'dirichlet'): + Y[(slice(None),)*i + (0,) + (slice(None),)*(N-i-1) + ] -= np.roll(X, 1, axis=i)[ + (slice(None),) * i + (0,) + (slice(None),) * (N-i-1) + ] + Y[ + (slice(None),) * i + (-1,) + (slice(None),) * (N-i-1) + ] -= np.roll(X, -1, axis=i)[ + (slice(None),) * i + (-1,) + (slice(None),) * (N-i-1) + ] + + if self.boundary_conditions == 'neumann': + Y[ + (slice(None),) * i + (0,) + (slice(None),) * (N-i-1) + ] += np.roll(X, 0, axis=i)[ + (slice(None),) * i + (0,) + (slice(None),) * (N-i-1) + ] + Y[ + (slice(None),) * i + (-1,) + (slice(None),) * (N-i-1) + ] += np.roll(X, 0, axis=i)[ + (slice(None),) * i + (-1,) + (slice(None),) * (N-i-1) + ] + + return Y.reshape(-1, X.shape[-1]) + + def _matmat(self, x): + return self._matvec(x) + + def _adjoint(self): + return self + + def _transpose(self): + return self + + +class Sakurai(LinearOperator): + """ + Construct a Sakurai matrix in various formats and its eigenvalues. 
+ + Constructs the "Sakurai" matrix motivated by reference [1]_: + square real symmetric positive definite and 5-diagonal + with the main digonal ``[5, 6, 6, ..., 6, 6, 5], the ``+1`` and ``-1`` + diagonals filled with ``-4``, and the ``+2`` and ``-2`` diagonals + made of ``1``. Its eigenvalues are analytically known to be + ``16. * np.power(np.cos(0.5 * k * np.pi / (n + 1)), 4)``. + The matrix gets ill-conditioned with its size growing. + It is useful for testing and benchmarking sparse eigenvalue solvers + especially those taking advantage of its banded 5-diagonal structure. + See the notes below for details. + + Parameters + ---------- + n : int + The size of the matrix. + dtype : dtype + Numerical type of the array. Default is ``np.int8``. + + Methods + ------- + toarray() + Construct a dense array from Laplacian data + tosparse() + Construct a sparse array from Laplacian data + tobanded() + The Sakurai matrix in the format for banded symmetric matrices, + i.e., (3, n) ndarray with 3 upper diagonals + placing the main diagonal at the bottom. + eigenvalues + All eigenvalues of the Sakurai matrix ordered ascending. + + Notes + ----- + Reference [1]_ introduces a generalized eigenproblem for the matrix pair + `A` and `B` where `A` is the identity so we turn it into an eigenproblem + just for the matrix `B` that this function outputs in various formats + together with its eigenvalues. + + .. versionadded:: 1.12.0 + + References + ---------- + .. [1] T. Sakurai, H. Tadano, Y. Inadomi, and U. Nagashima, + "A moment-based method for large-scale generalized + eigenvalue problems", + Appl. Num. Anal. Comp. Math. Vol. 1 No. 2 (2004). + + Examples + -------- + >>> import numpy as np + >>> from scipy.sparse.linalg._special_sparse_arrays import Sakurai + >>> from scipy.linalg import eig_banded + >>> n = 6 + >>> sak = Sakurai(n) + + Since all matrix entries are small integers, ``'int8'`` is + the default dtype for storing matrix representations. 
+ + >>> sak.toarray() + array([[ 5, -4, 1, 0, 0, 0], + [-4, 6, -4, 1, 0, 0], + [ 1, -4, 6, -4, 1, 0], + [ 0, 1, -4, 6, -4, 1], + [ 0, 0, 1, -4, 6, -4], + [ 0, 0, 0, 1, -4, 5]], dtype=int8) + >>> sak.tobanded() + array([[ 1, 1, 1, 1, 1, 1], + [-4, -4, -4, -4, -4, -4], + [ 5, 6, 6, 6, 6, 5]], dtype=int8) + >>> sak.tosparse() + <6x6 sparse matrix of type '' + with 24 stored elements (5 diagonals) in DIAgonal format> + >>> np.array_equal(sak.dot(np.eye(n)), sak.tosparse().toarray()) + True + >>> sak.eigenvalues() + array([0.03922866, 0.56703972, 2.41789479, 5.97822974, + 10.54287655, 14.45473055]) + >>> sak.eigenvalues(2) + array([0.03922866, 0.56703972]) + + The banded form can be used in scipy functions for banded matrices, e.g., + + >>> e = eig_banded(sak.tobanded(), eigvals_only=True) + >>> np.allclose(sak.eigenvalues, e, atol= n * n * n * np.finfo(float).eps) + True + + """ + def __init__(self, n, dtype=np.int8): + self.n = n + self.dtype = dtype + shape = (n, n) + super().__init__(dtype, shape) + + def eigenvalues(self, m=None): + """Return the requested number of eigenvalues. + + Parameters + ---------- + m : int, optional + The positive number of smallest eigenvalues to return. + If not provided, then all eigenvalues will be returned. + + Returns + ------- + eigenvalues : `np.float64` array + The requested `m` smallest or all eigenvalues, in ascending order. + """ + if m is None: + m = self.n + k = np.arange(self.n + 1 -m, self.n + 1) + return np.flip(16. * np.power(np.cos(0.5 * k * np.pi / (self.n + 1)), 4)) + + def tobanded(self): + """ + Construct the Sakurai matrix as a banded array. + """ + d0 = np.r_[5, 6 * np.ones(self.n - 2, dtype=self.dtype), 5] + d1 = -4 * np.ones(self.n, dtype=self.dtype) + d2 = np.ones(self.n, dtype=self.dtype) + return np.array([d2, d1, d0]).astype(self.dtype) + + def tosparse(self): + """ + Construct the Sakurai matrix is a sparse format. 
+ """ + from scipy.sparse import spdiags + d = self.tobanded() + # the banded format has the main diagonal at the bottom + # `spdiags` has no `dtype` parameter so inherits dtype from banded + return spdiags([d[0], d[1], d[2], d[1], d[0]], [-2, -1, 0, 1, 2], + self.n, self.n) + + def toarray(self): + return self.tosparse().toarray() + + def _matvec(self, x): + """ + Construct matrix-free callable banded-matrix-vector multiplication by + the Sakurai matrix without constructing or storing the matrix itself + using the knowledge of its entries and the 5-diagonal format. + """ + x = x.reshape(self.n, -1) + result_dtype = np.promote_types(x.dtype, self.dtype) + sx = np.zeros_like(x, dtype=result_dtype) + sx[0, :] = 5 * x[0, :] - 4 * x[1, :] + x[2, :] + sx[-1, :] = 5 * x[-1, :] - 4 * x[-2, :] + x[-3, :] + sx[1: -1, :] = (6 * x[1: -1, :] - 4 * (x[:-2, :] + x[2:, :]) + + np.pad(x[:-3, :], ((1, 0), (0, 0))) + + np.pad(x[3:, :], ((0, 1), (0, 0)))) + return sx + + def _matmat(self, x): + """ + Construct matrix-free callable matrix-matrix multiplication by + the Sakurai matrix without constructing or storing the matrix itself + by reusing the ``_matvec(x)`` that supports both 1D and 2D arrays ``x``. + """ + return self._matvec(x) + + def _adjoint(self): + return self + + def _transpose(self): + return self + + +class MikotaM(LinearOperator): + """ + Construct a mass matrix in various formats of Mikota pair. + + The mass matrix `M` is square real diagonal + positive definite with entries that are reciprocal to integers. + + Parameters + ---------- + shape : tuple of int + The shape of the matrix. + dtype : dtype + Numerical type of the array. Default is ``np.float64``. + + Methods + ------- + toarray() + Construct a dense array from Mikota data + tosparse() + Construct a sparse array from Mikota data + tobanded() + The format for banded symmetric matrices, + i.e., (1, n) ndarray with the main diagonal. 
+ """ + def __init__(self, shape, dtype=np.float64): + self.shape = shape + self.dtype = dtype + super().__init__(dtype, shape) + + def _diag(self): + # The matrix is constructed from its diagonal 1 / [1, ..., N+1]; + # compute in a function to avoid duplicated code & storage footprint + return (1. / np.arange(1, self.shape[0] + 1)).astype(self.dtype) + + def tobanded(self): + return self._diag() + + def tosparse(self): + from scipy.sparse import diags + return diags([self._diag()], [0], shape=self.shape, dtype=self.dtype) + + def toarray(self): + return np.diag(self._diag()).astype(self.dtype) + + def _matvec(self, x): + """ + Construct matrix-free callable banded-matrix-vector multiplication by + the Mikota mass matrix without constructing or storing the matrix itself + using the knowledge of its entries and the diagonal format. + """ + x = x.reshape(self.shape[0], -1) + return self._diag()[:, np.newaxis] * x + + def _matmat(self, x): + """ + Construct matrix-free callable matrix-matrix multiplication by + the Mikota mass matrix without constructing or storing the matrix itself + by reusing the ``_matvec(x)`` that supports both 1D and 2D arrays ``x``. + """ + return self._matvec(x) + + def _adjoint(self): + return self + + def _transpose(self): + return self + + +class MikotaK(LinearOperator): + """ + Construct a stiffness matrix in various formats of Mikota pair. + + The stiffness matrix `K` is square real tri-diagonal symmetric + positive definite with integer entries. + + Parameters + ---------- + shape : tuple of int + The shape of the matrix. + dtype : dtype + Numerical type of the array. Default is ``np.int32``. + + Methods + ------- + toarray() + Construct a dense array from Mikota data + tosparse() + Construct a sparse array from Mikota data + tobanded() + The format for banded symmetric matrices, + i.e., (2, n) ndarray with 2 upper diagonals + placing the main diagonal at the bottom. 
+ """ + def __init__(self, shape, dtype=np.int32): + self.shape = shape + self.dtype = dtype + super().__init__(dtype, shape) + # The matrix is constructed from its diagonals; + # we precompute these to avoid duplicating the computation + n = shape[0] + self._diag0 = np.arange(2 * n - 1, 0, -2, dtype=self.dtype) + self._diag1 = - np.arange(n - 1, 0, -1, dtype=self.dtype) + + def tobanded(self): + return np.array([np.pad(self._diag1, (1, 0), 'constant'), self._diag0]) + + def tosparse(self): + from scipy.sparse import diags + return diags([self._diag1, self._diag0, self._diag1], [-1, 0, 1], + shape=self.shape, dtype=self.dtype) + + def toarray(self): + return self.tosparse().toarray() + + def _matvec(self, x): + """ + Construct matrix-free callable banded-matrix-vector multiplication by + the Mikota stiffness matrix without constructing or storing the matrix + itself using the knowledge of its entries and the 3-diagonal format. + """ + x = x.reshape(self.shape[0], -1) + result_dtype = np.promote_types(x.dtype, self.dtype) + kx = np.zeros_like(x, dtype=result_dtype) + d1 = self._diag1 + d0 = self._diag0 + kx[0, :] = d0[0] * x[0, :] + d1[0] * x[1, :] + kx[-1, :] = d1[-1] * x[-2, :] + d0[-1] * x[-1, :] + kx[1: -1, :] = (d1[:-1, None] * x[: -2, :] + + d0[1: -1, None] * x[1: -1, :] + + d1[1:, None] * x[2:, :]) + return kx + + def _matmat(self, x): + """ + Construct matrix-free callable matrix-matrix multiplication by + the Stiffness mass matrix without constructing or storing the matrix itself + by reusing the ``_matvec(x)`` that supports both 1D and 2D arrays ``x``. + """ + return self._matvec(x) + + def _adjoint(self): + return self + + def _transpose(self): + return self + + +class MikotaPair: + """ + Construct the Mikota pair of matrices in various formats and + eigenvalues of the generalized eigenproblem with them. 
​
    The Mikota pair of matrices [1]_, [2]_ models a vibration problem
    of a linear mass-spring system with the ends attached where
    the stiffness of the springs and the masses increase along
    the system length such that vibration frequencies are subsequent
    integers 1, 2, ..., `n` where `n` is the number of the masses. Thus,
    eigenvalues of the generalized eigenvalue problem for
    the matrix pair `K` and `M` where `K` is the system stiffness matrix
    and `M` is the system mass matrix are the squares of the integers,
    i.e., 1, 4, 9, ..., ``n * n``.

    The stiffness matrix `K` is square real tri-diagonal symmetric
    positive definite. The mass matrix `M` is diagonal with diagonal
    entries 1, 1/2, 1/3, ..., ``1/n``. Both matrices get
    ill-conditioned with `n` growing.

    Parameters
    ----------
    n : int
        The size of the matrices of the Mikota pair.
    dtype : dtype
        Numerical type of the array. Default is ``np.float64``.

    Attributes
    ----------
    eigenvalues : 1D ndarray, ``np.uint64``
        All eigenvalues of the Mikota pair ordered ascending.

    Methods
    -------
    MikotaK()
        A `LinearOperator` custom object for the stiffness matrix.
    MikotaM()
        A `LinearOperator` custom object for the mass matrix.

    .. versionadded:: 1.12.0

    References
    ----------
    .. [1] J. Mikota, "Frequency tuning of chain structure multibody oscillators
           to place the natural frequencies at omega1 and N-1 integer multiples
           omega2,..., omegaN", Z. Angew. Math. Mech. 81 (2001), S2, S201-S202.
           Appl. Num. Anal. Comp. Math. Vol. 1 No. 2 (2004).
    .. [2] Peter C. Muller and Metin Gurgoze,
           "Natural frequencies of a multi-degree-of-freedom vibration system",
           Proc. Appl. Math. Mech. 6, 319-320 (2006).
           http://dx.doi.org/10.1002/pamm.200610141.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse.linalg._special_sparse_arrays import MikotaPair
    >>> n = 6
    >>> mik = MikotaPair(n)
    >>> mik_k = mik.k
    >>> mik_m = mik.m
    >>> mik_k.toarray()
    array([[11., -5.,  0.,  0.,  0.,  0.],
           [-5.,  9., -4.,  0.,  0.,  0.],
           [ 0., -4.,  7., -3.,  0.,  0.],
           [ 0.,  0., -3.,  5., -2.,  0.],
           [ 0.,  0.,  0., -2.,  3., -1.],
           [ 0.,  0.,  0.,  0., -1.,  1.]])
    >>> mik_k.tobanded()
    array([[ 0., -5., -4., -3., -2., -1.],
           [11.,  9.,  7.,  5.,  3.,  1.]])
    >>> mik_m.tobanded()
    array([1.        , 0.5       , 0.33333333, 0.25      , 0.2       ,
           0.16666667])
    >>> mik_k.tosparse()
    <6x6 sparse matrix of type ''
            with 16 stored elements (3 diagonals) in DIAgonal format>
    >>> mik_m.tosparse()
    <6x6 sparse matrix of type ''
            with 6 stored elements (1 diagonals) in DIAgonal format>
    >>> np.array_equal(mik_k(np.eye(n)), mik_k.toarray())
    True
    >>> np.array_equal(mik_m(np.eye(n)), mik_m.toarray())
    True
    >>> mik.eigenvalues()
    array([ 1,  4,  9, 16, 25, 36])
    >>> mik.eigenvalues(2)
    array([ 1, 4])

    """
    def __init__(self, n, dtype=np.float64):
        self.n = n
        self.dtype = dtype
        self.shape = (n, n)
        # The two members of the pair are matrix-free `LinearOperator`
        # custom objects (defined above) sharing shape and dtype:
        # `m` is the mass matrix, `k` the stiffness matrix.
        self.m = MikotaM(self.shape, self.dtype)
        self.k = MikotaK(self.shape, self.dtype)

    def eigenvalues(self, m=None):
        """Return the requested number of eigenvalues.

        Parameters
        ----------
        m : int, optional
            The positive number of smallest eigenvalues to return.
            If not provided, then all eigenvalues will be returned.

        Returns
        -------
        eigenvalues : `np.uint64` array
            The requested `m` smallest or all eigenvalues, in ascending order.
+ """ + if m is None: + m = self.n + arange_plus1 = np.arange(1, m + 1, dtype=np.uint64) + return arange_plus1 * arange_plus1 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/dsolve.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/dsolve.py new file mode 100644 index 0000000000000000000000000000000000000000..e4d0a33200cdf82eb36f9ff8c66678897e70a3a6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/dsolve.py @@ -0,0 +1,24 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.sparse.linalg` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'MatrixRankWarning', 'SuperLU', 'factorized', + 'spilu', 'splu', 'spsolve', + 'spsolve_triangular', 'use_solver', 'linsolve', 'test' +] + +dsolve_modules = ['linsolve'] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="sparse.linalg", module="dsolve", + private_modules=["_dsolve"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/eigen.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/eigen.py new file mode 100644 index 0000000000000000000000000000000000000000..0022daecea9917fd53b17724aae98226d6bb5d57 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/eigen.py @@ -0,0 +1,23 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.sparse.linalg` namespace for importing the functions +# included below. 

from scipy._lib.deprecation import _sub_module_deprecation


# NOTE: 'ArpackError' appeared twice in the original list; the duplicate
# is removed — duplicated entries in `__all__` are redundant and confuse
# `from ... import *` and API-coverage tooling.
__all__ = [  # noqa: F822
    'ArpackError', 'ArpackNoConvergence',
    'eigs', 'eigsh', 'lobpcg', 'svds', 'arpack', 'test'
]

eigen_modules = ['arpack']


def __dir__():
    # Limit `dir()` to the deprecated public names.
    return __all__


def __getattr__(name):
    # Forward attribute access to the private `_eigen` module with a
    # deprecation warning (PEP 562 module-level `__getattr__`).
    return _sub_module_deprecation(sub_package="sparse.linalg", module="eigen",
                                   private_modules=["_eigen"], all=__all__,
                                   attribute=name)

# --- patch residue: new empty scipy/sparse/tests/__init__.py and binary
# __pycache__/*.pyc artifacts (not representable as text) ---
b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_base.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_common1d.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_common1d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5c84602f73472f4a5a3be5490633e393a1a530a1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_common1d.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_construct.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_construct.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a82a715dd40bb73dc44b8e57f4a85db88ce2f7da Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_construct.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_coo.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_coo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e4611d8b699d7ad483ce8f249250630c71bd05f9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_coo.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_csc.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_csc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c4b07d1e8d1ef07bd20d2b1a8f19442c83ba537 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_csc.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_csr.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_csr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e335f3b1b1e65ec88dd3ed4bdca578fae4f77589 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_csr.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_deprecations.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_deprecations.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00609b49652624760051f8581f156d86b8e807f8 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_deprecations.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_dok.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_dok.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39ace467e16ea972cb6d8fd225dfb2c18b113ad7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_dok.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_extract.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_extract.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50a228042c2ce88cc42e9c96939059bd2ed37482 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_extract.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_matrix_io.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_matrix_io.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..40b98076ed5c30ce22fc3dc1a593a089df8aaaf7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_matrix_io.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_minmax1d.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_minmax1d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc82f8278e24a08810171d86219b7f74e1278266 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_minmax1d.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_sparsetools.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_sparsetools.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3d43cc5b0f9ebaa52aeb9dcd935c354fea89dc66 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_sparsetools.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_spfuncs.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_spfuncs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a1394634ad078d4aefd3fdcfd4023edd96b41cc Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_spfuncs.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_sputils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_sputils.cpython-310.pyc new file mode 100644 
index 0000000000000000000000000000000000000000..42b4cdd6fab985eb45bc664c4e03a16a92f4c7ea Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_sputils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_array_api.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_array_api.py new file mode 100644 index 0000000000000000000000000000000000000000..4385a4e3ab3e50f9076bcbbc539c448131e11c71 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_array_api.py @@ -0,0 +1,561 @@ +import pytest +import numpy as np +import numpy.testing as npt +import scipy.sparse +import scipy.sparse.linalg as spla +from scipy._lib._util import VisibleDeprecationWarning + + +sparray_types = ('bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil') + +sparray_classes = [ + getattr(scipy.sparse, f'{T}_array') for T in sparray_types +] + +A = np.array([ + [0, 1, 2, 0], + [2, 0, 0, 3], + [1, 4, 0, 0] +]) + +B = np.array([ + [0, 1], + [2, 0] +]) + +X = np.array([ + [1, 0, 0, 1], + [2, 1, 2, 0], + [0, 2, 1, 0], + [0, 0, 1, 2] +], dtype=float) + + +sparrays = [sparray(A) for sparray in sparray_classes] +square_sparrays = [sparray(B) for sparray in sparray_classes] +eig_sparrays = [sparray(X) for sparray in sparray_classes] + +parametrize_sparrays = pytest.mark.parametrize( + "A", sparrays, ids=sparray_types +) +parametrize_square_sparrays = pytest.mark.parametrize( + "B", square_sparrays, ids=sparray_types +) +parametrize_eig_sparrays = pytest.mark.parametrize( + "X", eig_sparrays, ids=sparray_types +) + + +@parametrize_sparrays +def test_sum(A): + assert not isinstance(A.sum(axis=0), np.matrix), \ + "Expected array, got matrix" + assert A.sum(axis=0).shape == (4,) + assert A.sum(axis=1).shape == (3,) + + +@parametrize_sparrays +def test_mean(A): + assert not isinstance(A.mean(axis=1), np.matrix), \ + "Expected array, got matrix" + + +@parametrize_sparrays +def 
test_min_max(A): + # Some formats don't support min/max operations, so we skip those here. + if hasattr(A, 'min'): + assert not isinstance(A.min(axis=1), np.matrix), \ + "Expected array, got matrix" + if hasattr(A, 'max'): + assert not isinstance(A.max(axis=1), np.matrix), \ + "Expected array, got matrix" + if hasattr(A, 'argmin'): + assert not isinstance(A.argmin(axis=1), np.matrix), \ + "Expected array, got matrix" + if hasattr(A, 'argmax'): + assert not isinstance(A.argmax(axis=1), np.matrix), \ + "Expected array, got matrix" + + +@parametrize_sparrays +def test_todense(A): + assert not isinstance(A.todense(), np.matrix), \ + "Expected array, got matrix" + + +@parametrize_sparrays +def test_indexing(A): + if A.__class__.__name__[:3] in ('dia', 'coo', 'bsr'): + return + + with pytest.raises(NotImplementedError): + A[1, :] + + with pytest.raises(NotImplementedError): + A[:, 1] + + with pytest.raises(NotImplementedError): + A[1, [1, 2]] + + with pytest.raises(NotImplementedError): + A[[1, 2], 1] + + assert isinstance(A[[0]], scipy.sparse.sparray), \ + "Expected sparse array, got sparse matrix" + assert isinstance(A[1, [[1, 2]]], scipy.sparse.sparray), \ + "Expected ndarray, got sparse array" + assert isinstance(A[[[1, 2]], 1], scipy.sparse.sparray), \ + "Expected ndarray, got sparse array" + assert isinstance(A[:, [1, 2]], scipy.sparse.sparray), \ + "Expected sparse array, got something else" + + +@parametrize_sparrays +def test_dense_addition(A): + X = np.random.random(A.shape) + assert not isinstance(A + X, np.matrix), "Expected array, got matrix" + + +@parametrize_sparrays +def test_sparse_addition(A): + assert isinstance((A + A), scipy.sparse.sparray), "Expected array, got matrix" + + +@parametrize_sparrays +def test_elementwise_mul(A): + assert np.all((A * A).todense() == A.power(2).todense()) + + +@parametrize_sparrays +def test_elementwise_rmul(A): + with pytest.raises(TypeError): + None * A + + with pytest.raises(ValueError): + np.eye(3) * 
scipy.sparse.csr_array(np.arange(6).reshape(2, 3)) + + assert np.all((2 * A) == (A.todense() * 2)) + + assert np.all((A.todense() * A) == (A.todense() ** 2)) + + +@parametrize_sparrays +def test_matmul(A): + assert np.all((A @ A.T).todense() == A.dot(A.T).todense()) + + +@parametrize_sparrays +def test_power_operator(A): + assert isinstance((A**2), scipy.sparse.sparray), "Expected array, got matrix" + + # https://github.com/scipy/scipy/issues/15948 + npt.assert_equal((A**2).todense(), (A.todense())**2) + + # power of zero is all ones (dense) so helpful msg exception + with pytest.raises(NotImplementedError, match="zero power"): + A**0 + + +@parametrize_sparrays +def test_sparse_divide(A): + assert isinstance(A / A, np.ndarray) + +@parametrize_sparrays +def test_sparse_dense_divide(A): + with pytest.warns(RuntimeWarning): + assert isinstance((A / A.todense()), scipy.sparse.sparray) + +@parametrize_sparrays +def test_dense_divide(A): + assert isinstance((A / 2), scipy.sparse.sparray), "Expected array, got matrix" + + +@parametrize_sparrays +def test_no_A_attr(A): + with pytest.warns(VisibleDeprecationWarning): + A.A + + +@parametrize_sparrays +def test_no_H_attr(A): + with pytest.warns(VisibleDeprecationWarning): + A.H + + +@parametrize_sparrays +def test_getrow_getcol(A): + assert isinstance(A._getcol(0), scipy.sparse.sparray) + assert isinstance(A._getrow(0), scipy.sparse.sparray) + + +# -- linalg -- + +@parametrize_sparrays +def test_as_linearoperator(A): + L = spla.aslinearoperator(A) + npt.assert_allclose(L * [1, 2, 3, 4], A @ [1, 2, 3, 4]) + + +@parametrize_square_sparrays +def test_inv(B): + if B.__class__.__name__[:3] != 'csc': + return + + C = spla.inv(B) + + assert isinstance(C, scipy.sparse.sparray) + npt.assert_allclose(C.todense(), np.linalg.inv(B.todense())) + + +@parametrize_square_sparrays +def test_expm(B): + if B.__class__.__name__[:3] != 'csc': + return + + Bmat = scipy.sparse.csc_matrix(B) + + C = spla.expm(B) + + assert isinstance(C, 
scipy.sparse.sparray) + npt.assert_allclose( + C.todense(), + spla.expm(Bmat).todense() + ) + + +@parametrize_square_sparrays +def test_expm_multiply(B): + if B.__class__.__name__[:3] != 'csc': + return + + npt.assert_allclose( + spla.expm_multiply(B, np.array([1, 2])), + spla.expm(B) @ [1, 2] + ) + + +@parametrize_sparrays +def test_norm(A): + C = spla.norm(A) + npt.assert_allclose(C, np.linalg.norm(A.todense())) + + +@parametrize_square_sparrays +def test_onenormest(B): + C = spla.onenormest(B) + npt.assert_allclose(C, np.linalg.norm(B.todense(), 1)) + + +@parametrize_square_sparrays +def test_spsolve(B): + if B.__class__.__name__[:3] not in ('csc', 'csr'): + return + + npt.assert_allclose( + spla.spsolve(B, [1, 2]), + np.linalg.solve(B.todense(), [1, 2]) + ) + + +def test_spsolve_triangular(): + X = scipy.sparse.csr_array([ + [1, 0, 0, 0], + [2, 1, 0, 0], + [3, 2, 1, 0], + [4, 3, 2, 1], + ]) + spla.spsolve_triangular(X, [1, 2, 3, 4]) + + +@parametrize_square_sparrays +def test_factorized(B): + if B.__class__.__name__[:3] != 'csc': + return + + LU = spla.factorized(B) + npt.assert_allclose( + LU(np.array([1, 2])), + np.linalg.solve(B.todense(), [1, 2]) + ) + + +@parametrize_square_sparrays +@pytest.mark.parametrize( + "solver", + ["bicg", "bicgstab", "cg", "cgs", "gmres", "lgmres", "minres", "qmr", + "gcrotmk", "tfqmr"] +) +def test_solvers(B, solver): + if solver == "minres": + kwargs = {} + else: + kwargs = {'atol': 1e-5} + + x, info = getattr(spla, solver)(B, np.array([1, 2]), **kwargs) + assert info >= 0 # no errors, even if perhaps did not converge fully + npt.assert_allclose(x, [1, 1], atol=1e-1) + + +@parametrize_sparrays +@pytest.mark.parametrize( + "solver", + ["lsqr", "lsmr"] +) +def test_lstsqr(A, solver): + x, *_ = getattr(spla, solver)(A, [1, 2, 3]) + npt.assert_allclose(A @ x, [1, 2, 3]) + + +@parametrize_eig_sparrays +def test_eigs(X): + e, v = spla.eigs(X, k=1) + npt.assert_allclose( + X @ v, + e[0] * v + ) + + +@parametrize_eig_sparrays +def 
test_eigsh(X): + X = X + X.T + e, v = spla.eigsh(X, k=1) + npt.assert_allclose( + X @ v, + e[0] * v + ) + + +@parametrize_eig_sparrays +def test_svds(X): + u, s, vh = spla.svds(X, k=3) + u2, s2, vh2 = np.linalg.svd(X.todense()) + s = np.sort(s) + s2 = np.sort(s2[:3]) + npt.assert_allclose(s, s2, atol=1e-3) + + +def test_splu(): + X = scipy.sparse.csc_array([ + [1, 0, 0, 0], + [2, 1, 0, 0], + [3, 2, 1, 0], + [4, 3, 2, 1], + ]) + LU = spla.splu(X) + npt.assert_allclose( + LU.solve(np.array([1, 2, 3, 4])), + np.asarray([1, 0, 0, 0], dtype=np.float64), + rtol=1e-14, atol=3e-16 + ) + + +def test_spilu(): + X = scipy.sparse.csc_array([ + [1, 0, 0, 0], + [2, 1, 0, 0], + [3, 2, 1, 0], + [4, 3, 2, 1], + ]) + LU = spla.spilu(X) + npt.assert_allclose( + LU.solve(np.array([1, 2, 3, 4])), + np.asarray([1, 0, 0, 0], dtype=np.float64), + rtol=1e-14, atol=3e-16 + ) + + +@pytest.mark.parametrize( + "cls,indices_attrs", + [ + ( + scipy.sparse.csr_array, + ["indices", "indptr"], + ), + ( + scipy.sparse.csc_array, + ["indices", "indptr"], + ), + ( + scipy.sparse.coo_array, + ["row", "col"], + ), + ] +) +@pytest.mark.parametrize("expected_dtype", [np.int64, np.int32]) +def test_index_dtype_compressed(cls, indices_attrs, expected_dtype): + input_array = scipy.sparse.coo_array(np.arange(9).reshape(3, 3)) + coo_tuple = ( + input_array.data, + ( + input_array.row.astype(expected_dtype), + input_array.col.astype(expected_dtype), + ) + ) + + result = cls(coo_tuple) + for attr in indices_attrs: + assert getattr(result, attr).dtype == expected_dtype + + result = cls(coo_tuple, shape=(3, 3)) + for attr in indices_attrs: + assert getattr(result, attr).dtype == expected_dtype + + if issubclass(cls, scipy.sparse._compressed._cs_matrix): + input_array_csr = input_array.tocsr() + csr_tuple = ( + input_array_csr.data, + input_array_csr.indices.astype(expected_dtype), + input_array_csr.indptr.astype(expected_dtype), + ) + + result = cls(csr_tuple) + for attr in indices_attrs: + assert getattr(result, 
attr).dtype == expected_dtype + + result = cls(csr_tuple, shape=(3, 3)) + for attr in indices_attrs: + assert getattr(result, attr).dtype == expected_dtype + + +def test_default_is_matrix_diags(): + m = scipy.sparse.diags([0, 1, 2]) + assert not isinstance(m, scipy.sparse.sparray) + + +def test_default_is_matrix_eye(): + m = scipy.sparse.eye(3) + assert not isinstance(m, scipy.sparse.sparray) + + +def test_default_is_matrix_spdiags(): + m = scipy.sparse.spdiags([1, 2, 3], 0, 3, 3) + assert not isinstance(m, scipy.sparse.sparray) + + +def test_default_is_matrix_identity(): + m = scipy.sparse.identity(3) + assert not isinstance(m, scipy.sparse.sparray) + + +def test_default_is_matrix_kron_dense(): + m = scipy.sparse.kron( + np.array([[1, 2], [3, 4]]), np.array([[4, 3], [2, 1]]) + ) + assert not isinstance(m, scipy.sparse.sparray) + + +def test_default_is_matrix_kron_sparse(): + m = scipy.sparse.kron( + np.array([[1, 2], [3, 4]]), np.array([[1, 0], [0, 0]]) + ) + assert not isinstance(m, scipy.sparse.sparray) + + +def test_default_is_matrix_kronsum(): + m = scipy.sparse.kronsum( + np.array([[1, 0], [0, 1]]), np.array([[0, 1], [1, 0]]) + ) + assert not isinstance(m, scipy.sparse.sparray) + + +def test_default_is_matrix_random(): + m = scipy.sparse.random(3, 3) + assert not isinstance(m, scipy.sparse.sparray) + + +def test_default_is_matrix_rand(): + m = scipy.sparse.rand(3, 3) + assert not isinstance(m, scipy.sparse.sparray) + + +@pytest.mark.parametrize("fn", (scipy.sparse.hstack, scipy.sparse.vstack)) +def test_default_is_matrix_stacks(fn): + """Same idea as `test_default_construction_fn_matrices`, but for the + stacking creation functions.""" + A = scipy.sparse.coo_matrix(np.eye(2)) + B = scipy.sparse.coo_matrix([[0, 1], [1, 0]]) + m = fn([A, B]) + assert not isinstance(m, scipy.sparse.sparray) + + +def test_blocks_default_construction_fn_matrices(): + """Same idea as `test_default_construction_fn_matrices`, but for the block + creation function""" + A = 
scipy.sparse.coo_matrix(np.eye(2)) + B = scipy.sparse.coo_matrix([[2], [0]]) + C = scipy.sparse.coo_matrix([[3]]) + + # block diag + m = scipy.sparse.block_diag((A, B, C)) + assert not isinstance(m, scipy.sparse.sparray) + + # bmat + m = scipy.sparse.bmat([[A, None], [None, C]]) + assert not isinstance(m, scipy.sparse.sparray) + + +def test_format_property(): + for fmt in sparray_types: + arr_cls = getattr(scipy.sparse, f"{fmt}_array") + M = arr_cls([[1, 2]]) + assert M.format == fmt + assert M._format == fmt + with pytest.raises(AttributeError): + M.format = "qqq" + + +def test_issparse(): + m = scipy.sparse.eye(3) + a = scipy.sparse.csr_array(m) + assert not isinstance(m, scipy.sparse.sparray) + assert isinstance(a, scipy.sparse.sparray) + + # Both sparse arrays and sparse matrices should be sparse + assert scipy.sparse.issparse(a) + assert scipy.sparse.issparse(m) + + # ndarray and array_likes are not sparse + assert not scipy.sparse.issparse(a.todense()) + assert not scipy.sparse.issparse(m.todense()) + + +def test_isspmatrix(): + m = scipy.sparse.eye(3) + a = scipy.sparse.csr_array(m) + assert not isinstance(m, scipy.sparse.sparray) + assert isinstance(a, scipy.sparse.sparray) + + # Should only be true for sparse matrices, not sparse arrays + assert not scipy.sparse.isspmatrix(a) + assert scipy.sparse.isspmatrix(m) + + # ndarray and array_likes are not sparse + assert not scipy.sparse.isspmatrix(a.todense()) + assert not scipy.sparse.isspmatrix(m.todense()) + + +@pytest.mark.parametrize( + ("fmt", "fn"), + ( + ("bsr", scipy.sparse.isspmatrix_bsr), + ("coo", scipy.sparse.isspmatrix_coo), + ("csc", scipy.sparse.isspmatrix_csc), + ("csr", scipy.sparse.isspmatrix_csr), + ("dia", scipy.sparse.isspmatrix_dia), + ("dok", scipy.sparse.isspmatrix_dok), + ("lil", scipy.sparse.isspmatrix_lil), + ), +) +def test_isspmatrix_format(fmt, fn): + m = scipy.sparse.eye(3, format=fmt) + a = scipy.sparse.csr_array(m).asformat(fmt) + assert not isinstance(m, scipy.sparse.sparray) + 
assert isinstance(a, scipy.sparse.sparray) + + # Should only be true for sparse matrices, not sparse arrays + assert not fn(a) + assert fn(m) + + # ndarray and array_likes are not sparse + assert not fn(a.todense()) + assert not fn(m.todense()) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_base.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_base.py new file mode 100644 index 0000000000000000000000000000000000000000..7757384551c514d5f5cd18f91d2a931921704ee7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_base.py @@ -0,0 +1,5220 @@ +# +# Authors: Travis Oliphant, Ed Schofield, Robert Cimrman, Nathan Bell, and others + +""" Test functions for sparse matrices. Each class in the "Matrix class +based tests" section become subclasses of the classes in the "Generic +tests" section. This is done by the functions in the "Tailored base +class for generic tests" section. + +""" + + +import contextlib +import functools +import operator +import platform +import itertools +import sys +from scipy._lib import _pep440 + +import numpy as np +from numpy import (arange, zeros, array, dot, asarray, + vstack, ndarray, transpose, diag, kron, inf, conjugate, + int8) + +import random +from numpy.testing import (assert_equal, assert_array_equal, + assert_array_almost_equal, assert_almost_equal, assert_, + assert_allclose,suppress_warnings) +from pytest import raises as assert_raises + +import scipy.linalg + +import scipy.sparse as sparse +from scipy.sparse import (csc_matrix, csr_matrix, dok_matrix, + coo_matrix, lil_matrix, dia_matrix, bsr_matrix, + eye, issparse, SparseEfficiencyWarning, sparray) +from scipy.sparse._sputils import (supported_dtypes, isscalarlike, + get_index_dtype, asmatrix, matrix) +from scipy.sparse.linalg import splu, expm, inv + +from scipy._lib.decorator import decorator +from scipy._lib._util import ComplexWarning + +import pytest + + +IS_COLAB = ('google.colab' in 
sys.modules) + + +def assert_in(member, collection, msg=None): + message = msg if msg is not None else f"{member!r} not found in {collection!r}" + assert_(member in collection, msg=message) + + +def assert_array_equal_dtype(x, y, **kwargs): + assert_(x.dtype == y.dtype) + assert_array_equal(x, y, **kwargs) + + +NON_ARRAY_BACKED_FORMATS = frozenset(['dok']) + +def sparse_may_share_memory(A, B): + # Checks if A and B have any numpy array sharing memory. + + def _underlying_arrays(x): + # Given any object (e.g. a sparse array), returns all numpy arrays + # stored in any attribute. + + arrays = [] + for a in x.__dict__.values(): + if isinstance(a, (np.ndarray, np.generic)): + arrays.append(a) + return arrays + + for a in _underlying_arrays(A): + for b in _underlying_arrays(B): + if np.may_share_memory(a, b): + return True + return False + + +sup_complex = suppress_warnings() +sup_complex.filter(ComplexWarning) + + +def with_64bit_maxval_limit(maxval_limit=None, random=False, fixed_dtype=None, + downcast_maxval=None, assert_32bit=False): + """ + Monkeypatch the maxval threshold at which scipy.sparse switches to + 64-bit index arrays, or make it (pseudo-)random. 
+ + """ + if maxval_limit is None: + maxval_limit = np.int64(10) + else: + # Ensure we use numpy scalars rather than Python scalars (matters for + # NEP 50 casting rule changes) + maxval_limit = np.int64(maxval_limit) + + if assert_32bit: + def new_get_index_dtype(arrays=(), maxval=None, check_contents=False): + tp = get_index_dtype(arrays, maxval, check_contents) + assert_equal(np.iinfo(tp).max, np.iinfo(np.int32).max) + assert_(tp == np.int32 or tp == np.intc) + return tp + elif fixed_dtype is not None: + def new_get_index_dtype(arrays=(), maxval=None, check_contents=False): + return fixed_dtype + elif random: + counter = np.random.RandomState(seed=1234) + + def new_get_index_dtype(arrays=(), maxval=None, check_contents=False): + return (np.int32, np.int64)[counter.randint(2)] + else: + def new_get_index_dtype(arrays=(), maxval=None, check_contents=False): + dtype = np.int32 + if maxval is not None: + if maxval > maxval_limit: + dtype = np.int64 + for arr in arrays: + arr = np.asarray(arr) + if arr.dtype > np.int32: + if check_contents: + if arr.size == 0: + # a bigger type not needed + continue + elif np.issubdtype(arr.dtype, np.integer): + maxval = arr.max() + minval = arr.min() + if minval >= -maxval_limit and maxval <= maxval_limit: + # a bigger type not needed + continue + dtype = np.int64 + return dtype + + if downcast_maxval is not None: + def new_downcast_intp_index(arr): + if arr.max() > downcast_maxval: + raise AssertionError("downcast limited") + return arr.astype(np.intp) + + @decorator + def deco(func, *a, **kw): + backup = [] + modules = [scipy.sparse._bsr, scipy.sparse._coo, scipy.sparse._csc, + scipy.sparse._csr, scipy.sparse._dia, scipy.sparse._dok, + scipy.sparse._lil, scipy.sparse._sputils, + scipy.sparse._compressed, scipy.sparse._construct] + try: + for mod in modules: + backup.append((mod, 'get_index_dtype', + getattr(mod, 'get_index_dtype', None))) + setattr(mod, 'get_index_dtype', new_get_index_dtype) + if downcast_maxval is not None: + 
backup.append((mod, 'downcast_intp_index', + getattr(mod, 'downcast_intp_index', None))) + setattr(mod, 'downcast_intp_index', new_downcast_intp_index) + return func(*a, **kw) + finally: + for mod, name, oldfunc in backup: + if oldfunc is not None: + setattr(mod, name, oldfunc) + + return deco + + +def toarray(a): + if isinstance(a, np.ndarray) or isscalarlike(a): + return a + return a.toarray() + + +class BinopTester: + # Custom type to test binary operations on sparse matrices. + + def __add__(self, mat): + return "matrix on the right" + + def __mul__(self, mat): + return "matrix on the right" + + def __sub__(self, mat): + return "matrix on the right" + + def __radd__(self, mat): + return "matrix on the left" + + def __rmul__(self, mat): + return "matrix on the left" + + def __rsub__(self, mat): + return "matrix on the left" + + def __matmul__(self, mat): + return "matrix on the right" + + def __rmatmul__(self, mat): + return "matrix on the left" + +class BinopTester_with_shape: + # Custom type to test binary operations on sparse matrices + # with object which has shape attribute. + def __init__(self,shape): + self._shape = shape + + def shape(self): + return self._shape + + def ndim(self): + return len(self._shape) + + def __add__(self, mat): + return "matrix on the right" + + def __mul__(self, mat): + return "matrix on the right" + + def __sub__(self, mat): + return "matrix on the right" + + def __radd__(self, mat): + return "matrix on the left" + + def __rmul__(self, mat): + return "matrix on the left" + + def __rsub__(self, mat): + return "matrix on the left" + + def __matmul__(self, mat): + return "matrix on the right" + + def __rmatmul__(self, mat): + return "matrix on the left" + +class ComparisonTester: + # Custom type to test comparison operations on sparse matrices. 
+ def __eq__(self, other): + return "eq" + + def __ne__(self, other): + return "ne" + + def __lt__(self, other): + return "lt" + + def __le__(self, other): + return "le" + + def __gt__(self, other): + return "gt" + + def __ge__(self, other): + return "ge" + + +#------------------------------------------------------------------------------ +# Generic tests +#------------------------------------------------------------------------------ + + +# TODO test prune +# TODO test has_sorted_indices +class _TestCommon: + """test common functionality shared by all sparse formats""" + math_dtypes = supported_dtypes + + @classmethod + def init_class(cls): + # Canonical data. + cls.dat = array([[1, 0, 0, 2], [3, 0, 1, 0], [0, 2, 0, 0]], 'd') + cls.datsp = cls.spcreator(cls.dat) + + # Some sparse and dense matrices with data for every supported dtype. + # This set union is a workaround for numpy#6295, which means that + # two np.int64 dtypes don't hash to the same value. + cls.checked_dtypes = set(supported_dtypes).union(cls.math_dtypes) + cls.dat_dtypes = {} + cls.datsp_dtypes = {} + for dtype in cls.checked_dtypes: + cls.dat_dtypes[dtype] = cls.dat.astype(dtype) + cls.datsp_dtypes[dtype] = cls.spcreator(cls.dat.astype(dtype)) + + # Check that the original data is equivalent to the + # corresponding dat_dtypes & datsp_dtypes. + assert_equal(cls.dat, cls.dat_dtypes[np.float64]) + assert_equal(cls.datsp.toarray(), + cls.datsp_dtypes[np.float64].toarray()) + + def test_bool(self): + def check(dtype): + datsp = self.datsp_dtypes[dtype] + + assert_raises(ValueError, bool, datsp) + assert_(self.spcreator([1])) + assert_(not self.spcreator([0])) + + if isinstance(self, TestDOK): + pytest.skip("Cannot create a rank <= 2 DOK matrix.") + for dtype in self.checked_dtypes: + check(dtype) + + def test_bool_rollover(self): + # bool's underlying dtype is 1 byte, check that it does not + # rollover True -> False at 256. 
+ dat = array([[True, False]]) + datsp = self.spcreator(dat) + + for _ in range(10): + datsp = datsp + datsp + dat = dat + dat + assert_array_equal(dat, datsp.toarray()) + + def test_eq(self): + sup = suppress_warnings() + sup.filter(SparseEfficiencyWarning) + + @sup + @sup_complex + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + dat2 = dat.copy() + dat2[:,0] = 0 + datsp2 = self.spcreator(dat2) + datbsr = bsr_matrix(dat) + datcsr = csr_matrix(dat) + datcsc = csc_matrix(dat) + datlil = lil_matrix(dat) + + # sparse/sparse + assert_array_equal_dtype(dat == dat2, (datsp == datsp2).toarray()) + # mix sparse types + assert_array_equal_dtype(dat == dat2, (datbsr == datsp2).toarray()) + assert_array_equal_dtype(dat == dat2, (datcsr == datsp2).toarray()) + assert_array_equal_dtype(dat == dat2, (datcsc == datsp2).toarray()) + assert_array_equal_dtype(dat == dat2, (datlil == datsp2).toarray()) + # sparse/dense + assert_array_equal_dtype(dat == datsp2, datsp2 == dat) + # sparse/scalar + assert_array_equal_dtype(dat == 0, (datsp == 0).toarray()) + assert_array_equal_dtype(dat == 1, (datsp == 1).toarray()) + assert_array_equal_dtype(dat == np.nan, + (datsp == np.nan).toarray()) + + if not isinstance(self, (TestBSR, TestCSC, TestCSR)): + pytest.skip("Bool comparisons only implemented for BSR, CSC, and CSR.") + for dtype in self.checked_dtypes: + check(dtype) + + def test_ne(self): + sup = suppress_warnings() + sup.filter(SparseEfficiencyWarning) + + @sup + @sup_complex + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + dat2 = dat.copy() + dat2[:,0] = 0 + datsp2 = self.spcreator(dat2) + datbsr = bsr_matrix(dat) + datcsc = csc_matrix(dat) + datcsr = csr_matrix(dat) + datlil = lil_matrix(dat) + + # sparse/sparse + assert_array_equal_dtype(dat != dat2, (datsp != datsp2).toarray()) + # mix sparse types + assert_array_equal_dtype(dat != dat2, (datbsr != datsp2).toarray()) + assert_array_equal_dtype(dat != dat2, 
(datcsc != datsp2).toarray()) + assert_array_equal_dtype(dat != dat2, (datcsr != datsp2).toarray()) + assert_array_equal_dtype(dat != dat2, (datlil != datsp2).toarray()) + # sparse/dense + assert_array_equal_dtype(dat != datsp2, datsp2 != dat) + # sparse/scalar + assert_array_equal_dtype(dat != 0, (datsp != 0).toarray()) + assert_array_equal_dtype(dat != 1, (datsp != 1).toarray()) + assert_array_equal_dtype(0 != dat, (0 != datsp).toarray()) + assert_array_equal_dtype(1 != dat, (1 != datsp).toarray()) + assert_array_equal_dtype(dat != np.nan, + (datsp != np.nan).toarray()) + + if not isinstance(self, (TestBSR, TestCSC, TestCSR)): + pytest.skip("Bool comparisons only implemented for BSR, CSC, and CSR.") + for dtype in self.checked_dtypes: + check(dtype) + + def test_lt(self): + sup = suppress_warnings() + sup.filter(SparseEfficiencyWarning) + + @sup + @sup_complex + def check(dtype): + # data + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + dat2 = dat.copy() + dat2[:,0] = 0 + datsp2 = self.spcreator(dat2) + datcomplex = dat.astype(complex) + datcomplex[:,0] = 1 + 1j + datspcomplex = self.spcreator(datcomplex) + datbsr = bsr_matrix(dat) + datcsc = csc_matrix(dat) + datcsr = csr_matrix(dat) + datlil = lil_matrix(dat) + + # sparse/sparse + assert_array_equal_dtype(dat < dat2, (datsp < datsp2).toarray()) + assert_array_equal_dtype(datcomplex < dat2, + (datspcomplex < datsp2).toarray()) + # mix sparse types + assert_array_equal_dtype(dat < dat2, (datbsr < datsp2).toarray()) + assert_array_equal_dtype(dat < dat2, (datcsc < datsp2).toarray()) + assert_array_equal_dtype(dat < dat2, (datcsr < datsp2).toarray()) + assert_array_equal_dtype(dat < dat2, (datlil < datsp2).toarray()) + + assert_array_equal_dtype(dat2 < dat, (datsp2 < datbsr).toarray()) + assert_array_equal_dtype(dat2 < dat, (datsp2 < datcsc).toarray()) + assert_array_equal_dtype(dat2 < dat, (datsp2 < datcsr).toarray()) + assert_array_equal_dtype(dat2 < dat, (datsp2 < datlil).toarray()) + # 
sparse/dense + assert_array_equal_dtype(dat < dat2, datsp < dat2) + assert_array_equal_dtype(datcomplex < dat2, datspcomplex < dat2) + # sparse/scalar + for val in [2, 1, 0, -1, -2]: + val = np.int64(val) # avoid Python scalar (due to NEP 50 changes) + assert_array_equal_dtype((datsp < val).toarray(), dat < val) + assert_array_equal_dtype((val < datsp).toarray(), val < dat) + + with np.errstate(invalid='ignore'): + assert_array_equal_dtype((datsp < np.nan).toarray(), + dat < np.nan) + + # data + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + dat2 = dat.copy() + dat2[:,0] = 0 + datsp2 = self.spcreator(dat2) + + # dense rhs + assert_array_equal_dtype(dat < datsp2, datsp < dat2) + + if not isinstance(self, (TestBSR, TestCSC, TestCSR)): + pytest.skip("Bool comparisons only implemented for BSR, CSC, and CSR.") + for dtype in self.checked_dtypes: + check(dtype) + + def test_gt(self): + sup = suppress_warnings() + sup.filter(SparseEfficiencyWarning) + + @sup + @sup_complex + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + dat2 = dat.copy() + dat2[:,0] = 0 + datsp2 = self.spcreator(dat2) + datcomplex = dat.astype(complex) + datcomplex[:,0] = 1 + 1j + datspcomplex = self.spcreator(datcomplex) + datbsr = bsr_matrix(dat) + datcsc = csc_matrix(dat) + datcsr = csr_matrix(dat) + datlil = lil_matrix(dat) + + # sparse/sparse + assert_array_equal_dtype(dat > dat2, (datsp > datsp2).toarray()) + assert_array_equal_dtype(datcomplex > dat2, + (datspcomplex > datsp2).toarray()) + # mix sparse types + assert_array_equal_dtype(dat > dat2, (datbsr > datsp2).toarray()) + assert_array_equal_dtype(dat > dat2, (datcsc > datsp2).toarray()) + assert_array_equal_dtype(dat > dat2, (datcsr > datsp2).toarray()) + assert_array_equal_dtype(dat > dat2, (datlil > datsp2).toarray()) + + assert_array_equal_dtype(dat2 > dat, (datsp2 > datbsr).toarray()) + assert_array_equal_dtype(dat2 > dat, (datsp2 > datcsc).toarray()) + 
assert_array_equal_dtype(dat2 > dat, (datsp2 > datcsr).toarray()) + assert_array_equal_dtype(dat2 > dat, (datsp2 > datlil).toarray()) + # sparse/dense + assert_array_equal_dtype(dat > dat2, datsp > dat2) + assert_array_equal_dtype(datcomplex > dat2, datspcomplex > dat2) + # sparse/scalar + for val in [2, 1, 0, -1, -2]: + val = np.int64(val) # avoid Python scalar (due to NEP 50 changes) + assert_array_equal_dtype((datsp > val).toarray(), dat > val) + assert_array_equal_dtype((val > datsp).toarray(), val > dat) + + with np.errstate(invalid='ignore'): + assert_array_equal_dtype((datsp > np.nan).toarray(), + dat > np.nan) + + # data + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + dat2 = dat.copy() + dat2[:,0] = 0 + datsp2 = self.spcreator(dat2) + + # dense rhs + assert_array_equal_dtype(dat > datsp2, datsp > dat2) + + if not isinstance(self, (TestBSR, TestCSC, TestCSR)): + pytest.skip("Bool comparisons only implemented for BSR, CSC, and CSR.") + for dtype in self.checked_dtypes: + check(dtype) + + def test_le(self): + sup = suppress_warnings() + sup.filter(SparseEfficiencyWarning) + + @sup + @sup_complex + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + dat2 = dat.copy() + dat2[:,0] = 0 + datsp2 = self.spcreator(dat2) + datcomplex = dat.astype(complex) + datcomplex[:,0] = 1 + 1j + datspcomplex = self.spcreator(datcomplex) + datbsr = bsr_matrix(dat) + datcsc = csc_matrix(dat) + datcsr = csr_matrix(dat) + datlil = lil_matrix(dat) + + # sparse/sparse + assert_array_equal_dtype(dat <= dat2, (datsp <= datsp2).toarray()) + assert_array_equal_dtype(datcomplex <= dat2, + (datspcomplex <= datsp2).toarray()) + # mix sparse types + assert_array_equal_dtype((datbsr <= datsp2).toarray(), dat <= dat2) + assert_array_equal_dtype((datcsc <= datsp2).toarray(), dat <= dat2) + assert_array_equal_dtype((datcsr <= datsp2).toarray(), dat <= dat2) + assert_array_equal_dtype((datlil <= datsp2).toarray(), dat <= dat2) + + 
assert_array_equal_dtype((datsp2 <= datbsr).toarray(), dat2 <= dat) + assert_array_equal_dtype((datsp2 <= datcsc).toarray(), dat2 <= dat) + assert_array_equal_dtype((datsp2 <= datcsr).toarray(), dat2 <= dat) + assert_array_equal_dtype((datsp2 <= datlil).toarray(), dat2 <= dat) + # sparse/dense + assert_array_equal_dtype(datsp <= dat2, dat <= dat2) + assert_array_equal_dtype(datspcomplex <= dat2, datcomplex <= dat2) + # sparse/scalar + for val in [2, 1, -1, -2]: + val = np.int64(val) # avoid Python scalar (due to NEP 50 changes) + assert_array_equal_dtype((datsp <= val).toarray(), dat <= val) + assert_array_equal_dtype((val <= datsp).toarray(), val <= dat) + + # data + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + dat2 = dat.copy() + dat2[:,0] = 0 + datsp2 = self.spcreator(dat2) + + # dense rhs + assert_array_equal_dtype(dat <= datsp2, datsp <= dat2) + + if not isinstance(self, (TestBSR, TestCSC, TestCSR)): + pytest.skip("Bool comparisons only implemented for BSR, CSC, and CSR.") + for dtype in self.checked_dtypes: + check(dtype) + + def test_ge(self): + sup = suppress_warnings() + sup.filter(SparseEfficiencyWarning) + + @sup + @sup_complex + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + dat2 = dat.copy() + dat2[:,0] = 0 + datsp2 = self.spcreator(dat2) + datcomplex = dat.astype(complex) + datcomplex[:,0] = 1 + 1j + datspcomplex = self.spcreator(datcomplex) + datbsr = bsr_matrix(dat) + datcsc = csc_matrix(dat) + datcsr = csr_matrix(dat) + datlil = lil_matrix(dat) + + # sparse/sparse + assert_array_equal_dtype(dat >= dat2, (datsp >= datsp2).toarray()) + assert_array_equal_dtype(datcomplex >= dat2, + (datspcomplex >= datsp2).toarray()) + # mix sparse types + assert_array_equal_dtype((datbsr >= datsp2).toarray(), dat >= dat2) + assert_array_equal_dtype((datcsc >= datsp2).toarray(), dat >= dat2) + assert_array_equal_dtype((datcsr >= datsp2).toarray(), dat >= dat2) + assert_array_equal_dtype((datlil >= 
datsp2).toarray(), dat >= dat2) + + assert_array_equal_dtype((datsp2 >= datbsr).toarray(), dat2 >= dat) + assert_array_equal_dtype((datsp2 >= datcsc).toarray(), dat2 >= dat) + assert_array_equal_dtype((datsp2 >= datcsr).toarray(), dat2 >= dat) + assert_array_equal_dtype((datsp2 >= datlil).toarray(), dat2 >= dat) + # sparse/dense + assert_array_equal_dtype(datsp >= dat2, dat >= dat2) + assert_array_equal_dtype(datspcomplex >= dat2, datcomplex >= dat2) + # sparse/scalar + for val in [2, 1, -1, -2]: + val = np.int64(val) # avoid Python scalar (due to NEP 50 changes) + assert_array_equal_dtype((datsp >= val).toarray(), dat >= val) + assert_array_equal_dtype((val >= datsp).toarray(), val >= dat) + + # dense data + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + dat2 = dat.copy() + dat2[:,0] = 0 + datsp2 = self.spcreator(dat2) + + # dense rhs + assert_array_equal_dtype(dat >= datsp2, datsp >= dat2) + + if not isinstance(self, (TestBSR, TestCSC, TestCSR)): + pytest.skip("Bool comparisons only implemented for BSR, CSC, and CSR.") + for dtype in self.checked_dtypes: + check(dtype) + + def test_empty(self): + # create empty matrices + assert_equal(self.spcreator((3, 3)).toarray(), zeros((3, 3))) + assert_equal(self.spcreator((3, 3)).nnz, 0) + assert_equal(self.spcreator((3, 3)).count_nonzero(), 0) + + def test_count_nonzero(self): + expected = np.count_nonzero(self.datsp.toarray()) + assert_equal(self.datsp.count_nonzero(), expected) + assert_equal(self.datsp.T.count_nonzero(), expected) + + def test_invalid_shapes(self): + assert_raises(ValueError, self.spcreator, (-1,3)) + assert_raises(ValueError, self.spcreator, (3,-1)) + assert_raises(ValueError, self.spcreator, (-1,-1)) + + def test_repr(self): + repr(self.datsp) + + def test_str(self): + str(self.datsp) + + def test_empty_arithmetic(self): + # Test manipulating empty matrices. 
Fails in SciPy SVN <= r1768 + shape = (5, 5) + for mytype in [np.dtype('int32'), np.dtype('float32'), + np.dtype('float64'), np.dtype('complex64'), + np.dtype('complex128')]: + a = self.spcreator(shape, dtype=mytype) + b = a + a + c = 2 * a + d = a @ a.tocsc() + e = a @ a.tocsr() + f = a @ a.tocoo() + for m in [a,b,c,d,e,f]: + assert_equal(m.toarray(), a.toarray()@a.toarray()) + # These fail in all revisions <= r1768: + assert_equal(m.dtype,mytype) + assert_equal(m.toarray().dtype,mytype) + + def test_abs(self): + A = array([[-1, 0, 17], [0, -5, 0], [1, -4, 0], [0, 0, 0]], 'd') + assert_equal(abs(A), abs(self.spcreator(A)).toarray()) + + def test_round(self): + decimal = 1 + A = array([[-1.35, 0.56], [17.25, -5.98]], 'd') + assert_equal(np.around(A, decimals=decimal), + round(self.spcreator(A), ndigits=decimal).toarray()) + + def test_elementwise_power(self): + A = array([[-4, -3, -2], [-1, 0, 1], [2, 3, 4]], 'd') + assert_equal(np.power(A, 2), self.spcreator(A).power(2).toarray()) + + #it's element-wise power function, input has to be a scalar + assert_raises(NotImplementedError, self.spcreator(A).power, A) + + def test_neg(self): + A = array([[-1, 0, 17], [0, -5, 0], [1, -4, 0], [0, 0, 0]], 'd') + assert_equal(-A, (-self.spcreator(A)).toarray()) + + # see gh-5843 + A = array([[True, False, False], [False, False, True]]) + assert_raises(NotImplementedError, self.spcreator(A).__neg__) + + def test_real(self): + D = array([[1 + 3j, 2 - 4j]]) + A = self.spcreator(D) + assert_equal(A.real.toarray(), D.real) + + def test_imag(self): + D = array([[1 + 3j, 2 - 4j]]) + A = self.spcreator(D) + assert_equal(A.imag.toarray(), D.imag) + + def test_diagonal(self): + # Does the matrix's .diagonal() method work? 
+ mats = [] + mats.append([[1,0,2]]) + mats.append([[1],[0],[2]]) + mats.append([[0,1],[0,2],[0,3]]) + mats.append([[0,0,1],[0,0,2],[0,3,0]]) + mats.append([[1,0],[0,0]]) + + mats.append(kron(mats[0],[[1,2]])) + mats.append(kron(mats[0],[[1],[2]])) + mats.append(kron(mats[1],[[1,2],[3,4]])) + mats.append(kron(mats[2],[[1,2],[3,4]])) + mats.append(kron(mats[3],[[1,2],[3,4]])) + mats.append(kron(mats[3],[[1,2,3,4]])) + + for m in mats: + rows, cols = array(m).shape + sparse_mat = self.spcreator(m) + for k in range(-rows-1, cols+2): + assert_equal(sparse_mat.diagonal(k=k), diag(m, k=k)) + # Test for k beyond boundaries(issue #11949) + assert_equal(sparse_mat.diagonal(k=10), diag(m, k=10)) + assert_equal(sparse_mat.diagonal(k=-99), diag(m, k=-99)) + + # Test all-zero matrix. + assert_equal(self.spcreator((40, 16130)).diagonal(), np.zeros(40)) + # Test empty matrix + # https://github.com/scipy/scipy/issues/11949 + assert_equal(self.spcreator((0, 0)).diagonal(), np.empty(0)) + assert_equal(self.spcreator((15, 0)).diagonal(), np.empty(0)) + assert_equal(self.spcreator((0, 5)).diagonal(10), np.empty(0)) + + def test_trace(self): + # For square matrix + A = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + B = self.spcreator(A) + for k in range(-2, 3): + assert_equal(A.trace(offset=k), B.trace(offset=k)) + + # For rectangular matrix + A = np.array([[1, 2, 3], [4, 5, 6]]) + B = self.spcreator(A) + for k in range(-1, 3): + assert_equal(A.trace(offset=k), B.trace(offset=k)) + + def test_reshape(self): + # This first example is taken from the lil_matrix reshaping test. 
+ x = self.spcreator([[1, 0, 7], [0, 0, 0], [0, 3, 0], [0, 0, 5]]) + for order in ['C', 'F']: + for s in [(12, 1), (1, 12)]: + assert_array_equal(x.reshape(s, order=order).toarray(), + x.toarray().reshape(s, order=order)) + + # This example is taken from the stackoverflow answer at + # https://stackoverflow.com/q/16511879 + x = self.spcreator([[0, 10, 0, 0], [0, 0, 0, 0], [0, 20, 30, 40]]) + y = x.reshape((2, 6)) # Default order is 'C' + desired = [[0, 10, 0, 0, 0, 0], [0, 0, 0, 20, 30, 40]] + assert_array_equal(y.toarray(), desired) + + # Reshape with negative indexes + y = x.reshape((2, -1)) + assert_array_equal(y.toarray(), desired) + y = x.reshape((-1, 6)) + assert_array_equal(y.toarray(), desired) + assert_raises(ValueError, x.reshape, (-1, -1)) + + # Reshape with star args + y = x.reshape(2, 6) + assert_array_equal(y.toarray(), desired) + assert_raises(TypeError, x.reshape, 2, 6, not_an_arg=1) + + # Reshape with same size is noop unless copy=True + y = x.reshape((3, 4)) + assert_(y is x) + y = x.reshape((3, 4), copy=True) + assert_(y is not x) + + # Ensure reshape did not alter original size + assert_array_equal(x.shape, (3, 4)) + + # Reshape in place + x.shape = (2, 6) + assert_array_equal(x.toarray(), desired) + + # Reshape to bad ndim + assert_raises(ValueError, x.reshape, (x.size,)) + assert_raises(ValueError, x.reshape, (1, x.size, 1)) + + @pytest.mark.slow + def test_setdiag_comprehensive(self): + def dense_setdiag(a, v, k): + v = np.asarray(v) + if k >= 0: + n = min(a.shape[0], a.shape[1] - k) + if v.ndim != 0: + n = min(n, len(v)) + v = v[:n] + i = np.arange(0, n) + j = np.arange(k, k + n) + a[i,j] = v + elif k < 0: + dense_setdiag(a.T, v, -k) + + def check_setdiag(a, b, k): + # Check setting diagonal using a scalar, a vector of + # correct length, and too short or too long vectors + for r in [-1, len(np.diag(a, k)), 2, 30]: + if r < 0: + v = np.random.choice(range(1, 20)) + else: + v = np.random.randint(1, 20, size=r) + + dense_setdiag(a, v, k) + 
with suppress_warnings() as sup: + message = ("Changing the sparsity structure of " + "a cs[cr]_matrix is expensive") + sup.filter(SparseEfficiencyWarning, message) + b.setdiag(v, k) + + # check that dense_setdiag worked + d = np.diag(a, k) + if np.asarray(v).ndim == 0: + assert_array_equal(d, v, err_msg="%s %d" % (msg, r)) + else: + n = min(len(d), len(v)) + assert_array_equal(d[:n], v[:n], err_msg="%s %d" % (msg, r)) + # check that sparse setdiag worked + assert_array_equal(b.toarray(), a, err_msg="%s %d" % (msg, r)) + + # comprehensive test + np.random.seed(1234) + shapes = [(0,5), (5,0), (1,5), (5,1), (5,5)] + for dtype in [np.int8, np.float64]: + for m,n in shapes: + ks = np.arange(-m+1, n-1) + for k in ks: + msg = repr((dtype, m, n, k)) + a = np.zeros((m, n), dtype=dtype) + b = self.spcreator((m, n), dtype=dtype) + + check_setdiag(a, b, k) + + # check overwriting etc + for k2 in np.random.choice(ks, size=min(len(ks), 5)): + check_setdiag(a, b, k2) + + def test_setdiag(self): + # simple test cases + m = self.spcreator(np.eye(3)) + m2 = self.spcreator((4, 4)) + values = [3, 2, 1] + with suppress_warnings() as sup: + sup.filter( + SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive", + ) + assert_raises(ValueError, m.setdiag, values, k=4) + m.setdiag(values) + assert_array_equal(m.diagonal(), values) + m.setdiag(values, k=1) + assert_array_equal(m.toarray(), np.array([[3, 3, 0], + [0, 2, 2], + [0, 0, 1]])) + m.setdiag(values, k=-2) + assert_array_equal(m.toarray(), np.array([[3, 3, 0], + [0, 2, 2], + [3, 0, 1]])) + m.setdiag((9,), k=2) + assert_array_equal(m.toarray()[0,2], 9) + m.setdiag((9,), k=-2) + assert_array_equal(m.toarray()[2,0], 9) + # test short values on an empty matrix + m2.setdiag([1], k=2) + assert_array_equal(m2.toarray()[0], [0, 0, 1, 0]) + # test overwriting that same diagonal + m2.setdiag([1, 1], k=2) + assert_array_equal(m2.toarray()[:2], [[0, 0, 1, 0], + [0, 0, 0, 1]]) + + def test_nonzero(self): + A = 
array([[1, 0, 1],[0, 1, 1],[0, 0, 1]]) + Asp = self.spcreator(A) + + A_nz = {tuple(ij) for ij in transpose(A.nonzero())} + Asp_nz = {tuple(ij) for ij in transpose(Asp.nonzero())} + + assert_equal(A_nz, Asp_nz) + + def test_numpy_nonzero(self): + # See gh-5987 + A = array([[1, 0, 1], [0, 1, 1], [0, 0, 1]]) + Asp = self.spcreator(A) + + A_nz = {tuple(ij) for ij in transpose(np.nonzero(A))} + Asp_nz = {tuple(ij) for ij in transpose(np.nonzero(Asp))} + + assert_equal(A_nz, Asp_nz) + + def test_getrow(self): + assert_array_equal(self.datsp.getrow(1).toarray(), self.dat[[1], :]) + assert_array_equal(self.datsp.getrow(-1).toarray(), self.dat[[-1], :]) + + def test_getcol(self): + assert_array_equal(self.datsp.getcol(1).toarray(), self.dat[:, [1]]) + assert_array_equal(self.datsp.getcol(-1).toarray(), self.dat[:, [-1]]) + + def test_sum(self): + np.random.seed(1234) + dat_1 = matrix([[0, 1, 2], + [3, -4, 5], + [-6, 7, 9]]) + dat_2 = np.random.rand(5, 5) + dat_3 = np.array([[]]) + dat_4 = np.zeros((40, 40)) + dat_5 = sparse.rand(5, 5, density=1e-2).toarray() + matrices = [dat_1, dat_2, dat_3, dat_4, dat_5] + + def check(dtype, j): + dat = matrix(matrices[j], dtype=dtype) + datsp = self.spcreator(dat, dtype=dtype) + with np.errstate(over='ignore'): + assert_array_almost_equal(dat.sum(), datsp.sum()) + assert_equal(dat.sum().dtype, datsp.sum().dtype) + assert_(np.isscalar(datsp.sum(axis=None))) + assert_array_almost_equal(dat.sum(axis=None), + datsp.sum(axis=None)) + assert_equal(dat.sum(axis=None).dtype, + datsp.sum(axis=None).dtype) + assert_array_almost_equal(dat.sum(axis=0), datsp.sum(axis=0)) + assert_equal(dat.sum(axis=0).dtype, datsp.sum(axis=0).dtype) + assert_array_almost_equal(dat.sum(axis=1), datsp.sum(axis=1)) + assert_equal(dat.sum(axis=1).dtype, datsp.sum(axis=1).dtype) + assert_array_almost_equal(dat.sum(axis=-2), datsp.sum(axis=-2)) + assert_equal(dat.sum(axis=-2).dtype, datsp.sum(axis=-2).dtype) + assert_array_almost_equal(dat.sum(axis=-1), 
datsp.sum(axis=-1)) + assert_equal(dat.sum(axis=-1).dtype, datsp.sum(axis=-1).dtype) + + for dtype in self.checked_dtypes: + for j in range(len(matrices)): + check(dtype, j) + + def test_sum_invalid_params(self): + out = np.zeros((1, 3)) + dat = array([[0, 1, 2], + [3, -4, 5], + [-6, 7, 9]]) + datsp = self.spcreator(dat) + + assert_raises(ValueError, datsp.sum, axis=3) + assert_raises(TypeError, datsp.sum, axis=(0, 1)) + assert_raises(TypeError, datsp.sum, axis=1.5) + assert_raises(ValueError, datsp.sum, axis=1, out=out) + + def test_sum_dtype(self): + dat = array([[0, 1, 2], + [3, -4, 5], + [-6, 7, 9]]) + datsp = self.spcreator(dat) + + def check(dtype): + dat_mean = dat.mean(dtype=dtype) + datsp_mean = datsp.mean(dtype=dtype) + + assert_array_almost_equal(dat_mean, datsp_mean) + assert_equal(dat_mean.dtype, datsp_mean.dtype) + + for dtype in self.checked_dtypes: + check(dtype) + + def test_sum_out(self): + dat = array([[0, 1, 2], + [3, -4, 5], + [-6, 7, 9]]) + datsp = self.spcreator(dat) + + dat_out = array([[0]]) + datsp_out = matrix([[0]]) + + dat.sum(out=dat_out, keepdims=True) + datsp.sum(out=datsp_out) + assert_array_almost_equal(dat_out, datsp_out) + + dat_out = np.zeros((3, 1)) + datsp_out = asmatrix(np.zeros((3, 1))) + + dat.sum(axis=1, out=dat_out, keepdims=True) + datsp.sum(axis=1, out=datsp_out) + assert_array_almost_equal(dat_out, datsp_out) + + def test_numpy_sum(self): + # See gh-5987 + dat = array([[0, 1, 2], + [3, -4, 5], + [-6, 7, 9]]) + datsp = self.spcreator(dat) + + dat_mean = np.sum(dat) + datsp_mean = np.sum(datsp) + + assert_array_almost_equal(dat_mean, datsp_mean) + assert_equal(dat_mean.dtype, datsp_mean.dtype) + + def test_mean(self): + def check(dtype): + dat = array([[0, 1, 2], + [3, 4, 5], + [6, 7, 9]], dtype=dtype) + datsp = self.spcreator(dat, dtype=dtype) + + assert_array_almost_equal(dat.mean(), datsp.mean()) + assert_equal(dat.mean().dtype, datsp.mean().dtype) + assert_(np.isscalar(datsp.mean(axis=None))) + 
assert_array_almost_equal( + dat.mean(axis=None, keepdims=True), datsp.mean(axis=None) + ) + assert_equal(dat.mean(axis=None).dtype, datsp.mean(axis=None).dtype) + assert_array_almost_equal( + dat.mean(axis=0, keepdims=True), datsp.mean(axis=0) + ) + assert_equal(dat.mean(axis=0).dtype, datsp.mean(axis=0).dtype) + assert_array_almost_equal( + dat.mean(axis=1, keepdims=True), datsp.mean(axis=1) + ) + assert_equal(dat.mean(axis=1).dtype, datsp.mean(axis=1).dtype) + assert_array_almost_equal( + dat.mean(axis=-2, keepdims=True), datsp.mean(axis=-2) + ) + assert_equal(dat.mean(axis=-2).dtype, datsp.mean(axis=-2).dtype) + assert_array_almost_equal( + dat.mean(axis=-1, keepdims=True), datsp.mean(axis=-1) + ) + assert_equal(dat.mean(axis=-1).dtype, datsp.mean(axis=-1).dtype) + + for dtype in self.checked_dtypes: + check(dtype) + + def test_mean_invalid_params(self): + out = asmatrix(np.zeros((1, 3))) + dat = array([[0, 1, 2], + [3, -4, 5], + [-6, 7, 9]]) + datsp = self.spcreator(dat) + + assert_raises(ValueError, datsp.mean, axis=3) + assert_raises(TypeError, datsp.mean, axis=(0, 1)) + assert_raises(TypeError, datsp.mean, axis=1.5) + assert_raises(ValueError, datsp.mean, axis=1, out=out) + + def test_mean_dtype(self): + dat = array([[0, 1, 2], + [3, -4, 5], + [-6, 7, 9]]) + datsp = self.spcreator(dat) + + def check(dtype): + dat_mean = dat.mean(dtype=dtype) + datsp_mean = datsp.mean(dtype=dtype) + + assert_array_almost_equal(dat_mean, datsp_mean) + assert_equal(dat_mean.dtype, datsp_mean.dtype) + + for dtype in self.checked_dtypes: + check(dtype) + + def test_mean_out(self): + dat = array([[0, 1, 2], + [3, -4, 5], + [-6, 7, 9]]) + datsp = self.spcreator(dat) + + dat_out = array([[0]]) + datsp_out = matrix([[0]]) + + dat.mean(out=dat_out, keepdims=True) + datsp.mean(out=datsp_out) + assert_array_almost_equal(dat_out, datsp_out) + + dat_out = np.zeros((3, 1)) + datsp_out = matrix(np.zeros((3, 1))) + + dat.mean(axis=1, out=dat_out, keepdims=True) + datsp.mean(axis=1, 
out=datsp_out) + assert_array_almost_equal(dat_out, datsp_out) + + def test_numpy_mean(self): + # See gh-5987 + dat = array([[0, 1, 2], + [3, -4, 5], + [-6, 7, 9]]) + datsp = self.spcreator(dat) + + dat_mean = np.mean(dat) + datsp_mean = np.mean(datsp) + + assert_array_almost_equal(dat_mean, datsp_mean) + assert_equal(dat_mean.dtype, datsp_mean.dtype) + + def test_expm(self): + M = array([[1, 0, 2], [0, 0, 3], [-4, 5, 6]], float) + sM = self.spcreator(M, shape=(3,3), dtype=float) + Mexp = scipy.linalg.expm(M) + + N = array([[3., 0., 1.], [0., 2., 0.], [0., 0., 0.]]) + sN = self.spcreator(N, shape=(3,3), dtype=float) + Nexp = scipy.linalg.expm(N) + + with suppress_warnings() as sup: + sup.filter( + SparseEfficiencyWarning, + "splu converted its input to CSC format", + ) + sup.filter( + SparseEfficiencyWarning, + "spsolve is more efficient when sparse b is in the CSC matrix format", + ) + sup.filter( + SparseEfficiencyWarning, + "spsolve requires A be CSC or CSR matrix format", + ) + sMexp = expm(sM).toarray() + sNexp = expm(sN).toarray() + + assert_array_almost_equal((sMexp - Mexp), zeros((3, 3))) + assert_array_almost_equal((sNexp - Nexp), zeros((3, 3))) + + def test_inv(self): + def check(dtype): + M = array([[1, 0, 2], [0, 0, 3], [-4, 5, 6]], dtype) + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, + "spsolve requires A be CSC or CSR matrix format",) + sup.filter(SparseEfficiencyWarning, + "spsolve is more efficient when sparse b " + "is in the CSC matrix format",) + sup.filter(SparseEfficiencyWarning, + "splu converted its input to CSC format",) + sM = self.spcreator(M, shape=(3,3), dtype=dtype) + sMinv = inv(sM) + assert_array_almost_equal(sMinv.dot(sM).toarray(), np.eye(3)) + assert_raises(TypeError, inv, M) + for dtype in [float]: + check(dtype) + + @sup_complex + def test_from_array(self): + A = array([[1,0,0],[2,3,4],[0,5,0],[0,0,0]]) + assert_array_equal(self.spcreator(A).toarray(), A) + + A = array([[1.0 + 3j, 0, 0], + [0, 2.0 + 5, 
0], + [0, 0, 0]]) + assert_array_equal(self.spcreator(A).toarray(), A) + assert_array_equal(self.spcreator(A, dtype='int16').toarray(),A.astype('int16')) + + @sup_complex + def test_from_matrix(self): + A = matrix([[1, 0, 0], [2, 3, 4], [0, 5, 0], [0, 0, 0]]) + assert_array_equal(self.spcreator(A).todense(), A) + + A = matrix([[1.0 + 3j, 0, 0], + [0, 2.0 + 5, 0], + [0, 0, 0]]) + assert_array_equal(self.spcreator(A).todense(), A) + assert_array_equal( + self.spcreator(A, dtype='int16').todense(), A.astype('int16') + ) + + @sup_complex + def test_from_list(self): + A = [[1,0,0],[2,3,4],[0,5,0],[0,0,0]] + assert_array_equal(self.spcreator(A).toarray(), A) + + A = [[1.0 + 3j, 0, 0], + [0, 2.0 + 5, 0], + [0, 0, 0]] + assert_array_equal(self.spcreator(A).toarray(), array(A)) + assert_array_equal( + self.spcreator(A, dtype='int16').toarray(), array(A).astype('int16') + ) + + @sup_complex + def test_from_sparse(self): + D = array([[1,0,0],[2,3,4],[0,5,0],[0,0,0]]) + S = csr_matrix(D) + assert_array_equal(self.spcreator(S).toarray(), D) + S = self.spcreator(D) + assert_array_equal(self.spcreator(S).toarray(), D) + + D = array([[1.0 + 3j, 0, 0], + [0, 2.0 + 5, 0], + [0, 0, 0]]) + S = csr_matrix(D) + assert_array_equal(self.spcreator(S).toarray(), D) + assert_array_equal(self.spcreator(S, dtype='int16').toarray(), + D.astype('int16')) + S = self.spcreator(D) + assert_array_equal(self.spcreator(S).toarray(), D) + assert_array_equal(self.spcreator(S, dtype='int16').toarray(), + D.astype('int16')) + + # def test_array(self): + # """test array(A) where A is in sparse format""" + # assert_equal( array(self.datsp), self.dat ) + + def test_todense(self): + # Check C- or F-contiguous (default). + chk = self.datsp.todense() + assert isinstance(chk, np.matrix) + assert_array_equal(chk, self.dat) + assert_(chk.flags.c_contiguous != chk.flags.f_contiguous) + # Check C-contiguous (with arg). 
+ chk = self.datsp.todense(order='C') + assert_array_equal(chk, self.dat) + assert_(chk.flags.c_contiguous) + assert_(not chk.flags.f_contiguous) + # Check F-contiguous (with arg). + chk = self.datsp.todense(order='F') + assert_array_equal(chk, self.dat) + assert_(not chk.flags.c_contiguous) + assert_(chk.flags.f_contiguous) + # Check with out argument (array). + out = np.zeros(self.datsp.shape, dtype=self.datsp.dtype) + chk = self.datsp.todense(out=out) + assert_array_equal(self.dat, out) + assert_array_equal(self.dat, chk) + assert np.may_share_memory(chk, out) + # Check with out array (matrix). + out = asmatrix(np.zeros(self.datsp.shape, dtype=self.datsp.dtype)) + chk = self.datsp.todense(out=out) + assert_array_equal(self.dat, out) + assert_array_equal(self.dat, chk) + assert np.may_share_memory(chk, out) + a = array([[1.,2.,3.]]) + dense_dot_dense = a @ self.dat + check = a @ self.datsp.todense() + assert_array_equal(dense_dot_dense, check) + b = array([[1.,2.,3.,4.]]).T + dense_dot_dense = self.dat @ b + check2 = self.datsp.todense() @ b + assert_array_equal(dense_dot_dense, check2) + # Check bool data works. + spbool = self.spcreator(self.dat, dtype=bool) + matbool = self.dat.astype(bool) + assert_array_equal(spbool.todense(), matbool) + + def test_toarray(self): + # Check C- or F-contiguous (default). + dat = asarray(self.dat) + chk = self.datsp.toarray() + assert_array_equal(chk, dat) + assert_(chk.flags.c_contiguous != chk.flags.f_contiguous) + # Check C-contiguous (with arg). + chk = self.datsp.toarray(order='C') + assert_array_equal(chk, dat) + assert_(chk.flags.c_contiguous) + assert_(not chk.flags.f_contiguous) + # Check F-contiguous (with arg). + chk = self.datsp.toarray(order='F') + assert_array_equal(chk, dat) + assert_(not chk.flags.c_contiguous) + assert_(chk.flags.f_contiguous) + # Check with output arg. 
+ out = np.zeros(self.datsp.shape, dtype=self.datsp.dtype) + self.datsp.toarray(out=out) + assert_array_equal(chk, dat) + # Check that things are fine when we don't initialize with zeros. + out[...] = 1. + self.datsp.toarray(out=out) + assert_array_equal(chk, dat) + a = array([1.,2.,3.]) + dense_dot_dense = dot(a, dat) + check = dot(a, self.datsp.toarray()) + assert_array_equal(dense_dot_dense, check) + b = array([1.,2.,3.,4.]) + dense_dot_dense = dot(dat, b) + check2 = dot(self.datsp.toarray(), b) + assert_array_equal(dense_dot_dense, check2) + # Check bool data works. + spbool = self.spcreator(self.dat, dtype=bool) + arrbool = dat.astype(bool) + assert_array_equal(spbool.toarray(), arrbool) + + @sup_complex + def test_astype(self): + D = array([[2.0 + 3j, 0, 0], + [0, 4.0 + 5j, 0], + [0, 0, 0]]) + S = self.spcreator(D) + + for x in supported_dtypes: + # Check correctly casted + D_casted = D.astype(x) + for copy in (True, False): + S_casted = S.astype(x, copy=copy) + assert_equal(S_casted.dtype, D_casted.dtype) # correct type + assert_equal(S_casted.toarray(), D_casted) # correct values + assert_equal(S_casted.format, S.format) # format preserved + # Check correctly copied + assert_(S_casted.astype(x, copy=False) is S_casted) + S_copied = S_casted.astype(x, copy=True) + assert_(S_copied is not S_casted) + + def check_equal_but_not_same_array_attribute(attribute): + a = getattr(S_casted, attribute) + b = getattr(S_copied, attribute) + assert_array_equal(a, b) + assert_(a is not b) + i = (0,) * b.ndim + b_i = b[i] + b[i] = not b[i] + assert_(a[i] != b[i]) + b[i] = b_i + + if S_casted.format in ('csr', 'csc', 'bsr'): + for attribute in ('indices', 'indptr', 'data'): + check_equal_but_not_same_array_attribute(attribute) + elif S_casted.format == 'coo': + for attribute in ('row', 'col', 'data'): + check_equal_but_not_same_array_attribute(attribute) + elif S_casted.format == 'dia': + for attribute in ('offsets', 'data'): + 
check_equal_but_not_same_array_attribute(attribute) + + @sup_complex + def test_astype_immutable(self): + D = array([[2.0 + 3j, 0, 0], + [0, 4.0 + 5j, 0], + [0, 0, 0]]) + S = self.spcreator(D) + if hasattr(S, 'data'): + S.data.flags.writeable = False + if S.format in ('csr', 'csc', 'bsr'): + S.indptr.flags.writeable = False + S.indices.flags.writeable = False + for x in supported_dtypes: + D_casted = D.astype(x) + S_casted = S.astype(x) + assert_equal(S_casted.dtype, D_casted.dtype) + + + def test_asfptype(self): + A = self.spcreator(arange(6,dtype='int32').reshape(2,3)) + + assert_equal(A.dtype, np.dtype('int32')) + assert_equal(A.asfptype().dtype, np.dtype('float64')) + assert_equal(A.asfptype().format, A.format) + assert_equal(A.astype('int16').asfptype().dtype, np.dtype('float32')) + assert_equal(A.astype('complex128').asfptype().dtype, np.dtype('complex128')) + + B = A.asfptype() + C = B.asfptype() + assert_(B is C) + + def test_mul_scalar(self): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + + assert_array_equal(dat*2, (datsp*2).toarray()) + assert_array_equal(dat*17.3, (datsp*17.3).toarray()) + + for dtype in self.math_dtypes: + check(dtype) + + def test_rmul_scalar(self): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + + assert_array_equal(2*dat, (2*datsp).toarray()) + assert_array_equal(17.3*dat, (17.3*datsp).toarray()) + + for dtype in self.math_dtypes: + check(dtype) + + # github issue #15210 + def test_rmul_scalar_type_error(self): + datsp = self.datsp_dtypes[np.float64] + with assert_raises(TypeError): + None * datsp + + def test_add(self): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + + a = dat.copy() + a[0,2] = 2.0 + b = datsp + c = b + a + assert_array_equal(c, b.toarray() + a) + + c = b + b.tocsr() + assert_array_equal(c.toarray(), + b.toarray() + b.toarray()) + + # test broadcasting + c = b + a[0] + assert_array_equal(c, 
b.toarray() + a[0]) + + for dtype in self.math_dtypes: + check(dtype) + + def test_radd(self): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + + a = dat.copy() + a[0,2] = 2.0 + b = datsp + c = a + b + assert_array_equal(c, a + b.toarray()) + + for dtype in self.math_dtypes: + check(dtype) + + def test_sub(self): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + + assert_array_equal((datsp - datsp).toarray(), np.zeros((3, 4))) + assert_array_equal((datsp - 0).toarray(), dat) + + A = self.spcreator( + np.array([[1, 0, 0, 4], [-1, 0, 0, 0], [0, 8, 0, -5]], 'd') + ) + assert_array_equal((datsp - A).toarray(), dat - A.toarray()) + assert_array_equal((A - datsp).toarray(), A.toarray() - dat) + + # test broadcasting + assert_array_equal(datsp - dat[0], dat - dat[0]) + + for dtype in self.math_dtypes: + if dtype == np.dtype('bool'): + # boolean array subtraction deprecated in 1.9.0 + continue + + check(dtype) + + def test_rsub(self): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + + assert_array_equal((dat - datsp),[[0,0,0,0],[0,0,0,0],[0,0,0,0]]) + assert_array_equal((datsp - dat),[[0,0,0,0],[0,0,0,0],[0,0,0,0]]) + assert_array_equal((0 - datsp).toarray(), -dat) + + A = self.spcreator(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d')) + assert_array_equal((dat - A), dat - A.toarray()) + assert_array_equal((A - dat), A.toarray() - dat) + assert_array_equal(A.toarray() - datsp, A.toarray() - dat) + assert_array_equal(datsp - A.toarray(), dat - A.toarray()) + + # test broadcasting + assert_array_equal(dat[0] - datsp, dat[0] - dat) + + for dtype in self.math_dtypes: + if dtype == np.dtype('bool'): + # boolean array subtraction deprecated in 1.9.0 + continue + + check(dtype) + + def test_add0(self): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + + # Adding 0 to a sparse matrix + assert_array_equal((datsp + 0).toarray(), dat) 
+ # use sum (which takes 0 as a starting value) + sumS = sum([k * datsp for k in range(1, 3)]) + sumD = sum([k * dat for k in range(1, 3)]) + assert_almost_equal(sumS.toarray(), sumD) + + for dtype in self.math_dtypes: + check(dtype) + + def test_elementwise_multiply(self): + # real/real + A = array([[4,0,9],[2,-3,5]]) + B = array([[0,7,0],[0,-4,0]]) + Asp = self.spcreator(A) + Bsp = self.spcreator(B) + assert_almost_equal(Asp.multiply(Bsp).toarray(), A*B) # sparse/sparse + assert_almost_equal(Asp.multiply(B).toarray(), A*B) # sparse/dense + + # complex/complex + C = array([[1-2j,0+5j,-1+0j],[4-3j,-3+6j,5]]) + D = array([[5+2j,7-3j,-2+1j],[0-1j,-4+2j,9]]) + Csp = self.spcreator(C) + Dsp = self.spcreator(D) + assert_almost_equal(Csp.multiply(Dsp).toarray(), C*D) # sparse/sparse + assert_almost_equal(Csp.multiply(D).toarray(), C*D) # sparse/dense + + # real/complex + assert_almost_equal(Asp.multiply(Dsp).toarray(), A*D) # sparse/sparse + assert_almost_equal(Asp.multiply(D).toarray(), A*D) # sparse/dense + + def test_elementwise_multiply_broadcast(self): + A = array([4]) + B = array([[-9]]) + C = array([1,-1,0]) + D = array([[7,9,-9]]) + E = array([[3],[2],[1]]) + F = array([[8,6,3],[-4,3,2],[6,6,6]]) + G = [1, 2, 3] + H = np.ones((3, 4)) + J = H.T + K = array([[0]]) + L = array([[[1,2],[0,1]]]) + + # Some arrays can't be cast as spmatrices (A,C,L) so leave + # them out. 
+ Bsp = self.spcreator(B) + Dsp = self.spcreator(D) + Esp = self.spcreator(E) + Fsp = self.spcreator(F) + Hsp = self.spcreator(H) + Hspp = self.spcreator(H[0,None]) + Jsp = self.spcreator(J) + Jspp = self.spcreator(J[:,0,None]) + Ksp = self.spcreator(K) + + matrices = [A, B, C, D, E, F, G, H, J, K, L] + spmatrices = [Bsp, Dsp, Esp, Fsp, Hsp, Hspp, Jsp, Jspp, Ksp] + + # sparse/sparse + for i in spmatrices: + for j in spmatrices: + try: + dense_mult = i.toarray() * j.toarray() + except ValueError: + assert_raises(ValueError, i.multiply, j) + continue + sp_mult = i.multiply(j) + assert_almost_equal(sp_mult.toarray(), dense_mult) + + # sparse/dense + for i in spmatrices: + for j in matrices: + try: + dense_mult = i.toarray() * j + except TypeError: + continue + except ValueError: + assert_raises(ValueError, i.multiply, j) + continue + sp_mult = i.multiply(j) + if issparse(sp_mult): + assert_almost_equal(sp_mult.toarray(), dense_mult) + else: + assert_almost_equal(sp_mult, dense_mult) + + def test_elementwise_divide(self): + expected = [[1,np.nan,np.nan,1], + [1,np.nan,1,np.nan], + [np.nan,1,np.nan,np.nan]] + assert_array_equal(toarray(self.datsp / self.datsp), expected) + + denom = self.spcreator(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d')) + expected = [[1,np.nan,np.nan,0.5], + [-3,np.nan,inf,np.nan], + [np.nan,0.25,np.nan,0]] + assert_array_equal(toarray(self.datsp / denom), expected) + + # complex + A = array([[1-2j,0+5j,-1+0j],[4-3j,-3+6j,5]]) + B = array([[5+2j,7-3j,-2+1j],[0-1j,-4+2j,9]]) + Asp = self.spcreator(A) + Bsp = self.spcreator(B) + assert_almost_equal(toarray(Asp / Bsp), A/B) + + # integer + A = array([[1,2,3],[-3,2,1]]) + B = array([[0,1,2],[0,-2,3]]) + Asp = self.spcreator(A) + Bsp = self.spcreator(B) + with np.errstate(divide='ignore'): + assert_array_equal(toarray(Asp / Bsp), A / B) + + # mismatching sparsity patterns + A = array([[0,1],[1,0]]) + B = array([[1,0],[1,0]]) + Asp = self.spcreator(A) + Bsp = self.spcreator(B) + with 
np.errstate(divide='ignore', invalid='ignore'): + assert_array_equal(np.array(toarray(Asp / Bsp)), A / B) + + def test_pow(self): + A = array([[1, 0, 2, 0], [0, 3, 4, 0], [0, 5, 0, 0], [0, 6, 7, 8]]) + B = self.spcreator(A) + + for exponent in [0,1,2,3]: + ret_sp = B**exponent + ret_np = np.linalg.matrix_power(A, exponent) + assert_array_equal(ret_sp.toarray(), ret_np) + assert_equal(ret_sp.dtype, ret_np.dtype) + + # invalid exponents + for exponent in [-1, 2.2, 1 + 3j]: + assert_raises(ValueError, B.__pow__, exponent) + + # nonsquare matrix + B = self.spcreator(A[:3,:]) + assert_raises(TypeError, B.__pow__, 1) + + def test_rmatvec(self): + M = self.spcreator(matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])) + assert_array_almost_equal([1,2,3,4] @ M, dot([1,2,3,4], M.toarray())) + row = array([[1,2,3,4]]) + assert_array_almost_equal(row @ M, row @ M.toarray()) + + def test_small_multiplication(self): + # test that A*x works for x with shape () (1,) (1,1) and (1,0) + A = self.spcreator([[1],[2],[3]]) + + assert_(issparse(A * array(1))) + assert_equal((A * array(1)).toarray(), [[1], [2], [3]]) + + assert_equal(A @ array([1]), array([1, 2, 3])) + assert_equal(A @ array([[1]]), array([[1], [2], [3]])) + assert_equal(A @ np.ones((1, 1)), array([[1], [2], [3]])) + assert_equal(A @ np.ones((1, 0)), np.ones((3, 0))) + + def test_start_vs_at_sign_for_sparray_and_spmatrix(self): + # test that * is matmul for spmatrix and mul for sparray + A = self.spcreator([[1],[2],[3]]) + + if isinstance(A, sparray): + assert_array_almost_equal(A * np.ones((3,1)), A) + assert_array_almost_equal(A * array([[1]]), A) + assert_array_almost_equal(A * np.ones((3,1)), A) + else: + assert_equal(A * array([1]), array([1, 2, 3])) + assert_equal(A * array([[1]]), array([[1], [2], [3]])) + assert_equal(A * np.ones((1, 0)), np.ones((3, 0))) + + def test_binop_custom_type(self): + # Non-regression test: previously, binary operations would raise + # NotImplementedError instead of returning NotImplemented + # 
(https://docs.python.org/library/constants.html#NotImplemented) + # so overloading Custom + matrix etc. didn't work. + A = self.spcreator([[1], [2], [3]]) + B = BinopTester() + assert_equal(A + B, "matrix on the left") + assert_equal(A - B, "matrix on the left") + assert_equal(A * B, "matrix on the left") + assert_equal(B + A, "matrix on the right") + assert_equal(B - A, "matrix on the right") + assert_equal(B * A, "matrix on the right") + + assert_equal(A @ B, "matrix on the left") + assert_equal(B @ A, "matrix on the right") + + def test_binop_custom_type_with_shape(self): + A = self.spcreator([[1], [2], [3]]) + B = BinopTester_with_shape((3,1)) + assert_equal(A + B, "matrix on the left") + assert_equal(A - B, "matrix on the left") + assert_equal(A * B, "matrix on the left") + assert_equal(B + A, "matrix on the right") + assert_equal(B - A, "matrix on the right") + assert_equal(B * A, "matrix on the right") + + assert_equal(A @ B, "matrix on the left") + assert_equal(B @ A, "matrix on the right") + + def test_mul_custom_type(self): + class Custom: + def __init__(self, scalar): + self.scalar = scalar + + def __rmul__(self, other): + return other * self.scalar + + scalar = 2 + A = self.spcreator([[1],[2],[3]]) + c = Custom(scalar) + A_scalar = A * scalar + A_c = A * c + assert_array_equal_dtype(A_scalar.toarray(), A_c.toarray()) + assert_equal(A_scalar.format, A_c.format) + + def test_comparisons_custom_type(self): + A = self.spcreator([[1], [2], [3]]) + B = ComparisonTester() + assert_equal(A == B, "eq") + assert_equal(A != B, "ne") + assert_equal(A > B, "lt") + assert_equal(A >= B, "le") + assert_equal(A < B, "gt") + assert_equal(A <= B, "ge") + + def test_dot_scalar(self): + M = self.spcreator(array([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])) + scalar = 10 + actual = M.dot(scalar) + expected = M * scalar + + assert_allclose(actual.toarray(), expected.toarray()) + + def test_matmul(self): + M = self.spcreator(array([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])) + B = 
self.spcreator(array([[0,1],[1,0],[0,2]],'d')) + col = array([[1,2,3]]).T + + matmul = operator.matmul + # check matrix-vector + assert_array_almost_equal(matmul(M, col), M.toarray() @ col) + + # check matrix-matrix + assert_array_almost_equal(matmul(M, B).toarray(), (M @ B).toarray()) + assert_array_almost_equal(matmul(M.toarray(), B), (M @ B).toarray()) + assert_array_almost_equal(matmul(M, B.toarray()), (M @ B).toarray()) + if not isinstance(M, sparray): + assert_array_almost_equal(matmul(M, B).toarray(), (M * B).toarray()) + assert_array_almost_equal(matmul(M.toarray(), B), (M * B).toarray()) + assert_array_almost_equal(matmul(M, B.toarray()), (M * B).toarray()) + + # check error on matrix-scalar + assert_raises(ValueError, matmul, M, 1) + assert_raises(ValueError, matmul, 1, M) + + def test_matvec(self): + M = self.spcreator(matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])) + col = array([[1,2,3]]).T + + assert_array_almost_equal(M @ col, M.toarray() @ col) + + # check result dimensions (ticket #514) + assert_equal((M @ array([1,2,3])).shape,(4,)) + assert_equal((M @ array([[1],[2],[3]])).shape,(4,1)) + assert_equal((M @ matrix([[1],[2],[3]])).shape,(4,1)) + + # check result type + assert_(isinstance(M @ array([1,2,3]), ndarray)) + assert_(isinstance(M @ matrix([1,2,3]).T, np.matrix)) + + # ensure exception is raised for improper dimensions + bad_vecs = [array([1,2]), array([1,2,3,4]), array([[1],[2]]), + matrix([1,2,3]), matrix([[1],[2]])] + for x in bad_vecs: + assert_raises(ValueError, M.__mul__, x) + + # The current relationship between sparse matrix products and array + # products is as follows: + assert_array_almost_equal(M@array([1,2,3]), dot(M.toarray(),[1,2,3])) + assert_array_almost_equal(M@[[1],[2],[3]], asmatrix(dot(M.toarray(),[1,2,3])).T) + # Note that the result of M * x is dense if x has a singleton dimension. + + # Currently M.matvec(asarray(col)) is rank-1, whereas M.matvec(col) + # is rank-2. Is this desirable? 
+ + def test_matmat_sparse(self): + a = matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]]) + a2 = array([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]]) + b = matrix([[0,1],[1,0],[0,2]],'d') + asp = self.spcreator(a) + bsp = self.spcreator(b) + assert_array_almost_equal((asp @ bsp).toarray(), a @ b) + assert_array_almost_equal(asp @ b, a @ b) + assert_array_almost_equal(a @ bsp, a @ b) + assert_array_almost_equal(a2 @ bsp, a @ b) + + # Now try performing cross-type multplication: + csp = bsp.tocsc() + c = b + want = a @ c + assert_array_almost_equal((asp @ csp).toarray(), want) + assert_array_almost_equal(asp @ c, want) + + assert_array_almost_equal(a @ csp, want) + assert_array_almost_equal(a2 @ csp, want) + csp = bsp.tocsr() + assert_array_almost_equal((asp @ csp).toarray(), want) + assert_array_almost_equal(asp @ c, want) + + assert_array_almost_equal(a @ csp, want) + assert_array_almost_equal(a2 @ csp, want) + csp = bsp.tocoo() + assert_array_almost_equal((asp @ csp).toarray(), want) + assert_array_almost_equal(asp @ c, want) + + assert_array_almost_equal(a @ csp, want) + assert_array_almost_equal(a2 @ csp, want) + + # Test provided by Andy Fraser, 2006-03-26 + L = 30 + frac = .3 + random.seed(0) # make runs repeatable + A = zeros((L,2)) + for i in range(L): + for j in range(2): + r = random.random() + if r < frac: + A[i,j] = r/frac + + A = self.spcreator(A) + B = A @ A.T + assert_array_almost_equal(B.toarray(), A.toarray() @ A.T.toarray()) + assert_array_almost_equal(B.toarray(), A.toarray() @ A.toarray().T) + + # check dimension mismatch 2x2 times 3x2 + A = self.spcreator([[1,2],[3,4]]) + B = self.spcreator([[1,2],[3,4],[5,6]]) + assert_raises(ValueError, A.__matmul__, B) + if isinstance(A, sparray): + assert_raises(ValueError, A.__mul__, B) + + def test_matmat_dense(self): + a = matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]]) + asp = self.spcreator(a) + + # check both array and matrix types + bs = [array([[1,2],[3,4],[5,6]]), matrix([[1,2],[3,4],[5,6]])] + + for b in bs: + result 
= asp @ b + assert_(isinstance(result, type(b))) + assert_equal(result.shape, (4,2)) + assert_equal(result, dot(a,b)) + + def test_sparse_format_conversions(self): + A = sparse.kron([[1,0,2],[0,3,4],[5,0,0]], [[1,2],[0,3]]) + D = A.toarray() + A = self.spcreator(A) + + for format in ['bsr','coo','csc','csr','dia','dok','lil']: + a = A.asformat(format) + assert_equal(a.format,format) + assert_array_equal(a.toarray(), D) + + b = self.spcreator(D+3j).asformat(format) + assert_equal(b.format,format) + assert_array_equal(b.toarray(), D+3j) + + c = eval(format + '_matrix')(A) + assert_equal(c.format,format) + assert_array_equal(c.toarray(), D) + + for format in ['array', 'dense']: + a = A.asformat(format) + assert_array_equal(a, D) + + b = self.spcreator(D+3j).asformat(format) + assert_array_equal(b, D+3j) + + def test_tobsr(self): + x = array([[1,0,2,0],[0,0,0,0],[0,0,4,5]]) + y = array([[0,1,2],[3,0,5]]) + A = kron(x,y) + Asp = self.spcreator(A) + for format in ['bsr']: + fn = getattr(Asp, 'to' + format) + + for X in [1, 2, 3, 6]: + for Y in [1, 2, 3, 4, 6, 12]: + assert_equal(fn(blocksize=(X, Y)).toarray(), A) + + def test_transpose(self): + dat_1 = self.dat + dat_2 = np.array([[]]) + matrices = [dat_1, dat_2] + + def check(dtype, j): + dat = array(matrices[j], dtype=dtype) + datsp = self.spcreator(dat) + + a = datsp.transpose() + b = dat.transpose() + + assert_array_equal(a.toarray(), b) + assert_array_equal(a.transpose().toarray(), dat) + assert_array_equal(datsp.transpose(axes=(1, 0)).toarray(), b) + assert_equal(a.dtype, b.dtype) + + # See gh-5987 + empty = self.spcreator((3, 4)) + assert_array_equal(np.transpose(empty).toarray(), + np.transpose(zeros((3, 4)))) + assert_array_equal(empty.T.toarray(), zeros((4, 3))) + assert_raises(ValueError, empty.transpose, axes=0) + + for dtype in self.checked_dtypes: + for j in range(len(matrices)): + check(dtype, j) + + def test_add_dense(self): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = 
self.datsp_dtypes[dtype] + + # adding a dense matrix to a sparse matrix + sum1 = dat + datsp + assert_array_equal(sum1, dat + dat) + sum2 = datsp + dat + assert_array_equal(sum2, dat + dat) + + for dtype in self.math_dtypes: + check(dtype) + + def test_sub_dense(self): + # subtracting a dense matrix to/from a sparse matrix + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + + # Behavior is different for bool. + if dat.dtype == bool: + sum1 = dat - datsp + assert_array_equal(sum1, dat - dat) + sum2 = datsp - dat + assert_array_equal(sum2, dat - dat) + else: + # Manually add to avoid upcasting from scalar + # multiplication. + sum1 = (dat + dat + dat) - datsp + assert_array_equal(sum1, dat + dat) + sum2 = (datsp + datsp + datsp) - dat + assert_array_equal(sum2, dat + dat) + + for dtype in self.math_dtypes: + if dtype == np.dtype('bool'): + # boolean array subtraction deprecated in 1.9.0 + continue + + check(dtype) + + def test_maximum_minimum(self): + A_dense = np.array([[1, 0, 3], [0, 4, 5], [0, 0, 0]]) + B_dense = np.array([[1, 1, 2], [0, 3, 6], [1, -1, 0]]) + + A_dense_cpx = np.array([[1, 0, 3], [0, 4+2j, 5], [0, 1j, -1j]]) + + def check(dtype, dtype2, btype): + if np.issubdtype(dtype, np.complexfloating): + A = self.spcreator(A_dense_cpx.astype(dtype)) + else: + A = self.spcreator(A_dense.astype(dtype)) + if btype == 'scalar': + B = dtype2.type(1) + elif btype == 'scalar2': + B = dtype2.type(-1) + elif btype == 'dense': + B = B_dense.astype(dtype2) + elif btype == 'sparse': + B = self.spcreator(B_dense.astype(dtype2)) + else: + raise ValueError() + + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, + "Taking maximum .minimum. with > 0 .< 0. 
number " + "results to a dense matrix") + + max_s = A.maximum(B) + min_s = A.minimum(B) + + max_d = np.maximum(toarray(A), toarray(B)) + assert_array_equal(toarray(max_s), max_d) + assert_equal(max_s.dtype, max_d.dtype) + + min_d = np.minimum(toarray(A), toarray(B)) + assert_array_equal(toarray(min_s), min_d) + assert_equal(min_s.dtype, min_d.dtype) + + for dtype in self.math_dtypes: + for dtype2 in [np.int8, np.float64, np.complex128]: + for btype in ['scalar', 'scalar2', 'dense', 'sparse']: + check(np.dtype(dtype), np.dtype(dtype2), btype) + + def test_copy(self): + # Check whether the copy=True and copy=False keywords work + A = self.datsp + + # check that copy preserves format + assert_equal(A.copy().format, A.format) + assert_equal(A.__class__(A,copy=True).format, A.format) + assert_equal(A.__class__(A,copy=False).format, A.format) + + assert_equal(A.copy().toarray(), A.toarray()) + assert_equal(A.__class__(A, copy=True).toarray(), A.toarray()) + assert_equal(A.__class__(A, copy=False).toarray(), A.toarray()) + + # check that XXX_matrix.toXXX() works + toself = getattr(A,'to' + A.format) + assert_(toself() is A) + assert_(toself(copy=False) is A) + assert_equal(toself(copy=True).format, A.format) + assert_equal(toself(copy=True).toarray(), A.toarray()) + + # check whether the data is copied? + assert_(not sparse_may_share_memory(A.copy(), A)) + + # test that __iter__ is compatible with NumPy matrix + def test_iterator(self): + B = matrix(np.arange(50).reshape(5, 10)) + A = self.spcreator(B) + + for x, y in zip(A, B): + assert_equal(x.toarray(), y) + + def test_size_zero_matrix_arithmetic(self): + # Test basic matrix arithmetic with shapes like (0,0), (10,0), + # (0, 3), etc. 
+ mat = array([]) + a = mat.reshape((0, 0)) + b = mat.reshape((0, 1)) + c = mat.reshape((0, 5)) + d = mat.reshape((1, 0)) + e = mat.reshape((5, 0)) + f = np.ones([5, 5]) + + asp = self.spcreator(a) + bsp = self.spcreator(b) + csp = self.spcreator(c) + dsp = self.spcreator(d) + esp = self.spcreator(e) + fsp = self.spcreator(f) + + # matrix product. + assert_array_equal(asp.dot(asp).toarray(), np.dot(a, a)) + assert_array_equal(bsp.dot(dsp).toarray(), np.dot(b, d)) + assert_array_equal(dsp.dot(bsp).toarray(), np.dot(d, b)) + assert_array_equal(csp.dot(esp).toarray(), np.dot(c, e)) + assert_array_equal(csp.dot(fsp).toarray(), np.dot(c, f)) + assert_array_equal(esp.dot(csp).toarray(), np.dot(e, c)) + assert_array_equal(dsp.dot(csp).toarray(), np.dot(d, c)) + assert_array_equal(fsp.dot(esp).toarray(), np.dot(f, e)) + + # bad matrix products + assert_raises(ValueError, dsp.dot, e) + assert_raises(ValueError, asp.dot, d) + + # elemente-wise multiplication + assert_array_equal(asp.multiply(asp).toarray(), np.multiply(a, a)) + assert_array_equal(bsp.multiply(bsp).toarray(), np.multiply(b, b)) + assert_array_equal(dsp.multiply(dsp).toarray(), np.multiply(d, d)) + + assert_array_equal(asp.multiply(a).toarray(), np.multiply(a, a)) + assert_array_equal(bsp.multiply(b).toarray(), np.multiply(b, b)) + assert_array_equal(dsp.multiply(d).toarray(), np.multiply(d, d)) + + assert_array_equal(asp.multiply(6).toarray(), np.multiply(a, 6)) + assert_array_equal(bsp.multiply(6).toarray(), np.multiply(b, 6)) + assert_array_equal(dsp.multiply(6).toarray(), np.multiply(d, 6)) + + # bad element-wise multiplication + assert_raises(ValueError, asp.multiply, c) + assert_raises(ValueError, esp.multiply, c) + + # Addition + assert_array_equal(asp.__add__(asp).toarray(), a.__add__(a)) + assert_array_equal(bsp.__add__(bsp).toarray(), b.__add__(b)) + assert_array_equal(dsp.__add__(dsp).toarray(), d.__add__(d)) + + # bad addition + assert_raises(ValueError, asp.__add__, dsp) + 
assert_raises(ValueError, bsp.__add__, asp) + + def test_size_zero_conversions(self): + mat = array([]) + a = mat.reshape((0, 0)) + b = mat.reshape((0, 5)) + c = mat.reshape((5, 0)) + + for m in [a, b, c]: + spm = self.spcreator(m) + assert_array_equal(spm.tocoo().toarray(), m) + assert_array_equal(spm.tocsr().toarray(), m) + assert_array_equal(spm.tocsc().toarray(), m) + assert_array_equal(spm.tolil().toarray(), m) + assert_array_equal(spm.todok().toarray(), m) + assert_array_equal(spm.tobsr().toarray(), m) + + def test_pickle(self): + import pickle + sup = suppress_warnings() + sup.filter(SparseEfficiencyWarning) + + @sup + def check(): + datsp = self.datsp.copy() + for protocol in range(pickle.HIGHEST_PROTOCOL): + sploaded = pickle.loads(pickle.dumps(datsp, protocol=protocol)) + assert_equal(datsp.shape, sploaded.shape) + assert_array_equal(datsp.toarray(), sploaded.toarray()) + assert_equal(datsp.format, sploaded.format) + # Hacky check for class member equality. This assumes that + # all instance variables are one of: + # 1. Plain numpy ndarrays + # 2. Tuples of ndarrays + # 3. Types that support equality comparison with == + for key, val in datsp.__dict__.items(): + if isinstance(val, np.ndarray): + assert_array_equal(val, sploaded.__dict__[key]) + elif (isinstance(val, tuple) and val + and isinstance(val[0], np.ndarray)): + assert_array_equal(val, sploaded.__dict__[key]) + else: + assert_(val == sploaded.__dict__[key]) + check() + + def test_unary_ufunc_overrides(self): + def check(name): + if name == "sign": + pytest.skip("sign conflicts with comparison op " + "support on Numpy") + if self.spcreator in (dok_matrix, lil_matrix): + pytest.skip("Unary ops not implemented for dok/lil") + ufunc = getattr(np, name) + + X = self.spcreator(np.arange(20).reshape(4, 5) / 20.) 
+ X0 = ufunc(X.toarray()) + + X2 = ufunc(X) + assert_array_equal(X2.toarray(), X0) + + for name in ["sin", "tan", "arcsin", "arctan", "sinh", "tanh", + "arcsinh", "arctanh", "rint", "sign", "expm1", "log1p", + "deg2rad", "rad2deg", "floor", "ceil", "trunc", "sqrt", + "abs"]: + check(name) + + def test_resize(self): + # resize(shape) resizes the matrix in-place + D = np.array([[1, 0, 3, 4], + [2, 0, 0, 0], + [3, 0, 0, 0]]) + S = self.spcreator(D) + assert_(S.resize((3, 2)) is None) + assert_array_equal(S.toarray(), [[1, 0], + [2, 0], + [3, 0]]) + S.resize((2, 2)) + assert_array_equal(S.toarray(), [[1, 0], + [2, 0]]) + S.resize((3, 2)) + assert_array_equal(S.toarray(), [[1, 0], + [2, 0], + [0, 0]]) + S.resize((3, 3)) + assert_array_equal(S.toarray(), [[1, 0, 0], + [2, 0, 0], + [0, 0, 0]]) + # test no-op + S.resize((3, 3)) + assert_array_equal(S.toarray(), [[1, 0, 0], + [2, 0, 0], + [0, 0, 0]]) + + # test *args + S.resize(3, 2) + assert_array_equal(S.toarray(), [[1, 0], + [2, 0], + [0, 0]]) + + for bad_shape in [1, (-1, 2), (2, -1), (1, 2, 3)]: + assert_raises(ValueError, S.resize, bad_shape) + + def test_constructor1_base(self): + A = self.datsp + + self_format = A.format + + C = A.__class__(A, copy=False) + assert_array_equal_dtype(A.toarray(), C.toarray()) + if self_format not in NON_ARRAY_BACKED_FORMATS: + assert_(sparse_may_share_memory(A, C)) + + C = A.__class__(A, dtype=A.dtype, copy=False) + assert_array_equal_dtype(A.toarray(), C.toarray()) + if self_format not in NON_ARRAY_BACKED_FORMATS: + assert_(sparse_may_share_memory(A, C)) + + C = A.__class__(A, dtype=np.float32, copy=False) + assert_array_equal(A.toarray(), C.toarray()) + + C = A.__class__(A, copy=True) + assert_array_equal_dtype(A.toarray(), C.toarray()) + assert_(not sparse_may_share_memory(A, C)) + + for other_format in ['csr', 'csc', 'coo', 'dia', 'dok', 'lil']: + if other_format == self_format: + continue + B = A.asformat(other_format) + C = A.__class__(B, copy=False) + 
assert_array_equal_dtype(A.toarray(), C.toarray()) + + C = A.__class__(B, copy=True) + assert_array_equal_dtype(A.toarray(), C.toarray()) + assert_(not sparse_may_share_memory(B, C)) + + +class _TestInplaceArithmetic: + def test_inplace_dense(self): + a = np.ones((3, 4)) + b = self.spcreator(a) + + x = a.copy() + y = a.copy() + x += a + y += b + assert_array_equal(x, y) + + x = a.copy() + y = a.copy() + x -= a + y -= b + assert_array_equal(x, y) + + x = a.copy() + y = a.copy() + if isinstance(b, sparray): + assert_raises(ValueError, operator.imul, x, b.T) + x = x * a + y *= b + else: + # This is matrix product, from __rmul__ + assert_raises(ValueError, operator.imul, x, b) + x = x.dot(a.T) + y *= b.T + assert_array_equal(x, y) + + # Matrix (non-elementwise) floor division is not defined + assert_raises(TypeError, operator.ifloordiv, x, b) + + def test_imul_scalar(self): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + + # Avoid implicit casting. + if np.can_cast(int, dtype, casting='same_kind'): + a = datsp.copy() + a *= 2 + b = dat.copy() + b *= 2 + assert_array_equal(b, a.toarray()) + + if np.can_cast(float, dtype, casting='same_kind'): + a = datsp.copy() + a *= 17.3 + b = dat.copy() + b *= 17.3 + assert_array_equal(b, a.toarray()) + + for dtype in self.math_dtypes: + check(dtype) + + def test_idiv_scalar(self): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + + if np.can_cast(int, dtype, casting='same_kind'): + a = datsp.copy() + a /= 2 + b = dat.copy() + b /= 2 + assert_array_equal(b, a.toarray()) + + if np.can_cast(float, dtype, casting='same_kind'): + a = datsp.copy() + a /= 17.3 + b = dat.copy() + b /= 17.3 + assert_array_equal(b, a.toarray()) + + for dtype in self.math_dtypes: + # /= should only be used with float dtypes to avoid implicit + # casting. 
+ if not np.can_cast(dtype, np.dtype(int)): + check(dtype) + + def test_inplace_success(self): + # Inplace ops should work even if a specialized version is not + # implemented, falling back to x = x y + a = self.spcreator(np.eye(5)) + b = self.spcreator(np.eye(5)) + bp = self.spcreator(np.eye(5)) + + b += a + bp = bp + a + assert_allclose(b.toarray(), bp.toarray()) + + b *= a + bp = bp * a + assert_allclose(b.toarray(), bp.toarray()) + + b -= a + bp = bp - a + assert_allclose(b.toarray(), bp.toarray()) + + assert_raises(TypeError, operator.ifloordiv, a, b) + + +class _TestGetSet: + def test_getelement(self): + def check(dtype): + D = array([[1,0,0], + [4,3,0], + [0,2,0], + [0,0,0]], dtype=dtype) + A = self.spcreator(D) + + M,N = D.shape + + for i in range(-M, M): + for j in range(-N, N): + assert_equal(A[i,j], D[i,j]) + + assert_equal(type(A[1,1]), dtype) + + for ij in [(0,3),(-1,3),(4,0),(4,3),(4,-1), (1, 2, 3)]: + assert_raises((IndexError, TypeError), A.__getitem__, ij) + + for dtype in supported_dtypes: + check(np.dtype(dtype)) + + def test_setelement(self): + def check(dtype): + A = self.spcreator((3,4), dtype=dtype) + with suppress_warnings() as sup: + sup.filter( + SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive" + ) + A[0, 0] = dtype.type(0) # bug 870 + A[1, 2] = dtype.type(4.0) + A[0, 1] = dtype.type(3) + A[2, 0] = dtype.type(2.0) + A[0,-1] = dtype.type(8) + A[-1,-2] = dtype.type(7) + A[0, 1] = dtype.type(5) + + if dtype != np.bool_: + assert_array_equal( + A.toarray(), + [ + [0, 5, 0, 8], + [0, 0, 4, 0], + [2, 0, 7, 0] + ] + ) + + for ij in [(0,4),(-1,4),(3,0),(3,4),(3,-1)]: + assert_raises(IndexError, A.__setitem__, ij, 123.0) + + for v in [[1,2,3], array([1,2,3])]: + assert_raises(ValueError, A.__setitem__, (0,0), v) + + if (not np.issubdtype(dtype, np.complexfloating) and + dtype != np.bool_): + for v in [3j]: + assert_raises(TypeError, A.__setitem__, (0,0), v) + + for dtype in supported_dtypes: + 
check(np.dtype(dtype)) + + def test_negative_index_assignment(self): + # Regression test for github issue 4428. + + def check(dtype): + A = self.spcreator((3, 10), dtype=dtype) + with suppress_warnings() as sup: + sup.filter( + SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive" + ) + A[0, -4] = 1 + assert_equal(A[0, -4], 1) + + for dtype in self.math_dtypes: + check(np.dtype(dtype)) + + def test_scalar_assign_2(self): + n, m = (5, 10) + + def _test_set(i, j, nitems): + msg = f"{i!r} ; {j!r} ; {nitems!r}" + A = self.spcreator((n, m)) + with suppress_warnings() as sup: + sup.filter( + SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive" + ) + A[i, j] = 1 + assert_almost_equal(A.sum(), nitems, err_msg=msg) + assert_almost_equal(A[i, j], 1, err_msg=msg) + + # [i,j] + for i, j in [(2, 3), (-1, 8), (-1, -2), (array(-1), -2), (-1, array(-2)), + (array(-1), array(-2))]: + _test_set(i, j, 1) + + def test_index_scalar_assign(self): + A = self.spcreator((5, 5)) + B = np.zeros((5, 5)) + with suppress_warnings() as sup: + sup.filter( + SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive" + ) + for C in [A, B]: + C[0,1] = 1 + C[3,0] = 4 + C[3,0] = 9 + assert_array_equal(A.toarray(), B) + + +class _TestSolve: + def test_solve(self): + # Test whether the lu_solve command segfaults, as reported by Nils + # Wagner for a 64-bit machine, 02 March 2005 (EJS) + n = 20 + np.random.seed(0) # make tests repeatable + A = zeros((n,n), dtype=complex) + x = np.random.rand(n) + y = np.random.rand(n-1)+1j*np.random.rand(n-1) + r = np.random.rand(n) + for i in range(len(x)): + A[i,i] = x[i] + for i in range(len(y)): + A[i,i+1] = y[i] + A[i+1,i] = conjugate(y[i]) + A = self.spcreator(A) + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, + "splu converted its input to CSC format") + x = splu(A).solve(r) + assert_almost_equal(A @ x,r) + + +class 
_TestSlicing: + def test_dtype_preservation(self): + assert_equal(self.spcreator((1,10), dtype=np.int16)[0,1:5].dtype, np.int16) + assert_equal(self.spcreator((1,10), dtype=np.int32)[0,1:5].dtype, np.int32) + assert_equal(self.spcreator((1,10), dtype=np.float32)[0,1:5].dtype, np.float32) + assert_equal(self.spcreator((1,10), dtype=np.float64)[0,1:5].dtype, np.float64) + + def test_dtype_preservation_empty_slice(self): + # This should be parametrized with pytest, but something in the parent + # class creation used in this file breaks pytest.mark.parametrize. + for dt in [np.int16, np.int32, np.float32, np.float64]: + A = self.spcreator((3, 2), dtype=dt) + assert_equal(A[:, 0:0:2].dtype, dt) + assert_equal(A[0:0:2, :].dtype, dt) + assert_equal(A[0, 0:0:2].dtype, dt) + assert_equal(A[0:0:2, 0].dtype, dt) + + def test_get_horiz_slice(self): + B = asmatrix(arange(50.).reshape(5,10)) + A = self.spcreator(B) + assert_array_equal(B[1, :], A[1, :].toarray()) + assert_array_equal(B[1, 2:5], A[1, 2:5].toarray()) + + C = matrix([[1, 2, 1], [4, 0, 6], [0, 0, 0], [0, 0, 1]]) + D = self.spcreator(C) + assert_array_equal(C[1, 1:3], D[1, 1:3].toarray()) + + # Now test slicing when a row contains only zeros + E = matrix([[1, 2, 1], [4, 0, 0], [0, 0, 0], [0, 0, 1]]) + F = self.spcreator(E) + assert_array_equal(E[1, 1:3], F[1, 1:3].toarray()) + assert_array_equal(E[2, -2:], F[2, -2:].toarray()) + + # The following should raise exceptions: + assert_raises(IndexError, A.__getitem__, (slice(None), 11)) + assert_raises(IndexError, A.__getitem__, (6, slice(3, 7))) + + def test_get_vert_slice(self): + B = arange(50.).reshape(5, 10) + A = self.spcreator(B) + assert_array_equal(B[2:5, [0]], A[2:5, 0].toarray()) + assert_array_equal(B[:, [1]], A[:, 1].toarray()) + + C = array([[1, 2, 1], [4, 0, 6], [0, 0, 0], [0, 0, 1]]) + D = self.spcreator(C) + assert_array_equal(C[1:3, [1]], D[1:3, 1].toarray()) + assert_array_equal(C[:, [2]], D[:, 2].toarray()) + + # Now test slicing when a column contains 
only zeros + E = array([[1, 0, 1], [4, 0, 0], [0, 0, 0], [0, 0, 1]]) + F = self.spcreator(E) + assert_array_equal(E[:, [1]], F[:, 1].toarray()) + assert_array_equal(E[-2:, [2]], F[-2:, 2].toarray()) + + # The following should raise exceptions: + assert_raises(IndexError, A.__getitem__, (slice(None), 11)) + assert_raises(IndexError, A.__getitem__, (6, slice(3, 7))) + + def test_get_slices(self): + B = arange(50.).reshape(5, 10) + A = self.spcreator(B) + assert_array_equal(A[2:5, 0:3].toarray(), B[2:5, 0:3]) + assert_array_equal(A[1:, :-1].toarray(), B[1:, :-1]) + assert_array_equal(A[:-1, 1:].toarray(), B[:-1, 1:]) + + # Now test slicing when a column contains only zeros + E = array([[1, 0, 1], [4, 0, 0], [0, 0, 0], [0, 0, 1]]) + F = self.spcreator(E) + assert_array_equal(E[1:2, 1:2], F[1:2, 1:2].toarray()) + assert_array_equal(E[:, 1:], F[:, 1:].toarray()) + + def test_non_unit_stride_2d_indexing(self): + # Regression test -- used to silently ignore the stride. + v0 = np.random.rand(50, 50) + try: + v = self.spcreator(v0)[0:25:2, 2:30:3] + except ValueError: + # if unsupported + raise pytest.skip("feature not implemented") + + assert_array_equal(v.toarray(), v0[0:25:2, 2:30:3]) + + def test_slicing_2(self): + B = asmatrix(arange(50).reshape(5,10)) + A = self.spcreator(B) + + # [i,j] + assert_equal(A[2,3], B[2,3]) + assert_equal(A[-1,8], B[-1,8]) + assert_equal(A[-1,-2],B[-1,-2]) + assert_equal(A[array(-1),-2],B[-1,-2]) + assert_equal(A[-1,array(-2)],B[-1,-2]) + assert_equal(A[array(-1),array(-2)],B[-1,-2]) + + # [i,1:2] + assert_equal(A[2, :].toarray(), B[2, :]) + assert_equal(A[2, 5:-2].toarray(), B[2, 5:-2]) + assert_equal(A[array(2), 5:-2].toarray(), B[2, 5:-2]) + + # [1:2,j] + assert_equal(A[:, 2].toarray(), B[:, 2]) + assert_equal(A[3:4, 9].toarray(), B[3:4, 9]) + assert_equal(A[1:4, -5].toarray(), B[1:4, -5]) + assert_equal(A[2:-1, 3].toarray(), B[2:-1, 3]) + assert_equal(A[2:-1, array(3)].toarray(), B[2:-1, 3]) + + # [1:2,1:2] + assert_equal(A[1:2, 
1:2].toarray(), B[1:2, 1:2]) + assert_equal(A[4:, 3:].toarray(), B[4:, 3:]) + assert_equal(A[:4, :5].toarray(), B[:4, :5]) + assert_equal(A[2:-1, :5].toarray(), B[2:-1, :5]) + + # [i] + assert_equal(A[1, :].toarray(), B[1, :]) + assert_equal(A[-2, :].toarray(), B[-2, :]) + assert_equal(A[array(-2), :].toarray(), B[-2, :]) + + # [1:2] + assert_equal(A[1:4].toarray(), B[1:4]) + assert_equal(A[1:-2].toarray(), B[1:-2]) + + # Check bug reported by Robert Cimrman: + # http://thread.gmane.org/gmane.comp.python.scientific.devel/7986 (dead link) + s = slice(int8(2),int8(4),None) + assert_equal(A[s, :].toarray(), B[2:4, :]) + assert_equal(A[:, s].toarray(), B[:, 2:4]) + + def test_slicing_3(self): + B = asmatrix(arange(50).reshape(5,10)) + A = self.spcreator(B) + + s_ = np.s_ + slices = [s_[:2], s_[1:2], s_[3:], s_[3::2], + s_[15:20], s_[3:2], + s_[8:3:-1], s_[4::-2], s_[:5:-1], + 0, 1, s_[:], s_[1:5], -1, -2, -5, + array(-1), np.int8(-3)] + + def check_1(a): + x = A[a] + y = B[a] + if y.shape == (): + assert_equal(x, y, repr(a)) + else: + if x.size == 0 and y.size == 0: + pass + else: + assert_array_equal(x.toarray(), y, repr(a)) + + for j, a in enumerate(slices): + check_1(a) + + def check_2(a, b): + # Indexing np.matrix with 0-d arrays seems to be broken, + # as they seem not to be treated as scalars. + # https://github.com/numpy/numpy/issues/3110 + if isinstance(a, np.ndarray): + ai = int(a) + else: + ai = a + if isinstance(b, np.ndarray): + bi = int(b) + else: + bi = b + + x = A[a, b] + y = B[ai, bi] + + if y.shape == (): + assert_equal(x, y, repr((a, b))) + else: + if x.size == 0 and y.size == 0: + pass + else: + assert_array_equal(x.toarray(), y, repr((a, b))) + + for i, a in enumerate(slices): + for j, b in enumerate(slices): + check_2(a, b) + + # Check out of bounds etc. 
systematically + extra_slices = [] + for a, b, c in itertools.product(*([(None, 0, 1, 2, 5, 15, + -1, -2, 5, -15)]*3)): + if c == 0: + continue + extra_slices.append(slice(a, b, c)) + + for a in extra_slices: + check_2(a, a) + check_2(a, -2) + check_2(-2, a) + + def test_ellipsis_slicing(self): + b = asmatrix(arange(50).reshape(5,10)) + a = self.spcreator(b) + + assert_array_equal(a[...].toarray(), b[...].A) + assert_array_equal(a[...,].toarray(), b[...,].A) + + assert_array_equal(a[1, ...].toarray(), b[1, ...].A) + assert_array_equal(a[..., 1].toarray(), b[..., 1].A) + assert_array_equal(a[1:, ...].toarray(), b[1:, ...].A) + assert_array_equal(a[..., 1:].toarray(), b[..., 1:].A) + + assert_array_equal(a[1:, 1, ...].toarray(), b[1:, 1, ...].A) + assert_array_equal(a[1, ..., 1:].toarray(), b[1, ..., 1:].A) + # These return ints + assert_equal(a[1, 1, ...], b[1, 1, ...]) + assert_equal(a[1, ..., 1], b[1, ..., 1]) + + def test_multiple_ellipsis_slicing(self): + a = self.spcreator(arange(6).reshape(3, 2)) + + with pytest.raises(IndexError, + match='an index can only have a single ellipsis'): + a[..., ...] + with pytest.raises(IndexError, + match='an index can only have a single ellipsis'): + a[..., 1, ...] 
+ + +class _TestSlicingAssign: + def test_slice_scalar_assign(self): + A = self.spcreator((5, 5)) + B = np.zeros((5, 5)) + with suppress_warnings() as sup: + sup.filter( + SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive" + ) + for C in [A, B]: + C[0:1,1] = 1 + C[3:0,0] = 4 + C[3:4,0] = 9 + C[0,4:] = 1 + C[3::-1,4:] = 9 + assert_array_equal(A.toarray(), B) + + def test_slice_assign_2(self): + n, m = (5, 10) + + def _test_set(i, j): + msg = f"i={i!r}; j={j!r}" + A = self.spcreator((n, m)) + with suppress_warnings() as sup: + sup.filter( + SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive" + ) + A[i, j] = 1 + B = np.zeros((n, m)) + B[i, j] = 1 + assert_array_almost_equal(A.toarray(), B, err_msg=msg) + # [i,1:2] + for i, j in [(2, slice(3)), (2, slice(None, 10, 4)), (2, slice(5, -2)), + (array(2), slice(5, -2))]: + _test_set(i, j) + + def test_self_self_assignment(self): + # Tests whether a row of one lil_matrix can be assigned to + # another. 
+ B = self.spcreator((4,3)) + with suppress_warnings() as sup: + sup.filter( + SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive" + ) + B[0,0] = 2 + B[1,2] = 7 + B[2,1] = 3 + B[3,0] = 10 + + A = B / 10 + B[0,:] = A[0,:] + assert_array_equal(A[0,:].toarray(), B[0,:].toarray()) + + A = B / 10 + B[:,:] = A[:1,:1] + assert_array_equal(np.zeros((4,3)) + A[0,0], B.toarray()) + + A = B / 10 + B[:-1,0] = A[0,:].T + assert_array_equal(A[0,:].toarray().T, B[:-1,0].toarray()) + + def test_slice_assignment(self): + B = self.spcreator((4,3)) + expected = array([[10,0,0], + [0,0,6], + [0,14,0], + [0,0,0]]) + block = [[1,0],[0,4]] + + with suppress_warnings() as sup: + sup.filter( + SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive" + ) + B[0,0] = 5 + B[1,2] = 3 + B[2,1] = 7 + B[:,:] = B+B + assert_array_equal(B.toarray(), expected) + + B[:2,:2] = csc_matrix(array(block)) + assert_array_equal(B.toarray()[:2, :2], block) + + def test_sparsity_modifying_assignment(self): + B = self.spcreator((4,3)) + with suppress_warnings() as sup: + sup.filter( + SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive" + ) + B[0,0] = 5 + B[1,2] = 3 + B[2,1] = 7 + B[3,0] = 10 + B[:3] = csr_matrix(np.eye(3)) + + expected = array([[1,0,0],[0,1,0],[0,0,1],[10,0,0]]) + assert_array_equal(B.toarray(), expected) + + def test_set_slice(self): + A = self.spcreator((5,10)) + B = array(zeros((5, 10), float)) + s_ = np.s_ + slices = [s_[:2], s_[1:2], s_[3:], s_[3::2], + s_[8:3:-1], s_[4::-2], s_[:5:-1], + 0, 1, s_[:], s_[1:5], -1, -2, -5, + array(-1), np.int8(-3)] + + with suppress_warnings() as sup: + sup.filter( + SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive" + ) + for j, a in enumerate(slices): + A[a] = j + B[a] = j + assert_array_equal(A.toarray(), B, repr(a)) + + for i, a in enumerate(slices): + for j, b in enumerate(slices): + A[a,b] = 
10*i + 1000*(j+1) + B[a,b] = 10*i + 1000*(j+1) + assert_array_equal(A.toarray(), B, repr((a, b))) + + A[0, 1:10:2] = range(1, 10, 2) + B[0, 1:10:2] = range(1, 10, 2) + assert_array_equal(A.toarray(), B) + A[1:5:2, 0] = np.arange(1, 5, 2)[:, None] + B[1:5:2, 0] = np.arange(1, 5, 2)[:] + assert_array_equal(A.toarray(), B) + + # The next commands should raise exceptions + assert_raises(ValueError, A.__setitem__, (0, 0), list(range(100))) + assert_raises(ValueError, A.__setitem__, (0, 0), arange(100)) + assert_raises(ValueError, A.__setitem__, (0, slice(None)), + list(range(100))) + assert_raises(ValueError, A.__setitem__, (slice(None), 1), + list(range(100))) + assert_raises(ValueError, A.__setitem__, (slice(None), 1), A.copy()) + assert_raises(ValueError, A.__setitem__, + ([[1, 2, 3], [0, 3, 4]], [1, 2, 3]), [1, 2, 3, 4]) + assert_raises(ValueError, A.__setitem__, + ([[1, 2, 3], [0, 3, 4], [4, 1, 3]], + [[1, 2, 4], [0, 1, 3]]), [2, 3, 4]) + assert_raises(ValueError, A.__setitem__, (slice(4), 0), + [[1, 2], [3, 4]]) + + def test_assign_empty(self): + A = self.spcreator(np.ones((2, 3))) + B = self.spcreator((1, 2)) + A[1, :2] = B + assert_array_equal(A.toarray(), [[1, 1, 1], [0, 0, 1]]) + + def test_assign_1d_slice(self): + A = self.spcreator(np.ones((3, 3))) + x = np.zeros(3) + A[:, 0] = x + A[1, :] = x + assert_array_equal(A.toarray(), [[0, 1, 1], [0, 0, 0], [0, 1, 1]]) + + +class _TestFancyIndexing: + """Tests fancy indexing features. The tests for any matrix formats + that implement these features should derive from this class. + """ + + def test_dtype_preservation_empty_index(self): + # This should be parametrized with pytest, but something in the parent + # class creation used in this file breaks pytest.mark.parametrize. 
+ for dt in [np.int16, np.int32, np.float32, np.float64]: + A = self.spcreator((3, 2), dtype=dt) + assert_equal(A[:, [False, False]].dtype, dt) + assert_equal(A[[False, False, False], :].dtype, dt) + assert_equal(A[:, []].dtype, dt) + assert_equal(A[[], :].dtype, dt) + + def test_bad_index(self): + A = self.spcreator(np.zeros([5, 5])) + assert_raises((IndexError, ValueError, TypeError), A.__getitem__, "foo") + assert_raises((IndexError, ValueError, TypeError), A.__getitem__, (2, "foo")) + assert_raises((IndexError, ValueError), A.__getitem__, + ([1, 2, 3], [1, 2, 3, 4])) + + def test_fancy_indexing(self): + B = asmatrix(arange(50).reshape(5,10)) + A = self.spcreator(B) + + # [i] + assert_equal(A[[1, 3]].toarray(), B[[1, 3]]) + + # [i,[1,2]] + assert_equal(A[3, [1, 3]].toarray(), B[3, [1, 3]]) + assert_equal(A[-1, [2, -5]].toarray(), B[-1, [2, -5]]) + assert_equal(A[array(-1), [2, -5]].toarray(), B[-1, [2, -5]]) + assert_equal(A[-1, array([2, -5])].toarray(), B[-1, [2, -5]]) + assert_equal(A[array(-1), array([2, -5])].toarray(), B[-1, [2, -5]]) + + # [1:2,[1,2]] + assert_equal(A[:, [2, 8, 3, -1]].toarray(), B[:, [2, 8, 3, -1]]) + assert_equal(A[3:4, [9]].toarray(), B[3:4, [9]]) + assert_equal(A[1:4, [-1, -5]].toarray(), B[1:4, [-1, -5]]) + assert_equal(A[1:4, array([-1, -5])].toarray(), B[1:4, [-1, -5]]) + + # [[1,2],j] + assert_equal(A[[1, 3], 3].toarray(), B[[1, 3], 3]) + assert_equal(A[[2, -5], -4].toarray(), B[[2, -5], -4]) + assert_equal(A[array([2, -5]), -4].toarray(), B[[2, -5], -4]) + assert_equal(A[[2, -5], array(-4)].toarray(), B[[2, -5], -4]) + assert_equal(A[array([2, -5]), array(-4)].toarray(), B[[2, -5], -4]) + + # [[1,2],1:2] + assert_equal(A[[1, 3], :].toarray(), B[[1, 3], :]) + assert_equal(A[[2, -5], 8:-1].toarray(), B[[2, -5], 8:-1]) + assert_equal(A[array([2, -5]), 8:-1].toarray(), B[[2, -5], 8:-1]) + + # [[1,2],[1,2]] + assert_equal(toarray(A[[1, 3], [2, 4]]), B[[1, 3], [2, 4]]) + assert_equal(toarray(A[[-1, -3], [2, -4]]), B[[-1, -3], [2, -4]]) 
+ assert_equal( + toarray(A[array([-1, -3]), [2, -4]]), B[[-1, -3], [2, -4]] + ) + assert_equal( + toarray(A[[-1, -3], array([2, -4])]), B[[-1, -3], [2, -4]] + ) + assert_equal( + toarray(A[array([-1, -3]), array([2, -4])]), B[[-1, -3], [2, -4]] + ) + + # [[[1],[2]],[1,2]] + assert_equal(A[[[1], [3]], [2, 4]].toarray(), B[[[1], [3]], [2, 4]]) + assert_equal( + A[[[-1], [-3], [-2]], [2, -4]].toarray(), + B[[[-1], [-3], [-2]], [2, -4]] + ) + assert_equal( + A[array([[-1], [-3], [-2]]), [2, -4]].toarray(), + B[[[-1], [-3], [-2]], [2, -4]] + ) + assert_equal( + A[[[-1], [-3], [-2]], array([2, -4])].toarray(), + B[[[-1], [-3], [-2]], [2, -4]] + ) + assert_equal( + A[array([[-1], [-3], [-2]]), array([2, -4])].toarray(), + B[[[-1], [-3], [-2]], [2, -4]] + ) + + # [[1,2]] + assert_equal(A[[1, 3]].toarray(), B[[1, 3]]) + assert_equal(A[[-1, -3]].toarray(), B[[-1, -3]]) + assert_equal(A[array([-1, -3])].toarray(), B[[-1, -3]]) + + # [[1,2],:][:,[1,2]] + assert_equal( + A[[1, 3], :][:, [2, 4]].toarray(), B[[1, 3], :][:, [2, 4]] + ) + assert_equal( + A[[-1, -3], :][:, [2, -4]].toarray(), B[[-1, -3], :][:, [2, -4]] + ) + assert_equal( + A[array([-1, -3]), :][:, array([2, -4])].toarray(), + B[[-1, -3], :][:, [2, -4]] + ) + + # [:,[1,2]][[1,2],:] + assert_equal( + A[:, [1, 3]][[2, 4], :].toarray(), B[:, [1, 3]][[2, 4], :] + ) + assert_equal( + A[:, [-1, -3]][[2, -4], :].toarray(), B[:, [-1, -3]][[2, -4], :] + ) + assert_equal( + A[:, array([-1, -3])][array([2, -4]), :].toarray(), + B[:, [-1, -3]][[2, -4], :] + ) + + # Check bug reported by Robert Cimrman: + # http://thread.gmane.org/gmane.comp.python.scientific.devel/7986 (dead link) + s = slice(int8(2),int8(4),None) + assert_equal(A[s, :].toarray(), B[2:4, :]) + assert_equal(A[:, s].toarray(), B[:, 2:4]) + + # Regression for gh-4917: index with tuple of 2D arrays + i = np.array([[1]], dtype=int) + assert_equal(A[i, i].toarray(), B[i, i]) + + # Regression for gh-4917: index with tuple of empty nested lists + assert_equal(A[[[]], 
[[]]].toarray(), B[[[]], [[]]]) + + def test_fancy_indexing_randomized(self): + np.random.seed(1234) # make runs repeatable + + NUM_SAMPLES = 50 + M = 6 + N = 4 + + D = asmatrix(np.random.rand(M,N)) + D = np.multiply(D, D > 0.5) + + I = np.random.randint(-M + 1, M, size=NUM_SAMPLES) + J = np.random.randint(-N + 1, N, size=NUM_SAMPLES) + + S = self.spcreator(D) + + SIJ = S[I,J] + if issparse(SIJ): + SIJ = SIJ.toarray() + assert_equal(SIJ, D[I,J]) + + I_bad = I + M + J_bad = J - N + + assert_raises(IndexError, S.__getitem__, (I_bad,J)) + assert_raises(IndexError, S.__getitem__, (I,J_bad)) + + def test_missized_masking(self): + M, N = 5, 10 + + B = asmatrix(arange(M * N).reshape(M, N)) + A = self.spcreator(B) + + # Content of mask shouldn't matter, only its size + row_long = np.ones(M + 1, dtype=bool) + row_short = np.ones(M - 1, dtype=bool) + col_long = np.ones(N + 2, dtype=bool) + col_short = np.ones(N - 2, dtype=bool) + + with pytest.raises( + IndexError, + match=rf"boolean row index has incorrect length: {M + 1} instead of {M}" + ): + _ = A[row_long, :] + with pytest.raises( + IndexError, + match=rf"boolean row index has incorrect length: {M - 1} instead of {M}" + ): + _ = A[row_short, :] + + for i, j in itertools.product( + (row_long, row_short, slice(None)), + (col_long, col_short, slice(None)), + ): + if isinstance(i, slice) and isinstance(j, slice): + continue + with pytest.raises( + IndexError, + match=r"boolean \w+ index has incorrect length" + ): + _ = A[i, j] + + def test_fancy_indexing_boolean(self): + np.random.seed(1234) # make runs repeatable + + B = asmatrix(arange(50).reshape(5,10)) + A = self.spcreator(B) + + I = np.array(np.random.randint(0, 2, size=5), dtype=bool) + J = np.array(np.random.randint(0, 2, size=10), dtype=bool) + X = np.array(np.random.randint(0, 2, size=(5, 10)), dtype=bool) + + assert_equal(toarray(A[I]), B[I]) + assert_equal(toarray(A[:, J]), B[:, J]) + assert_equal(toarray(A[X]), B[X]) + assert_equal(toarray(A[B > 9]), B[B > 9]) + 
+ I = np.array([True, False, True, True, False]) + J = np.array([False, True, True, False, True, + False, False, False, False, False]) + + assert_equal(toarray(A[I, J]), B[I, J]) + + Z1 = np.zeros((6, 11), dtype=bool) + Z2 = np.zeros((6, 11), dtype=bool) + Z2[0,-1] = True + Z3 = np.zeros((6, 11), dtype=bool) + Z3[-1,0] = True + + assert_raises(IndexError, A.__getitem__, Z1) + assert_raises(IndexError, A.__getitem__, Z2) + assert_raises(IndexError, A.__getitem__, Z3) + assert_raises((IndexError, ValueError), A.__getitem__, (X, 1)) + + def test_fancy_indexing_sparse_boolean(self): + np.random.seed(1234) # make runs repeatable + + B = asmatrix(arange(50).reshape(5,10)) + A = self.spcreator(B) + + X = np.array(np.random.randint(0, 2, size=(5, 10)), dtype=bool) + + Xsp = csr_matrix(X) + + assert_equal(toarray(A[Xsp]), B[X]) + assert_equal(toarray(A[A > 9]), B[B > 9]) + + Z = np.array(np.random.randint(0, 2, size=(5, 11)), dtype=bool) + Y = np.array(np.random.randint(0, 2, size=(6, 10)), dtype=bool) + + Zsp = csr_matrix(Z) + Ysp = csr_matrix(Y) + + assert_raises(IndexError, A.__getitem__, Zsp) + assert_raises(IndexError, A.__getitem__, Ysp) + assert_raises((IndexError, ValueError), A.__getitem__, (Xsp, 1)) + + def test_fancy_indexing_regression_3087(self): + mat = self.spcreator(array([[1, 0, 0], [0,1,0], [1,0,0]])) + desired_cols = np.ravel(mat.sum(0)) > 0 + assert_equal(mat[:, desired_cols].toarray(), [[1, 0], [0, 1], [1, 0]]) + + def test_fancy_indexing_seq_assign(self): + mat = self.spcreator(array([[1, 0], [0, 1]])) + assert_raises(ValueError, mat.__setitem__, (0, 0), np.array([1,2])) + + def test_fancy_indexing_2d_assign(self): + # regression test for gh-10695 + mat = self.spcreator(array([[1, 0], [2, 3]])) + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, + "Changing the sparsity structure") + mat[[0, 1], [1, 1]] = mat[[1, 0], [0, 0]] + assert_equal(toarray(mat), array([[1, 2], [2, 1]])) + + def test_fancy_indexing_empty(self): + B = 
asmatrix(arange(50).reshape(5,10)) + B[1,:] = 0 + B[:,2] = 0 + B[3,6] = 0 + A = self.spcreator(B) + + K = np.array([False, False, False, False, False]) + assert_equal(toarray(A[K]), B[K]) + K = np.array([], dtype=int) + assert_equal(toarray(A[K]), B[K]) + assert_equal(toarray(A[K, K]), B[K, K]) + J = np.array([0, 1, 2, 3, 4], dtype=int)[:,None] + assert_equal(toarray(A[K, J]), B[K, J]) + assert_equal(toarray(A[J, K]), B[J, K]) + + +@contextlib.contextmanager +def check_remains_sorted(X): + """Checks that sorted indices property is retained through an operation + """ + if not hasattr(X, 'has_sorted_indices') or not X.has_sorted_indices: + yield + return + yield + indices = X.indices.copy() + X.has_sorted_indices = False + X.sort_indices() + assert_array_equal(indices, X.indices, + 'Expected sorted indices, found unsorted') + + +class _TestFancyIndexingAssign: + def test_bad_index_assign(self): + A = self.spcreator(np.zeros([5, 5])) + assert_raises((IndexError, ValueError, TypeError), A.__setitem__, "foo", 2) + assert_raises((IndexError, ValueError, TypeError), A.__setitem__, (2, "foo"), 5) + + def test_fancy_indexing_set(self): + n, m = (5, 10) + + def _test_set_slice(i, j): + A = self.spcreator((n, m)) + B = asmatrix(np.zeros((n, m))) + with suppress_warnings() as sup: + sup.filter( + SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive" + ) + B[i, j] = 1 + with check_remains_sorted(A): + A[i, j] = 1 + assert_array_almost_equal(A.toarray(), B) + # [1:2,1:2] + for i, j in [((2, 3, 4), slice(None, 10, 4)), + (np.arange(3), slice(5, -2)), + (slice(2, 5), slice(5, -2))]: + _test_set_slice(i, j) + for i, j in [(np.arange(3), np.arange(3)), ((0, 3, 4), (1, 2, 4))]: + _test_set_slice(i, j) + + def test_fancy_assignment_dtypes(self): + def check(dtype): + A = self.spcreator((5, 5), dtype=dtype) + with suppress_warnings() as sup: + sup.filter( + SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is 
expensive" + ) + A[[0,1],[0,1]] = dtype.type(1) + assert_equal(A.sum(), dtype.type(1)*2) + A[0:2,0:2] = dtype.type(1.0) + assert_equal(A.sum(), dtype.type(1)*4) + A[2,2] = dtype.type(1.0) + assert_equal(A.sum(), dtype.type(1)*4 + dtype.type(1)) + + for dtype in supported_dtypes: + check(np.dtype(dtype)) + + def test_sequence_assignment(self): + A = self.spcreator((4,3)) + B = self.spcreator(eye(3,4)) + + i0 = [0,1,2] + i1 = (0,1,2) + i2 = array(i0) + + with suppress_warnings() as sup: + sup.filter( + SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive" + ) + with check_remains_sorted(A): + A[0,i0] = B[i0,0].T + A[1,i1] = B[i1,1].T + A[2,i2] = B[i2,2].T + assert_array_equal(A.toarray(), B.T.toarray()) + + # column slice + A = self.spcreator((2,3)) + with check_remains_sorted(A): + A[1,1:3] = [10,20] + assert_array_equal(A.toarray(), [[0, 0, 0], [0, 10, 20]]) + + # row slice + A = self.spcreator((3,2)) + with check_remains_sorted(A): + A[1:3,1] = [[10],[20]] + assert_array_equal(A.toarray(), [[0, 0], [0, 10], [0, 20]]) + + # both slices + A = self.spcreator((3,3)) + B = asmatrix(np.zeros((3,3))) + with check_remains_sorted(A): + for C in [A, B]: + C[[0,1,2], [0,1,2]] = [4,5,6] + assert_array_equal(A.toarray(), B) + + # both slices (2) + A = self.spcreator((4, 3)) + with check_remains_sorted(A): + A[(1, 2, 3), (0, 1, 2)] = [1, 2, 3] + assert_almost_equal(A.sum(), 6) + B = asmatrix(np.zeros((4, 3))) + B[(1, 2, 3), (0, 1, 2)] = [1, 2, 3] + assert_array_equal(A.toarray(), B) + + def test_fancy_assign_empty(self): + B = asmatrix(arange(50).reshape(5,10)) + B[1,:] = 0 + B[:,2] = 0 + B[3,6] = 0 + A = self.spcreator(B) + + K = np.array([False, False, False, False, False]) + A[K] = 42 + assert_equal(toarray(A), B) + + K = np.array([], dtype=int) + A[K] = 42 + assert_equal(toarray(A), B) + A[K,K] = 42 + assert_equal(toarray(A), B) + + J = np.array([0, 1, 2, 3, 4], dtype=int)[:,None] + A[K,J] = 42 + assert_equal(toarray(A), B) + A[J,K] = 
42 + assert_equal(toarray(A), B) + + +class _TestFancyMultidim: + def test_fancy_indexing_ndarray(self): + sets = [ + (np.array([[1], [2], [3]]), np.array([3, 4, 2])), + (np.array([[1], [2], [3]]), np.array([[3, 4, 2]])), + (np.array([[1, 2, 3]]), np.array([[3], [4], [2]])), + (np.array([1, 2, 3]), np.array([[3], [4], [2]])), + (np.array([[1, 2, 3], [3, 4, 2]]), + np.array([[5, 6, 3], [2, 3, 1]])) + ] + # These inputs generate 3-D outputs + # (np.array([[[1], [2], [3]], [[3], [4], [2]]]), + # np.array([[[5], [6], [3]], [[2], [3], [1]]])), + + for I, J in sets: + np.random.seed(1234) + D = asmatrix(np.random.rand(5, 7)) + S = self.spcreator(D) + + SIJ = S[I,J] + if issparse(SIJ): + SIJ = SIJ.toarray() + assert_equal(SIJ, D[I,J]) + + I_bad = I + 5 + J_bad = J + 7 + + assert_raises(IndexError, S.__getitem__, (I_bad,J)) + assert_raises(IndexError, S.__getitem__, (I,J_bad)) + + # This would generate 3-D arrays -- not supported + assert_raises(IndexError, S.__getitem__, ([I, I], slice(None))) + assert_raises(IndexError, S.__getitem__, (slice(None), [J, J])) + + +class _TestFancyMultidimAssign: + def test_fancy_assign_ndarray(self): + np.random.seed(1234) + + D = asmatrix(np.random.rand(5, 7)) + S = self.spcreator(D) + X = np.random.rand(2, 3) + + I = np.array([[1, 2, 3], [3, 4, 2]]) + J = np.array([[5, 6, 3], [2, 3, 1]]) + + with check_remains_sorted(S): + S[I,J] = X + D[I,J] = X + assert_equal(S.toarray(), D) + + I_bad = I + 5 + J_bad = J + 7 + + C = [1, 2, 3] + + with check_remains_sorted(S): + S[I,J] = C + D[I,J] = C + assert_equal(S.toarray(), D) + + with check_remains_sorted(S): + S[I,J] = 3 + D[I,J] = 3 + assert_equal(S.toarray(), D) + + assert_raises(IndexError, S.__setitem__, (I_bad,J), C) + assert_raises(IndexError, S.__setitem__, (I,J_bad), C) + + def test_fancy_indexing_multidim_set(self): + n, m = (5, 10) + + def _test_set_slice(i, j): + A = self.spcreator((n, m)) + with check_remains_sorted(A), suppress_warnings() as sup: + sup.filter( + 
SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive" + ) + A[i, j] = 1 + B = asmatrix(np.zeros((n, m))) + B[i, j] = 1 + assert_array_almost_equal(A.toarray(), B) + # [[[1, 2], [1, 2]], [1, 2]] + for i, j in [(np.array([[1, 2], [1, 3]]), [1, 3]), + (np.array([0, 4]), [[0, 3], [1, 2]]), + ([[1, 2, 3], [0, 2, 4]], [[0, 4, 3], [4, 1, 2]])]: + _test_set_slice(i, j) + + def test_fancy_assign_list(self): + np.random.seed(1234) + + D = asmatrix(np.random.rand(5, 7)) + S = self.spcreator(D) + X = np.random.rand(2, 3) + + I = [[1, 2, 3], [3, 4, 2]] + J = [[5, 6, 3], [2, 3, 1]] + + S[I,J] = X + D[I,J] = X + assert_equal(S.toarray(), D) + + I_bad = [[ii + 5 for ii in i] for i in I] + J_bad = [[jj + 7 for jj in j] for j in J] + C = [1, 2, 3] + + S[I,J] = C + D[I,J] = C + assert_equal(S.toarray(), D) + + S[I,J] = 3 + D[I,J] = 3 + assert_equal(S.toarray(), D) + + assert_raises(IndexError, S.__setitem__, (I_bad,J), C) + assert_raises(IndexError, S.__setitem__, (I,J_bad), C) + + def test_fancy_assign_slice(self): + np.random.seed(1234) + + D = asmatrix(np.random.rand(5, 7)) + S = self.spcreator(D) + + I = [1, 2, 3, 3, 4, 2] + J = [5, 6, 3, 2, 3, 1] + + I_bad = [ii + 5 for ii in I] + J_bad = [jj + 7 for jj in J] + + C1 = [1, 2, 3, 4, 5, 6, 7] + C2 = np.arange(5)[:, None] + assert_raises(IndexError, S.__setitem__, (I_bad, slice(None)), C1) + assert_raises(IndexError, S.__setitem__, (slice(None), J_bad), C2) + + +class _TestArithmetic: + """ + Test real/complex arithmetic + """ + def __arith_init(self): + # these can be represented exactly in FP (so arithmetic should be exact) + self.__A = array([[-1.5, 6.5, 0, 2.25, 0, 0], + [3.125, -7.875, 0.625, 0, 0, 0], + [0, 0, -0.125, 1.0, 0, 0], + [0, 0, 8.375, 0, 0, 0]], 'float64') + self.__B = array([[0.375, 0, 0, 0, -5, 2.5], + [14.25, -3.75, 0, 0, -0.125, 0], + [0, 7.25, 0, 0, 0, 0], + [18.5, -0.0625, 0, 0, 0, 0]], 'complex128') + self.__B.imag = array([[1.25, 0, 0, 0, 6, -3.875], + [2.25, 4.125, 0, 
0, 0, 2.75], + [0, 4.125, 0, 0, 0, 0], + [-0.0625, 0, 0, 0, 0, 0]], 'float64') + + # fractions are all x/16ths + assert_array_equal((self.__A*16).astype('int32'),16*self.__A) + assert_array_equal((self.__B.real*16).astype('int32'),16*self.__B.real) + assert_array_equal((self.__B.imag*16).astype('int32'),16*self.__B.imag) + + self.__Asp = self.spcreator(self.__A) + self.__Bsp = self.spcreator(self.__B) + + def test_add_sub(self): + self.__arith_init() + + # basic tests + assert_array_equal( + (self.__Asp + self.__Bsp).toarray(), self.__A + self.__B + ) + + # check conversions + for x in supported_dtypes: + with np.errstate(invalid="ignore"): + A = self.__A.astype(x) + Asp = self.spcreator(A) + for y in supported_dtypes: + if not np.issubdtype(y, np.complexfloating): + with np.errstate(invalid="ignore"): + B = self.__B.real.astype(y) + else: + B = self.__B.astype(y) + Bsp = self.spcreator(B) + + # addition + D1 = A + B + S1 = Asp + Bsp + + assert_equal(S1.dtype,D1.dtype) + assert_array_equal(S1.toarray(), D1) + assert_array_equal(Asp + B,D1) # check sparse + dense + assert_array_equal(A + Bsp,D1) # check dense + sparse + + # subtraction + if np.dtype('bool') in [x, y]: + # boolean array subtraction deprecated in 1.9.0 + continue + + D1 = A - B + S1 = Asp - Bsp + + assert_equal(S1.dtype,D1.dtype) + assert_array_equal(S1.toarray(), D1) + assert_array_equal(Asp - B,D1) # check sparse - dense + assert_array_equal(A - Bsp,D1) # check dense - sparse + + def test_mu(self): + self.__arith_init() + + # basic tests + assert_array_equal((self.__Asp @ self.__Bsp.T).toarray(), + self.__A @ self.__B.T) + + for x in supported_dtypes: + with np.errstate(invalid="ignore"): + A = self.__A.astype(x) + Asp = self.spcreator(A) + for y in supported_dtypes: + if np.issubdtype(y, np.complexfloating): + B = self.__B.astype(y) + else: + with np.errstate(invalid="ignore"): + B = self.__B.real.astype(y) + Bsp = self.spcreator(B) + + D1 = A @ B.T + S1 = Asp @ Bsp.T + + 
assert_allclose(S1.toarray(), D1, + atol=1e-14*abs(D1).max()) + assert_equal(S1.dtype,D1.dtype) + + +class _TestMinMax: + def test_minmax(self): + for dtype in [np.float32, np.float64, np.int32, np.int64, np.complex128]: + D = np.arange(20, dtype=dtype).reshape(5,4) + + X = self.spcreator(D) + assert_equal(X.min(), 0) + assert_equal(X.max(), 19) + assert_equal(X.min().dtype, dtype) + assert_equal(X.max().dtype, dtype) + + D *= -1 + X = self.spcreator(D) + assert_equal(X.min(), -19) + assert_equal(X.max(), 0) + + D += 5 + X = self.spcreator(D) + assert_equal(X.min(), -14) + assert_equal(X.max(), 5) + + # try a fully dense matrix + X = self.spcreator(np.arange(1, 10).reshape(3, 3)) + assert_equal(X.min(), 1) + assert_equal(X.min().dtype, X.dtype) + + X = -X + assert_equal(X.max(), -1) + + # and a fully sparse matrix + Z = self.spcreator(np.zeros(1)) + assert_equal(Z.min(), 0) + assert_equal(Z.max(), 0) + assert_equal(Z.max().dtype, Z.dtype) + + # another test + D = np.arange(20, dtype=float).reshape(5,4) + D[0:2, :] = 0 + X = self.spcreator(D) + assert_equal(X.min(), 0) + assert_equal(X.max(), 19) + + # zero-size matrices + for D in [np.zeros((0, 0)), np.zeros((0, 10)), np.zeros((10, 0))]: + X = self.spcreator(D) + assert_raises(ValueError, X.min) + assert_raises(ValueError, X.max) + + def test_minmax_axis(self): + D = np.arange(50).reshape(5, 10) + # completely empty rows, leaving some completely full: + D[1, :] = 0 + # empty at end for reduceat: + D[:, 9] = 0 + # partial rows/cols: + D[3, 3] = 0 + # entries on either side of 0: + D[2, 2] = -1 + X = self.spcreator(D) + + axes = [-2, -1, 0, 1] + for axis in axes: + assert_array_equal( + X.max(axis=axis).toarray(), D.max(axis=axis, keepdims=True) + ) + assert_array_equal( + X.min(axis=axis).toarray(), D.min(axis=axis, keepdims=True) + ) + + # full matrix + D = np.arange(1, 51).reshape(10, 5) + X = self.spcreator(D) + for axis in axes: + assert_array_equal( + X.max(axis=axis).toarray(), D.max(axis=axis, keepdims=True) 
+ ) + assert_array_equal( + X.min(axis=axis).toarray(), D.min(axis=axis, keepdims=True) + ) + + # empty matrix + D = np.zeros((10, 5)) + X = self.spcreator(D) + for axis in axes: + assert_array_equal( + X.max(axis=axis).toarray(), D.max(axis=axis, keepdims=True) + ) + assert_array_equal( + X.min(axis=axis).toarray(), D.min(axis=axis, keepdims=True) + ) + + axes_even = [0, -2] + axes_odd = [1, -1] + + # zero-size matrices + D = np.zeros((0, 10)) + X = self.spcreator(D) + for axis in axes_even: + assert_raises(ValueError, X.min, axis=axis) + assert_raises(ValueError, X.max, axis=axis) + for axis in axes_odd: + assert_array_equal(np.zeros((0, 1)), X.min(axis=axis).toarray()) + assert_array_equal(np.zeros((0, 1)), X.max(axis=axis).toarray()) + + D = np.zeros((10, 0)) + X = self.spcreator(D) + for axis in axes_odd: + assert_raises(ValueError, X.min, axis=axis) + assert_raises(ValueError, X.max, axis=axis) + for axis in axes_even: + assert_array_equal(np.zeros((1, 0)), X.min(axis=axis).toarray()) + assert_array_equal(np.zeros((1, 0)), X.max(axis=axis).toarray()) + + def test_nanminmax(self): + D = matrix(np.arange(50).reshape(5,10), dtype=float) + D[1, :] = 0 + D[:, 9] = 0 + D[3, 3] = 0 + D[2, 2] = -1 + D[4, 2] = np.nan + D[1, 4] = np.nan + X = self.spcreator(D) + + X_nan_maximum = X.nanmax() + assert np.isscalar(X_nan_maximum) + assert X_nan_maximum == np.nanmax(D) + + X_nan_minimum = X.nanmin() + assert np.isscalar(X_nan_minimum) + assert X_nan_minimum == np.nanmin(D) + + axes = [-2, -1, 0, 1] + for axis in axes: + X_nan_maxima = X.nanmax(axis=axis) + assert isinstance(X_nan_maxima, coo_matrix) + assert_allclose(X_nan_maxima.toarray(), + np.nanmax(D, axis=axis)) + + X_nan_minima = X.nanmin(axis=axis) + assert isinstance(X_nan_minima, coo_matrix) + assert_allclose(X_nan_minima.toarray(), + np.nanmin(D, axis=axis)) + + def test_minmax_invalid_params(self): + dat = array([[0, 1, 2], + [3, -4, 5], + [-6, 7, 9]]) + datsp = self.spcreator(dat) + + for fname in ('min', 
'max'): + func = getattr(datsp, fname) + assert_raises(ValueError, func, axis=3) + assert_raises(TypeError, func, axis=(0, 1)) + assert_raises(TypeError, func, axis=1.5) + assert_raises(ValueError, func, axis=1, out=1) + + def test_numpy_minmax(self): + # See gh-5987 + # xref gh-7460 in 'numpy' + from scipy.sparse import _data + + dat = array([[0, 1, 2], + [3, -4, 5], + [-6, 7, 9]]) + datsp = self.spcreator(dat) + + # We are only testing sparse matrices who have + # implemented 'min' and 'max' because they are + # the ones with the compatibility issues with + # the 'numpy' implementation. + if isinstance(datsp, _data._minmax_mixin): + assert_array_equal(np.min(datsp), np.min(dat)) + assert_array_equal(np.max(datsp), np.max(dat)) + + def test_argmax(self): + from scipy.sparse import _data + D1 = np.array([ + [-1, 5, 2, 3], + [0, 0, -1, -2], + [-1, -2, -3, -4], + [1, 2, 3, 4], + [1, 2, 0, 0], + ]) + D2 = D1.transpose() + # Non-regression test cases for gh-16929. + D3 = np.array([[4, 3], [7, 5]]) + D4 = np.array([[4, 3], [7, 0]]) + D5 = np.array([[5, 5, 3], [4, 9, 10], [3, 4, 9]]) + + for D in [D1, D2, D3, D4, D5]: + mat = self.spcreator(D) + if not isinstance(mat, _data._minmax_mixin): + continue + + assert_equal(mat.argmax(), np.argmax(D)) + assert_equal(mat.argmin(), np.argmin(D)) + + assert_equal(mat.argmax(axis=0), + asmatrix(np.argmax(D, axis=0))) + assert_equal(mat.argmin(axis=0), + asmatrix(np.argmin(D, axis=0))) + + assert_equal(mat.argmax(axis=1), + asmatrix(np.argmax(D, axis=1).reshape(-1, 1))) + assert_equal(mat.argmin(axis=1), + asmatrix(np.argmin(D, axis=1).reshape(-1, 1))) + + D1 = np.empty((0, 5)) + D2 = np.empty((5, 0)) + + for axis in [None, 0]: + mat = self.spcreator(D1) + assert_raises(ValueError, mat.argmax, axis=axis) + assert_raises(ValueError, mat.argmin, axis=axis) + + for axis in [None, 1]: + mat = self.spcreator(D2) + assert_raises(ValueError, mat.argmax, axis=axis) + assert_raises(ValueError, mat.argmin, axis=axis) + + +class 
_TestGetNnzAxis: + def test_getnnz_axis(self): + dat = array([[0, 2], + [3, 5], + [-6, 9]]) + bool_dat = dat.astype(bool) + datsp = self.spcreator(dat) + + accepted_return_dtypes = (np.int32, np.int64) + + assert_array_equal(bool_dat.sum(axis=None), datsp.getnnz(axis=None)) + assert_array_equal(bool_dat.sum(), datsp.getnnz()) + assert_array_equal(bool_dat.sum(axis=0), datsp.getnnz(axis=0)) + assert_in(datsp.getnnz(axis=0).dtype, accepted_return_dtypes) + assert_array_equal(bool_dat.sum(axis=1), datsp.getnnz(axis=1)) + assert_in(datsp.getnnz(axis=1).dtype, accepted_return_dtypes) + assert_array_equal(bool_dat.sum(axis=-2), datsp.getnnz(axis=-2)) + assert_in(datsp.getnnz(axis=-2).dtype, accepted_return_dtypes) + assert_array_equal(bool_dat.sum(axis=-1), datsp.getnnz(axis=-1)) + assert_in(datsp.getnnz(axis=-1).dtype, accepted_return_dtypes) + + assert_raises(ValueError, datsp.getnnz, axis=2) + + +#------------------------------------------------------------------------------ +# Tailored base class for generic tests +#------------------------------------------------------------------------------ + +def _possibly_unimplemented(cls, require=True): + """ + Construct a class that either runs tests as usual (require=True), + or each method skips if it encounters a common error. 
+ """ + if require: + return cls + else: + def wrap(fc): + @functools.wraps(fc) + def wrapper(*a, **kw): + try: + return fc(*a, **kw) + except (NotImplementedError, TypeError, ValueError, + IndexError, AttributeError): + raise pytest.skip("feature not implemented") + + return wrapper + + new_dict = dict(cls.__dict__) + for name, func in cls.__dict__.items(): + if name.startswith('test_'): + new_dict[name] = wrap(func) + return type(cls.__name__ + "NotImplemented", + cls.__bases__, + new_dict) + + +def sparse_test_class(getset=True, slicing=True, slicing_assign=True, + fancy_indexing=True, fancy_assign=True, + fancy_multidim_indexing=True, fancy_multidim_assign=True, + minmax=True, nnz_axis=True): + """ + Construct a base class, optionally converting some of the tests in + the suite to check that the feature is not implemented. + """ + bases = (_TestCommon, + _possibly_unimplemented(_TestGetSet, getset), + _TestSolve, + _TestInplaceArithmetic, + _TestArithmetic, + _possibly_unimplemented(_TestSlicing, slicing), + _possibly_unimplemented(_TestSlicingAssign, slicing_assign), + _possibly_unimplemented(_TestFancyIndexing, fancy_indexing), + _possibly_unimplemented(_TestFancyIndexingAssign, + fancy_assign), + _possibly_unimplemented(_TestFancyMultidim, + fancy_indexing and fancy_multidim_indexing), + _possibly_unimplemented(_TestFancyMultidimAssign, + fancy_multidim_assign and fancy_assign), + _possibly_unimplemented(_TestMinMax, minmax), + _possibly_unimplemented(_TestGetNnzAxis, nnz_axis)) + + # check that test names do not clash + names = {} + for cls in bases: + for name in cls.__dict__: + if not name.startswith('test_'): + continue + old_cls = names.get(name) + if old_cls is not None: + raise ValueError("Test class {} overloads test {} defined in {}".format( + cls.__name__, name, old_cls.__name__)) + names[name] = cls + + return type("TestBase", bases, {}) + + +#------------------------------------------------------------------------------ +# Matrix class based 
tests +#------------------------------------------------------------------------------ + +class TestCSR(sparse_test_class()): + @classmethod + def spcreator(cls, *args, **kwargs): + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, + "Changing the sparsity structure of a csr_matrix is expensive") + return csr_matrix(*args, **kwargs) + math_dtypes = [np.bool_, np.int_, np.float64, np.complex128] + + def test_constructor1(self): + b = array([[0, 4, 0], + [3, 0, 0], + [0, 2, 0]], 'd') + bsp = csr_matrix(b) + assert_array_almost_equal(bsp.data,[4,3,2]) + assert_array_equal(bsp.indices,[1,0,1]) + assert_array_equal(bsp.indptr,[0,1,2,3]) + assert_equal(bsp.getnnz(),3) + assert_equal(bsp.format,'csr') + assert_array_equal(bsp.toarray(), b) + + def test_constructor2(self): + b = zeros((6,6),'d') + b[3,4] = 5 + bsp = csr_matrix(b) + assert_array_almost_equal(bsp.data,[5]) + assert_array_equal(bsp.indices,[4]) + assert_array_equal(bsp.indptr,[0,0,0,0,1,1,1]) + assert_array_almost_equal(bsp.toarray(), b) + + def test_constructor3(self): + b = array([[1, 0], + [0, 2], + [3, 0]], 'd') + bsp = csr_matrix(b) + assert_array_almost_equal(bsp.data,[1,2,3]) + assert_array_equal(bsp.indices,[0,1,0]) + assert_array_equal(bsp.indptr,[0,1,2,3]) + assert_array_almost_equal(bsp.toarray(), b) + + def test_constructor4(self): + # using (data, ij) format + row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2]) + col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1]) + data = array([6., 10., 3., 9., 1., 4., + 11., 2., 8., 5., 7.]) + + ij = vstack((row,col)) + csr = csr_matrix((data,ij),(4,3)) + assert_array_equal(arange(12).reshape(4, 3), csr.toarray()) + + # using Python lists and a specified dtype + csr = csr_matrix(([2**63 + 1, 1], ([0, 1], [0, 1])), dtype=np.uint64) + dense = array([[2**63 + 1, 0], [0, 1]], dtype=np.uint64) + assert_array_equal(dense, csr.toarray()) + + def test_constructor5(self): + # infer dimensions from arrays + indptr = array([0,1,3,3]) + indices = 
array([0,5,1,2]) + data = array([1,2,3,4]) + csr = csr_matrix((data, indices, indptr)) + assert_array_equal(csr.shape,(3,6)) + + def test_constructor6(self): + # infer dimensions and dtype from lists + indptr = [0, 1, 3, 3] + indices = [0, 5, 1, 2] + data = [1, 2, 3, 4] + csr = csr_matrix((data, indices, indptr)) + assert_array_equal(csr.shape, (3,6)) + assert_(np.issubdtype(csr.dtype, np.signedinteger)) + + def test_constructor_smallcol(self): + # int64 indices not required + data = arange(6) + 1 + col = array([1, 2, 1, 0, 0, 2], dtype=np.int64) + ptr = array([0, 2, 4, 6], dtype=np.int64) + + a = csr_matrix((data, col, ptr), shape=(3, 3)) + + b = array([[0, 1, 2], + [4, 3, 0], + [5, 0, 6]], 'd') + + assert_equal(a.indptr.dtype, np.dtype(np.int32)) + assert_equal(a.indices.dtype, np.dtype(np.int32)) + assert_array_equal(a.toarray(), b) + + def test_constructor_largecol(self): + # int64 indices required + data = arange(6) + 1 + large = np.iinfo(np.int32).max + 100 + col = array([0, 1, 2, large, large+1, large+2], dtype=np.int64) + ptr = array([0, 2, 4, 6], dtype=np.int64) + + a = csr_matrix((data, col, ptr)) + + assert_equal(a.indptr.dtype, np.dtype(np.int64)) + assert_equal(a.indices.dtype, np.dtype(np.int64)) + assert_array_equal(a.shape, (3, max(col)+1)) + + def test_sort_indices(self): + data = arange(5) + indices = array([7, 2, 1, 5, 4]) + indptr = array([0, 3, 5]) + asp = csr_matrix((data, indices, indptr), shape=(2,10)) + bsp = asp.copy() + asp.sort_indices() + assert_array_equal(asp.indices,[1, 2, 7, 4, 5]) + assert_array_equal(asp.toarray(), bsp.toarray()) + + def test_eliminate_zeros(self): + data = array([1, 0, 0, 0, 2, 0, 3, 0]) + indices = array([1, 2, 3, 4, 5, 6, 7, 8]) + indptr = array([0, 3, 8]) + asp = csr_matrix((data, indices, indptr), shape=(2,10)) + bsp = asp.copy() + asp.eliminate_zeros() + assert_array_equal(asp.nnz, 3) + assert_array_equal(asp.data,[1, 2, 3]) + assert_array_equal(asp.toarray(), bsp.toarray()) + + def test_ufuncs(self): + X = 
csr_matrix(np.arange(20).reshape(4, 5) / 20.) + for f in ["sin", "tan", "arcsin", "arctan", "sinh", "tanh", + "arcsinh", "arctanh", "rint", "sign", "expm1", "log1p", + "deg2rad", "rad2deg", "floor", "ceil", "trunc", "sqrt"]: + assert_equal(hasattr(csr_matrix, f), True) + X2 = getattr(X, f)() + assert_equal(X.shape, X2.shape) + assert_array_equal(X.indices, X2.indices) + assert_array_equal(X.indptr, X2.indptr) + assert_array_equal(X2.toarray(), getattr(np, f)(X.toarray())) + + def test_unsorted_arithmetic(self): + data = arange(5) + indices = array([7, 2, 1, 5, 4]) + indptr = array([0, 3, 5]) + asp = csr_matrix((data, indices, indptr), shape=(2,10)) + data = arange(6) + indices = array([8, 1, 5, 7, 2, 4]) + indptr = array([0, 2, 6]) + bsp = csr_matrix((data, indices, indptr), shape=(2,10)) + assert_equal((asp + bsp).toarray(), asp.toarray() + bsp.toarray()) + + def test_fancy_indexing_broadcast(self): + # broadcasting indexing mode is supported + I = np.array([[1], [2], [3]]) + J = np.array([3, 4, 2]) + + np.random.seed(1234) + D = asmatrix(np.random.rand(5, 7)) + S = self.spcreator(D) + + SIJ = S[I,J] + if issparse(SIJ): + SIJ = SIJ.toarray() + assert_equal(SIJ, D[I,J]) + + def test_has_sorted_indices(self): + "Ensure has_sorted_indices memoizes sorted state for sort_indices" + sorted_inds = np.array([0, 1]) + unsorted_inds = np.array([1, 0]) + data = np.array([1, 1]) + indptr = np.array([0, 2]) + M = csr_matrix((data, sorted_inds, indptr)).copy() + assert_equal(True, M.has_sorted_indices) + assert isinstance(M.has_sorted_indices, bool) + + M = csr_matrix((data, unsorted_inds, indptr)).copy() + assert_equal(False, M.has_sorted_indices) + + # set by sorting + M.sort_indices() + assert_equal(True, M.has_sorted_indices) + assert_array_equal(M.indices, sorted_inds) + + M = csr_matrix((data, unsorted_inds, indptr)).copy() + # set manually (although underlyingly unsorted) + M.has_sorted_indices = True + assert_equal(True, M.has_sorted_indices) + 
assert_array_equal(M.indices, unsorted_inds) + + # ensure sort bypassed when has_sorted_indices == True + M.sort_indices() + assert_array_equal(M.indices, unsorted_inds) + + def test_has_canonical_format(self): + "Ensure has_canonical_format memoizes state for sum_duplicates" + + M = csr_matrix((np.array([2]), np.array([0]), np.array([0, 1]))) + assert_equal(True, M.has_canonical_format) + + indices = np.array([0, 0]) # contains duplicate + data = np.array([1, 1]) + indptr = np.array([0, 2]) + + M = csr_matrix((data, indices, indptr)).copy() + assert_equal(False, M.has_canonical_format) + assert isinstance(M.has_canonical_format, bool) + + # set by deduplicating + M.sum_duplicates() + assert_equal(True, M.has_canonical_format) + assert_equal(1, len(M.indices)) + + M = csr_matrix((data, indices, indptr)).copy() + # set manually (although underlyingly duplicated) + M.has_canonical_format = True + assert_equal(True, M.has_canonical_format) + assert_equal(2, len(M.indices)) # unaffected content + + # ensure deduplication bypassed when has_canonical_format == True + M.sum_duplicates() + assert_equal(2, len(M.indices)) # unaffected content + + def test_scalar_idx_dtype(self): + # Check that index dtype takes into account all parameters + # passed to sparsetools, including the scalar ones + indptr = np.zeros(2, dtype=np.int32) + indices = np.zeros(0, dtype=np.int32) + vals = np.zeros(0) + a = csr_matrix((vals, indices, indptr), shape=(1, 2**31-1)) + b = csr_matrix((vals, indices, indptr), shape=(1, 2**31)) + ij = np.zeros((2, 0), dtype=np.int32) + c = csr_matrix((vals, ij), shape=(1, 2**31-1)) + d = csr_matrix((vals, ij), shape=(1, 2**31)) + e = csr_matrix((1, 2**31-1)) + f = csr_matrix((1, 2**31)) + assert_equal(a.indptr.dtype, np.int32) + assert_equal(b.indptr.dtype, np.int64) + assert_equal(c.indptr.dtype, np.int32) + assert_equal(d.indptr.dtype, np.int64) + assert_equal(e.indptr.dtype, np.int32) + assert_equal(f.indptr.dtype, np.int64) + + # These shouldn't fail + for 
x in [a, b, c, d, e, f]: + x + x + + def test_binop_explicit_zeros(self): + # Check that binary ops don't introduce spurious explicit zeros. + # See gh-9619 for context. + a = csr_matrix([0, 1, 0]) + b = csr_matrix([1, 1, 0]) + assert (a + b).nnz == 2 + assert a.multiply(b).nnz == 1 + + +TestCSR.init_class() + + +class TestCSC(sparse_test_class()): + @classmethod + def spcreator(cls, *args, **kwargs): + with suppress_warnings() as sup: + sup.filter(SparseEfficiencyWarning, + "Changing the sparsity structure of a csc_matrix is expensive") + return csc_matrix(*args, **kwargs) + math_dtypes = [np.bool_, np.int_, np.float64, np.complex128] + + def test_constructor1(self): + b = array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 2, 0, 3]], 'd') + bsp = csc_matrix(b) + assert_array_almost_equal(bsp.data,[1,2,1,3]) + assert_array_equal(bsp.indices,[0,2,1,2]) + assert_array_equal(bsp.indptr,[0,1,2,3,4]) + assert_equal(bsp.getnnz(),4) + assert_equal(bsp.shape,b.shape) + assert_equal(bsp.format,'csc') + + def test_constructor2(self): + b = zeros((6,6),'d') + b[2,4] = 5 + bsp = csc_matrix(b) + assert_array_almost_equal(bsp.data,[5]) + assert_array_equal(bsp.indices,[2]) + assert_array_equal(bsp.indptr,[0,0,0,0,0,1,1]) + + def test_constructor3(self): + b = array([[1, 0], [0, 0], [0, 2]], 'd') + bsp = csc_matrix(b) + assert_array_almost_equal(bsp.data,[1,2]) + assert_array_equal(bsp.indices,[0,2]) + assert_array_equal(bsp.indptr,[0,1,2]) + + def test_constructor4(self): + # using (data, ij) format + row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2]) + col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1]) + data = array([6., 10., 3., 9., 1., 4., 11., 2., 8., 5., 7.]) + + ij = vstack((row,col)) + csc = csc_matrix((data,ij),(4,3)) + assert_array_equal(arange(12).reshape(4, 3), csc.toarray()) + + def test_constructor5(self): + # infer dimensions from arrays + indptr = array([0,1,3,3]) + indices = array([0,5,1,2]) + data = array([1,2,3,4]) + csc = csc_matrix((data, indices, indptr)) + 
assert_array_equal(csc.shape,(6,3)) + + def test_constructor6(self): + # infer dimensions and dtype from lists + indptr = [0, 1, 3, 3] + indices = [0, 5, 1, 2] + data = [1, 2, 3, 4] + csc = csc_matrix((data, indices, indptr)) + assert_array_equal(csc.shape,(6,3)) + assert_(np.issubdtype(csc.dtype, np.signedinteger)) + + def test_eliminate_zeros(self): + data = array([1, 0, 0, 0, 2, 0, 3, 0]) + indices = array([1, 2, 3, 4, 5, 6, 7, 8]) + indptr = array([0, 3, 8]) + asp = csc_matrix((data, indices, indptr), shape=(10,2)) + bsp = asp.copy() + asp.eliminate_zeros() + assert_array_equal(asp.nnz, 3) + assert_array_equal(asp.data,[1, 2, 3]) + assert_array_equal(asp.toarray(), bsp.toarray()) + + def test_sort_indices(self): + data = arange(5) + row = array([7, 2, 1, 5, 4]) + ptr = [0, 3, 5] + asp = csc_matrix((data, row, ptr), shape=(10,2)) + bsp = asp.copy() + asp.sort_indices() + assert_array_equal(asp.indices,[1, 2, 7, 4, 5]) + assert_array_equal(asp.toarray(), bsp.toarray()) + + def test_ufuncs(self): + X = csc_matrix(np.arange(21).reshape(7, 3) / 21.) 
+ for f in ["sin", "tan", "arcsin", "arctan", "sinh", "tanh", + "arcsinh", "arctanh", "rint", "sign", "expm1", "log1p", + "deg2rad", "rad2deg", "floor", "ceil", "trunc", "sqrt"]: + assert_equal(hasattr(csr_matrix, f), True) + X2 = getattr(X, f)() + assert_equal(X.shape, X2.shape) + assert_array_equal(X.indices, X2.indices) + assert_array_equal(X.indptr, X2.indptr) + assert_array_equal(X2.toarray(), getattr(np, f)(X.toarray())) + + def test_unsorted_arithmetic(self): + data = arange(5) + indices = array([7, 2, 1, 5, 4]) + indptr = array([0, 3, 5]) + asp = csc_matrix((data, indices, indptr), shape=(10,2)) + data = arange(6) + indices = array([8, 1, 5, 7, 2, 4]) + indptr = array([0, 2, 6]) + bsp = csc_matrix((data, indices, indptr), shape=(10,2)) + assert_equal((asp + bsp).toarray(), asp.toarray() + bsp.toarray()) + + def test_fancy_indexing_broadcast(self): + # broadcasting indexing mode is supported + I = np.array([[1], [2], [3]]) + J = np.array([3, 4, 2]) + + np.random.seed(1234) + D = asmatrix(np.random.rand(5, 7)) + S = self.spcreator(D) + + SIJ = S[I,J] + if issparse(SIJ): + SIJ = SIJ.toarray() + assert_equal(SIJ, D[I,J]) + + def test_scalar_idx_dtype(self): + # Check that index dtype takes into account all parameters + # passed to sparsetools, including the scalar ones + indptr = np.zeros(2, dtype=np.int32) + indices = np.zeros(0, dtype=np.int32) + vals = np.zeros(0) + a = csc_matrix((vals, indices, indptr), shape=(2**31-1, 1)) + b = csc_matrix((vals, indices, indptr), shape=(2**31, 1)) + ij = np.zeros((2, 0), dtype=np.int32) + c = csc_matrix((vals, ij), shape=(2**31-1, 1)) + d = csc_matrix((vals, ij), shape=(2**31, 1)) + e = csr_matrix((1, 2**31-1)) + f = csr_matrix((1, 2**31)) + assert_equal(a.indptr.dtype, np.int32) + assert_equal(b.indptr.dtype, np.int64) + assert_equal(c.indptr.dtype, np.int32) + assert_equal(d.indptr.dtype, np.int64) + assert_equal(e.indptr.dtype, np.int32) + assert_equal(f.indptr.dtype, np.int64) + + # These shouldn't fail + for x in [a, 
b, c, d, e, f]: + x + x + + +TestCSC.init_class() + + +class TestDOK(sparse_test_class(minmax=False, nnz_axis=False)): + spcreator = dok_matrix + math_dtypes = [np.int_, np.float64, np.complex128] + + def test_mult(self): + A = dok_matrix((10,10)) + A[0,3] = 10 + A[5,6] = 20 + D = A*A.T + E = A*A.H + assert_array_equal(D.toarray(), E.toarray()) + + def test_add_nonzero(self): + A = self.spcreator((3,2)) + A[0,1] = -10 + A[2,0] = 20 + A = A + 10 + B = array([[10, 0], [10, 10], [30, 10]]) + assert_array_equal(A.toarray(), B) + + A = A + 1j + B = B + 1j + assert_array_equal(A.toarray(), B) + + def test_dok_divide_scalar(self): + A = self.spcreator((3,2)) + A[0,1] = -10 + A[2,0] = 20 + + assert_array_equal((A/1j).toarray(), A.toarray()/1j) + assert_array_equal((A/9).toarray(), A.toarray()/9) + + def test_convert(self): + # Test provided by Andrew Straw. Fails in SciPy <= r1477. + (m, n) = (6, 7) + a = dok_matrix((m, n)) + + # set a few elements, but none in the last column + a[2,1] = 1 + a[0,2] = 2 + a[3,1] = 3 + a[1,5] = 4 + a[4,3] = 5 + a[4,2] = 6 + + # assert that the last column is all zeros + assert_array_equal(a.toarray()[:,n-1], zeros(m,)) + + # make sure it still works for CSC format + csc = a.tocsc() + assert_array_equal(csc.toarray()[:,n-1], zeros(m,)) + + # now test CSR + (m, n) = (n, m) + b = a.transpose() + assert_equal(b.shape, (m, n)) + # assert that the last row is all zeros + assert_array_equal(b.toarray()[m-1,:], zeros(n,)) + + # make sure it still works for CSR format + csr = b.tocsr() + assert_array_equal(csr.toarray()[m-1,:], zeros(n,)) + + def test_ctor(self): + # Empty ctor + assert_raises(TypeError, dok_matrix) + + # Dense ctor + b = array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 2, 0, 3]], 'd') + A = dok_matrix(b) + assert_equal(b.dtype, A.dtype) + assert_equal(A.toarray(), b) + + # Sparse ctor + c = csr_matrix(b) + assert_equal(A.toarray(), c.toarray()) + + data = [[0, 1, 2], [3, 0, 0]] + d = dok_matrix(data, dtype=np.float32) + assert_equal(d.dtype, 
np.float32) + da = d.toarray() + assert_equal(da.dtype, np.float32) + assert_array_equal(da, data) + + def test_ticket1160(self): + # Regression test for ticket #1160. + a = dok_matrix((3,3)) + a[0,0] = 0 + # This assert would fail, because the above assignment would + # incorrectly call __set_item__ even though the value was 0. + assert_((0,0) not in a.keys(), "Unexpected entry (0,0) in keys") + + # Slice assignments were also affected. + b = dok_matrix((3,3)) + b[:,0] = 0 + assert_(len(b.keys()) == 0, "Unexpected entries in keys") + + +TestDOK.init_class() + + +class TestLIL(sparse_test_class(minmax=False)): + spcreator = lil_matrix + math_dtypes = [np.int_, np.float64, np.complex128] + + def test_dot(self): + A = zeros((10, 10), np.complex128) + A[0, 3] = 10 + A[5, 6] = 20j + + B = lil_matrix((10, 10), dtype=np.complex128) + B[0, 3] = 10 + B[5, 6] = 20j + + # TODO: properly handle this assertion on ppc64le + if platform.machine() != 'ppc64le': + assert_array_equal(A @ A.T, (B * B.T).toarray()) + + assert_array_equal(A @ A.conjugate().T, (B * B.conjugate().T).toarray()) + + def test_scalar_mul(self): + x = lil_matrix((3, 3)) + x[0, 0] = 2 + + x = x*2 + assert_equal(x[0, 0], 4) + + x = x*0 + assert_equal(x[0, 0], 0) + + def test_truediv_scalar(self): + A = self.spcreator((3, 2)) + A[0, 1] = -10 + A[2, 0] = 20 + + assert_array_equal((A / 1j).toarray(), A.toarray() / 1j) + assert_array_equal((A / 9).toarray(), A.toarray() / 9) + + def test_inplace_ops(self): + A = lil_matrix([[0, 2, 3], [4, 0, 6]]) + B = lil_matrix([[0, 1, 0], [0, 2, 3]]) + + data = {'add': (B, A + B), + 'sub': (B, A - B), + 'mul': (3, A * 3)} + + for op, (other, expected) in data.items(): + result = A.copy() + getattr(result, '__i%s__' % op)(other) + + assert_array_equal(result.toarray(), expected.toarray()) + + # Ticket 1604. 
+ A = lil_matrix((1, 3), dtype=np.dtype('float64')) + B = array([0.1, 0.1, 0.1]) + A[0, :] += B + assert_array_equal(A[0, :].toarray().squeeze(), B) + + def test_lil_iteration(self): + row_data = [[1, 2, 3], [4, 5, 6]] + B = lil_matrix(array(row_data)) + for r, row in enumerate(B): + assert_array_equal(row.toarray(), array(row_data[r], ndmin=2)) + + def test_lil_from_csr(self): + # Tests whether a lil_matrix can be constructed from a + # csr_matrix. + B = lil_matrix((10, 10)) + B[0, 3] = 10 + B[5, 6] = 20 + B[8, 3] = 30 + B[3, 8] = 40 + B[8, 9] = 50 + C = B.tocsr() + D = lil_matrix(C) + assert_array_equal(C.toarray(), D.toarray()) + + def test_fancy_indexing_lil(self): + M = asmatrix(arange(25).reshape(5, 5)) + A = lil_matrix(M) + + assert_equal(A[array([1, 2, 3]), 2:3].toarray(), + M[array([1, 2, 3]), 2:3]) + + def test_point_wise_multiply(self): + l = lil_matrix((4, 3)) + l[0, 0] = 1 + l[1, 1] = 2 + l[2, 2] = 3 + l[3, 1] = 4 + + m = lil_matrix((4, 3)) + m[0, 0] = 1 + m[0, 1] = 2 + m[2, 2] = 3 + m[3, 1] = 4 + m[3, 2] = 4 + + assert_array_equal(l.multiply(m).toarray(), + m.multiply(l).toarray()) + + assert_array_equal(l.multiply(m).toarray(), + [[1, 0, 0], + [0, 0, 0], + [0, 0, 9], + [0, 16, 0]]) + + def test_lil_multiply_removal(self): + # Ticket #1427. + a = lil_matrix(np.ones((3, 3))) + a *= 2. 
+ a[0, :] = 0 + + +TestLIL.init_class() + + +class TestCOO(sparse_test_class(getset=False, + slicing=False, slicing_assign=False, + fancy_indexing=False, fancy_assign=False)): + spcreator = coo_matrix + math_dtypes = [np.int_, np.float64, np.complex128] + + def test_constructor1(self): + # unsorted triplet format + row = array([2, 3, 1, 3, 0, 1, 3, 0, 2, 1, 2]) + col = array([0, 1, 0, 0, 1, 1, 2, 2, 2, 2, 1]) + data = array([6., 10., 3., 9., 1., 4., 11., 2., 8., 5., 7.]) + + coo = coo_matrix((data,(row,col)),(4,3)) + assert_array_equal(arange(12).reshape(4, 3), coo.toarray()) + + # using Python lists and a specified dtype + coo = coo_matrix(([2**63 + 1, 1], ([0, 1], [0, 1])), dtype=np.uint64) + dense = array([[2**63 + 1, 0], [0, 1]], dtype=np.uint64) + assert_array_equal(dense, coo.toarray()) + + def test_constructor2(self): + # unsorted triplet format with duplicates (which are summed) + row = array([0,1,2,2,2,2,0,0,2,2]) + col = array([0,2,0,2,1,1,1,0,0,2]) + data = array([2,9,-4,5,7,0,-1,2,1,-5]) + coo = coo_matrix((data,(row,col)),(3,3)) + + mat = array([[4, -1, 0], [0, 0, 9], [-3, 7, 0]]) + + assert_array_equal(mat, coo.toarray()) + + def test_constructor3(self): + # empty matrix + coo = coo_matrix((4,3)) + + assert_array_equal(coo.shape,(4,3)) + assert_array_equal(coo.row,[]) + assert_array_equal(coo.col,[]) + assert_array_equal(coo.data,[]) + assert_array_equal(coo.toarray(), zeros((4, 3))) + + def test_constructor4(self): + # from dense matrix + mat = array([[0,1,0,0], + [7,0,3,0], + [0,4,0,0]]) + coo = coo_matrix(mat) + assert_array_equal(coo.toarray(), mat) + + # upgrade rank 1 arrays to row matrix + mat = array([0,1,0,0]) + coo = coo_matrix(mat) + assert_array_equal(coo.toarray(), mat.reshape(1, -1)) + + # error if second arg interpreted as shape (gh-9919) + with pytest.raises(TypeError, match=r'object cannot be interpreted'): + coo_matrix([0, 11, 22, 33], ([0, 1, 2, 3], [0, 0, 0, 0])) + + # error if explicit shape arg doesn't match the dense matrix + 
with pytest.raises(ValueError, match=r'inconsistent shapes'): + coo_matrix([0, 11, 22, 33], shape=(4, 4)) + + def test_constructor_data_ij_dtypeNone(self): + data = [1] + coo = coo_matrix((data, ([0], [0])), dtype=None) + assert coo.dtype == np.array(data).dtype + + @pytest.mark.xfail(run=False, reason='COO does not have a __getitem__') + def test_iterator(self): + pass + + def test_todia_all_zeros(self): + zeros = [[0, 0]] + dia = coo_matrix(zeros).todia() + assert_array_equal(dia.toarray(), zeros) + + def test_sum_duplicates(self): + coo = coo_matrix((4,3)) + coo.sum_duplicates() + coo = coo_matrix(([1,2], ([1,0], [1,0]))) + coo.sum_duplicates() + assert_array_equal(coo.toarray(), [[2,0],[0,1]]) + coo = coo_matrix(([1,2], ([1,1], [1,1]))) + coo.sum_duplicates() + assert_array_equal(coo.toarray(), [[0,0],[0,3]]) + assert_array_equal(coo.row, [1]) + assert_array_equal(coo.col, [1]) + assert_array_equal(coo.data, [3]) + + def test_todok_duplicates(self): + coo = coo_matrix(([1,1,1,1], ([0,2,2,0], [0,1,1,0]))) + dok = coo.todok() + assert_array_equal(dok.toarray(), coo.toarray()) + + def test_eliminate_zeros(self): + data = array([1, 0, 0, 0, 2, 0, 3, 0]) + row = array([0, 0, 0, 1, 1, 1, 1, 1]) + col = array([1, 2, 3, 4, 5, 6, 7, 8]) + asp = coo_matrix((data, (row, col)), shape=(2,10)) + bsp = asp.copy() + asp.eliminate_zeros() + assert_((asp.data != 0).all()) + assert_array_equal(asp.toarray(), bsp.toarray()) + + def test_reshape_copy(self): + arr = [[0, 10, 0, 0], [0, 0, 0, 0], [0, 20, 30, 40]] + new_shape = (2, 6) + x = coo_matrix(arr) + + y = x.reshape(new_shape) + assert_(y.data is x.data) + + y = x.reshape(new_shape, copy=False) + assert_(y.data is x.data) + + y = x.reshape(new_shape, copy=True) + assert_(not np.may_share_memory(y.data, x.data)) + + def test_large_dimensions_reshape(self): + # Test that reshape is immune to integer overflow when number of elements + # exceeds 2^31-1 + mat1 = coo_matrix(([1], ([3000000], [1000])), (3000001, 1001)) + mat2 = 
coo_matrix(([1], ([1000], [3000000])), (1001, 3000001)) + + # assert_array_equal is slow for big matrices because it expects dense + # Using __ne__ and nnz instead + assert_((mat1.reshape((1001, 3000001), order='C') != mat2).nnz == 0) + assert_((mat2.reshape((3000001, 1001), order='F') != mat1).nnz == 0) + + +TestCOO.init_class() + + +class TestDIA(sparse_test_class(getset=False, slicing=False, slicing_assign=False, + fancy_indexing=False, fancy_assign=False, + minmax=False, nnz_axis=False)): + spcreator = dia_matrix + math_dtypes = [np.int_, np.float64, np.complex128] + + def test_constructor1(self): + D = array([[1, 0, 3, 0], + [1, 2, 0, 4], + [0, 2, 3, 0], + [0, 0, 3, 4]]) + data = np.array([[1,2,3,4]]).repeat(3,axis=0) + offsets = np.array([0,-1,2]) + assert_equal(dia_matrix((data, offsets), shape=(4, 4)).toarray(), D) + + @pytest.mark.xfail(run=False, reason='DIA does not have a __getitem__') + def test_iterator(self): + pass + + @with_64bit_maxval_limit(3) + def test_setdiag_dtype(self): + m = dia_matrix(np.eye(3)) + assert_equal(m.offsets.dtype, np.int32) + m.setdiag((3,), k=2) + assert_equal(m.offsets.dtype, np.int32) + + m = dia_matrix(np.eye(4)) + assert_equal(m.offsets.dtype, np.int64) + m.setdiag((3,), k=3) + assert_equal(m.offsets.dtype, np.int64) + + @pytest.mark.skip(reason='DIA stores extra zeros') + def test_getnnz_axis(self): + pass + + def test_convert_gh14555(self): + # regression test for gh-14555 + m = dia_matrix(([[1, 1, 0]], [-1]), shape=(4, 2)) + expected = m.toarray() + assert_array_equal(m.tocsc().toarray(), expected) + assert_array_equal(m.tocsr().toarray(), expected) + + def test_tocoo_gh10050(self): + # regression test for gh-10050 + m = dia_matrix([[1, 2], [3, 4]]).tocoo() + flat_inds = np.ravel_multi_index((m.row, m.col), m.shape) + inds_are_sorted = np.all(np.diff(flat_inds) > 0) + assert m.has_canonical_format == inds_are_sorted + + def test_tocoo_tocsr_tocsc_gh19245(self): + # test index_dtype with tocoo, tocsr, tocsc + data = 
np.array([[1, 2, 3, 4]]).repeat(3, axis=0) + offsets = np.array([0, -1, 2], dtype=np.int32) + dia = sparse.dia_array((data, offsets), shape=(4, 4)) + + coo = dia.tocoo() + assert coo.col.dtype == np.int32 + csr = dia.tocsr() + assert csr.indices.dtype == np.int32 + csc = dia.tocsc() + assert csc.indices.dtype == np.int32 + + +TestDIA.init_class() + + +class TestBSR(sparse_test_class(getset=False, + slicing=False, slicing_assign=False, + fancy_indexing=False, fancy_assign=False, + nnz_axis=False)): + spcreator = bsr_matrix + math_dtypes = [np.int_, np.float64, np.complex128] + + def test_constructor1(self): + # check native BSR format constructor + indptr = array([0,2,2,4]) + indices = array([0,2,2,3]) + data = zeros((4,2,3)) + + data[0] = array([[0, 1, 2], + [3, 0, 5]]) + data[1] = array([[0, 2, 4], + [6, 0, 10]]) + data[2] = array([[0, 4, 8], + [12, 0, 20]]) + data[3] = array([[0, 5, 10], + [15, 0, 25]]) + + A = kron([[1,0,2,0],[0,0,0,0],[0,0,4,5]], [[0,1,2],[3,0,5]]) + Asp = bsr_matrix((data,indices,indptr),shape=(6,12)) + assert_equal(Asp.toarray(), A) + + # infer shape from arrays + Asp = bsr_matrix((data,indices,indptr)) + assert_equal(Asp.toarray(), A) + + def test_constructor2(self): + # construct from dense + + # test zero mats + for shape in [(1,1), (5,1), (1,10), (10,4), (3,7), (2,1)]: + A = zeros(shape) + assert_equal(bsr_matrix(A).toarray(), A) + A = zeros((4,6)) + assert_equal(bsr_matrix(A, blocksize=(2, 2)).toarray(), A) + assert_equal(bsr_matrix(A, blocksize=(2, 3)).toarray(), A) + + A = kron([[1,0,2,0],[0,0,0,0],[0,0,4,5]], [[0,1,2],[3,0,5]]) + assert_equal(bsr_matrix(A).toarray(), A) + assert_equal(bsr_matrix(A, shape=(6, 12)).toarray(), A) + assert_equal(bsr_matrix(A, blocksize=(1, 1)).toarray(), A) + assert_equal(bsr_matrix(A, blocksize=(2, 3)).toarray(), A) + assert_equal(bsr_matrix(A, blocksize=(2, 6)).toarray(), A) + assert_equal(bsr_matrix(A, blocksize=(2, 12)).toarray(), A) + assert_equal(bsr_matrix(A, blocksize=(3, 12)).toarray(), A) + 
assert_equal(bsr_matrix(A, blocksize=(6, 12)).toarray(), A) + + A = kron([[1,0,2,0],[0,1,0,0],[0,0,0,0]], [[0,1,2],[3,0,5]]) + assert_equal(bsr_matrix(A, blocksize=(2, 3)).toarray(), A) + + def test_constructor3(self): + # construct from coo-like (data,(row,col)) format + arg = ([1,2,3], ([0,1,1], [0,0,1])) + A = array([[1,0],[2,3]]) + assert_equal(bsr_matrix(arg, blocksize=(2, 2)).toarray(), A) + + def test_constructor4(self): + # regression test for gh-6292: bsr_matrix((data, indices, indptr)) was + # trying to compare an int to a None + n = 8 + data = np.ones((n, n, 1), dtype=np.int8) + indptr = np.array([0, n], dtype=np.int32) + indices = np.arange(n, dtype=np.int32) + bsr_matrix((data, indices, indptr), blocksize=(n, 1), copy=False) + + def test_constructor5(self): + # check for validations introduced in gh-13400 + n = 8 + data_1dim = np.ones(n) + data = np.ones((n, n, n)) + indptr = np.array([0, n]) + indices = np.arange(n) + + with assert_raises(ValueError): + # data ndim check + bsr_matrix((data_1dim, indices, indptr)) + + with assert_raises(ValueError): + # invalid blocksize + bsr_matrix((data, indices, indptr), blocksize=(1, 1, 1)) + + with assert_raises(ValueError): + # mismatching blocksize + bsr_matrix((data, indices, indptr), blocksize=(1, 1)) + + def test_default_dtype(self): + # As a numpy array, `values` has shape (2, 2, 1). 
+ values = [[[1], [1]], [[1], [1]]] + indptr = np.array([0, 2], dtype=np.int32) + indices = np.array([0, 1], dtype=np.int32) + b = bsr_matrix((values, indices, indptr), blocksize=(2, 1)) + assert b.dtype == np.array(values).dtype + + def test_bsr_tocsr(self): + # check native conversion from BSR to CSR + indptr = array([0, 2, 2, 4]) + indices = array([0, 2, 2, 3]) + data = zeros((4, 2, 3)) + + data[0] = array([[0, 1, 2], + [3, 0, 5]]) + data[1] = array([[0, 2, 4], + [6, 0, 10]]) + data[2] = array([[0, 4, 8], + [12, 0, 20]]) + data[3] = array([[0, 5, 10], + [15, 0, 25]]) + + A = kron([[1, 0, 2, 0], [0, 0, 0, 0], [0, 0, 4, 5]], + [[0, 1, 2], [3, 0, 5]]) + Absr = bsr_matrix((data, indices, indptr), shape=(6, 12)) + Acsr = Absr.tocsr() + Acsr_via_coo = Absr.tocoo().tocsr() + assert_equal(Acsr.toarray(), A) + assert_equal(Acsr.toarray(), Acsr_via_coo.toarray()) + + def test_eliminate_zeros(self): + data = kron([1, 0, 0, 0, 2, 0, 3, 0], [[1,1],[1,1]]).T + data = data.reshape(-1,2,2) + indices = array([1, 2, 3, 4, 5, 6, 7, 8]) + indptr = array([0, 3, 8]) + asp = bsr_matrix((data, indices, indptr), shape=(4,20)) + bsp = asp.copy() + asp.eliminate_zeros() + assert_array_equal(asp.nnz, 3*4) + assert_array_equal(asp.toarray(), bsp.toarray()) + + # github issue #9687 + def test_eliminate_zeros_all_zero(self): + np.random.seed(0) + m = bsr_matrix(np.random.random((12, 12)), blocksize=(2, 3)) + + # eliminate some blocks, but not all + m.data[m.data <= 0.9] = 0 + m.eliminate_zeros() + assert_equal(m.nnz, 66) + assert_array_equal(m.data.shape, (11, 2, 3)) + + # eliminate all remaining blocks + m.data[m.data <= 1.0] = 0 + m.eliminate_zeros() + assert_equal(m.nnz, 0) + assert_array_equal(m.data.shape, (0, 2, 3)) + assert_array_equal(m.toarray(), np.zeros((12, 12))) + + # test fast path + m.eliminate_zeros() + assert_equal(m.nnz, 0) + assert_array_equal(m.data.shape, (0, 2, 3)) + assert_array_equal(m.toarray(), np.zeros((12, 12))) + + def test_bsr_matvec(self): + A = 
bsr_matrix(arange(2*3*4*5).reshape(2*4,3*5), blocksize=(4,5)) + x = arange(A.shape[1]).reshape(-1,1) + assert_equal(A*x, A.toarray() @ x) + + def test_bsr_matvecs(self): + A = bsr_matrix(arange(2*3*4*5).reshape(2*4,3*5), blocksize=(4,5)) + x = arange(A.shape[1]*6).reshape(-1,6) + assert_equal(A*x, A.toarray() @ x) + + @pytest.mark.xfail(run=False, reason='BSR does not have a __getitem__') + def test_iterator(self): + pass + + @pytest.mark.xfail(run=False, reason='BSR does not have a __setitem__') + def test_setdiag(self): + pass + + def test_resize_blocked(self): + # test resize() with non-(1,1) blocksize + D = np.array([[1, 0, 3, 4], + [2, 0, 0, 0], + [3, 0, 0, 0]]) + S = self.spcreator(D, blocksize=(1, 2)) + assert_(S.resize((3, 2)) is None) + assert_array_equal(S.toarray(), [[1, 0], + [2, 0], + [3, 0]]) + S.resize((2, 2)) + assert_array_equal(S.toarray(), [[1, 0], + [2, 0]]) + S.resize((3, 2)) + assert_array_equal(S.toarray(), [[1, 0], + [2, 0], + [0, 0]]) + S.resize((3, 4)) + assert_array_equal(S.toarray(), [[1, 0, 0, 0], + [2, 0, 0, 0], + [0, 0, 0, 0]]) + assert_raises(ValueError, S.resize, (2, 3)) + + @pytest.mark.xfail(run=False, reason='BSR does not have a __setitem__') + def test_setdiag_comprehensive(self): + pass + + @pytest.mark.skipif(IS_COLAB, reason="exceeds memory limit") + def test_scalar_idx_dtype(self): + # Check that index dtype takes into account all parameters + # passed to sparsetools, including the scalar ones + indptr = np.zeros(2, dtype=np.int32) + indices = np.zeros(0, dtype=np.int32) + vals = np.zeros((0, 1, 1)) + a = bsr_matrix((vals, indices, indptr), shape=(1, 2**31-1)) + b = bsr_matrix((vals, indices, indptr), shape=(1, 2**31)) + c = bsr_matrix((1, 2**31-1)) + d = bsr_matrix((1, 2**31)) + assert_equal(a.indptr.dtype, np.int32) + assert_equal(b.indptr.dtype, np.int64) + assert_equal(c.indptr.dtype, np.int32) + assert_equal(d.indptr.dtype, np.int64) + + try: + vals2 = np.zeros((0, 1, 2**31-1)) + vals3 = np.zeros((0, 1, 2**31)) + e = 
bsr_matrix((vals2, indices, indptr), shape=(1, 2**31-1)) + f = bsr_matrix((vals3, indices, indptr), shape=(1, 2**31)) + assert_equal(e.indptr.dtype, np.int32) + assert_equal(f.indptr.dtype, np.int64) + except (MemoryError, ValueError): + # May fail on 32-bit Python + e = 0 + f = 0 + + # These shouldn't fail + for x in [a, b, c, d, e, f]: + x + x + + +TestBSR.init_class() + + +#------------------------------------------------------------------------------ +# Tests for non-canonical representations (with duplicates, unsorted indices) +#------------------------------------------------------------------------------ + +def _same_sum_duplicate(data, *inds, **kwargs): + """Duplicates entries to produce the same matrix""" + indptr = kwargs.pop('indptr', None) + if np.issubdtype(data.dtype, np.bool_) or \ + np.issubdtype(data.dtype, np.unsignedinteger): + if indptr is None: + return (data,) + inds + else: + return (data,) + inds + (indptr,) + + zeros_pos = (data == 0).nonzero() + + # duplicate data + data = data.repeat(2, axis=0) + data[::2] -= 1 + data[1::2] = 1 + + # don't spoil all explicit zeros + if zeros_pos[0].size > 0: + pos = tuple(p[0] for p in zeros_pos) + pos1 = (2*pos[0],) + pos[1:] + pos2 = (2*pos[0]+1,) + pos[1:] + data[pos1] = 0 + data[pos2] = 0 + + inds = tuple(indices.repeat(2) for indices in inds) + + if indptr is None: + return (data,) + inds + else: + return (data,) + inds + (indptr * 2,) + + +class _NonCanonicalMixin: + def spcreator(self, D, sorted_indices=False, **kwargs): + """Replace D with a non-canonical equivalent: containing + duplicate elements and explicit zeros""" + construct = super().spcreator + M = construct(D, **kwargs) + + zero_pos = (M.toarray() == 0).nonzero() + has_zeros = (zero_pos[0].size > 0) + if has_zeros: + k = zero_pos[0].size//2 + with suppress_warnings() as sup: + sup.filter( + SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive" + ) + M = self._insert_explicit_zero(M, zero_pos[0][k], 
zero_pos[1][k]) + + arg1 = self._arg1_for_noncanonical(M, sorted_indices) + if 'shape' not in kwargs: + kwargs['shape'] = M.shape + NC = construct(arg1, **kwargs) + + # check that result is valid + if NC.dtype in [np.float32, np.complex64]: + # For single-precision floats, the differences between M and NC + # that are introduced by the extra operations involved in the + # construction of NC necessitate a more lenient tolerance level + # than the default. + rtol = 1e-05 + else: + rtol = 1e-07 + assert_allclose(NC.toarray(), M.toarray(), rtol=rtol) + + # check that at least one explicit zero + if has_zeros: + assert_((NC.data == 0).any()) + # TODO check that NC has duplicates (which are not explicit zeros) + + return NC + + @pytest.mark.skip(reason='bool(matrix) counts explicit zeros') + def test_bool(self): + pass + + @pytest.mark.skip(reason='getnnz-axis counts explicit zeros') + def test_getnnz_axis(self): + pass + + @pytest.mark.skip(reason='nnz counts explicit zeros') + def test_empty(self): + pass + + +class _NonCanonicalCompressedMixin(_NonCanonicalMixin): + def _arg1_for_noncanonical(self, M, sorted_indices=False): + """Return non-canonical constructor arg1 equivalent to M""" + data, indices, indptr = _same_sum_duplicate(M.data, M.indices, + indptr=M.indptr) + if not sorted_indices: + for start, stop in zip(indptr, indptr[1:]): + indices[start:stop] = indices[start:stop][::-1].copy() + data[start:stop] = data[start:stop][::-1].copy() + return data, indices, indptr + + def _insert_explicit_zero(self, M, i, j): + M[i,j] = 0 + return M + + +class _NonCanonicalCSMixin(_NonCanonicalCompressedMixin): + def test_getelement(self): + def check(dtype, sorted_indices): + D = array([[1,0,0], + [4,3,0], + [0,2,0], + [0,0,0]], dtype=dtype) + A = self.spcreator(D, sorted_indices=sorted_indices) + + M,N = D.shape + + for i in range(-M, M): + for j in range(-N, N): + assert_equal(A[i,j], D[i,j]) + + for ij in [(0,3),(-1,3),(4,0),(4,3),(4,-1), (1, 2, 3)]: + 
assert_raises((IndexError, TypeError), A.__getitem__, ij) + + for dtype in supported_dtypes: + for sorted_indices in [False, True]: + check(np.dtype(dtype), sorted_indices) + + def test_setitem_sparse(self): + D = np.eye(3) + A = self.spcreator(D) + B = self.spcreator([[1,2,3]]) + + D[1,:] = B.toarray() + with suppress_warnings() as sup: + sup.filter( + SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive" + ) + A[1,:] = B + assert_array_equal(A.toarray(), D) + + D[:,2] = B.toarray().ravel() + with suppress_warnings() as sup: + sup.filter( + SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive" + ) + A[:,2] = B.T + assert_array_equal(A.toarray(), D) + + @pytest.mark.xfail(run=False, reason='inverse broken with non-canonical matrix') + def test_inv(self): + pass + + @pytest.mark.xfail(run=False, reason='solve broken with non-canonical matrix') + def test_solve(self): + pass + + +class TestCSRNonCanonical(_NonCanonicalCSMixin, TestCSR): + pass + + +class TestCSCNonCanonical(_NonCanonicalCSMixin, TestCSC): + pass + + +class TestBSRNonCanonical(_NonCanonicalCompressedMixin, TestBSR): + def _insert_explicit_zero(self, M, i, j): + x = M.tocsr() + x[i,j] = 0 + return x.tobsr(blocksize=M.blocksize) + + @pytest.mark.xfail(run=False, reason='diagonal broken with non-canonical BSR') + def test_diagonal(self): + pass + + @pytest.mark.xfail(run=False, reason='expm broken with non-canonical BSR') + def test_expm(self): + pass + + +class TestCOONonCanonical(_NonCanonicalMixin, TestCOO): + def _arg1_for_noncanonical(self, M, sorted_indices=None): + """Return non-canonical constructor arg1 equivalent to M""" + data, row, col = _same_sum_duplicate(M.data, M.row, M.col) + return data, (row, col) + + def _insert_explicit_zero(self, M, i, j): + M.data = np.r_[M.data.dtype.type(0), M.data] + M.row = np.r_[M.row.dtype.type(i), M.row] + M.col = np.r_[M.col.dtype.type(j), M.col] + return M + + def 
test_setdiag_noncanonical(self): + m = self.spcreator(np.eye(3)) + m.sum_duplicates() + m.setdiag([3, 2], k=1) + m.sum_duplicates() + assert_(np.all(np.diff(m.col) >= 0)) + + +def cases_64bit(): + TEST_CLASSES = [TestBSR, TestCOO, TestCSC, TestCSR, TestDIA, + # lil/dok->other conversion operations have get_index_dtype + TestDOK, TestLIL + ] + + # The following features are missing, so skip the tests: + SKIP_TESTS = { + 'test_expm': 'expm for 64-bit indices not available', + 'test_inv': 'linsolve for 64-bit indices not available', + 'test_solve': 'linsolve for 64-bit indices not available', + 'test_scalar_idx_dtype': 'test implemented in base class', + 'test_large_dimensions_reshape': 'test actually requires 64-bit to work', + 'test_constructor_smallcol': 'test verifies int32 indexes', + 'test_constructor_largecol': 'test verifies int64 indexes', + 'test_tocoo_tocsr_tocsc_gh19245': 'test verifies int32 indexes', + } + + for cls in TEST_CLASSES: + for method_name in sorted(dir(cls)): + method = getattr(cls, method_name) + if (method_name.startswith('test_') and + not getattr(method, 'slow', False)): + marks = [] + + msg = SKIP_TESTS.get(method_name) + if bool(msg): + marks += [pytest.mark.skip(reason=msg)] + + if _pep440.parse(pytest.__version__) >= _pep440.Version("3.6.0"): + markers = getattr(method, 'pytestmark', []) + for mark in markers: + if mark.name in ('skipif', 'skip', 'xfail', 'xslow'): + marks.append(mark) + else: + for mname in ['skipif', 'skip', 'xfail', 'xslow']: + if hasattr(method, mname): + marks += [getattr(method, mname)] + + yield pytest.param(cls, method_name, marks=marks) + + +class Test64Bit: + MAT_CLASSES = [bsr_matrix, coo_matrix, csc_matrix, csr_matrix, dia_matrix] + + def _create_some_matrix(self, mat_cls, m, n): + return mat_cls(np.random.rand(m, n)) + + def _compare_index_dtype(self, m, dtype): + dtype = np.dtype(dtype) + if isinstance(m, (csc_matrix, csr_matrix, bsr_matrix)): + return (m.indices.dtype == dtype) and (m.indptr.dtype == 
dtype) + elif isinstance(m, coo_matrix): + return (m.row.dtype == dtype) and (m.col.dtype == dtype) + elif isinstance(m, dia_matrix): + return (m.offsets.dtype == dtype) + else: + raise ValueError(f"matrix {m!r} has no integer indices") + + def test_decorator_maxval_limit(self): + # Test that the with_64bit_maxval_limit decorator works + + @with_64bit_maxval_limit(maxval_limit=10) + def check(mat_cls): + m = mat_cls(np.random.rand(10, 1)) + assert_(self._compare_index_dtype(m, np.int32)) + m = mat_cls(np.random.rand(11, 1)) + assert_(self._compare_index_dtype(m, np.int64)) + + for mat_cls in self.MAT_CLASSES: + check(mat_cls) + + def test_decorator_maxval_random(self): + # Test that the with_64bit_maxval_limit decorator works (2) + + @with_64bit_maxval_limit(random=True) + def check(mat_cls): + seen_32 = False + seen_64 = False + for k in range(100): + m = self._create_some_matrix(mat_cls, 9, 9) + seen_32 = seen_32 or self._compare_index_dtype(m, np.int32) + seen_64 = seen_64 or self._compare_index_dtype(m, np.int64) + if seen_32 and seen_64: + break + else: + raise AssertionError("both 32 and 64 bit indices not seen") + + for mat_cls in self.MAT_CLASSES: + check(mat_cls) + + def _check_resiliency(self, cls, method_name, **kw): + # Resiliency test, to check that sparse matrices deal reasonably + # with varying index data types. 
+ + @with_64bit_maxval_limit(**kw) + def check(cls, method_name): + instance = cls() + if hasattr(instance, 'setup_method'): + instance.setup_method() + try: + getattr(instance, method_name)() + finally: + if hasattr(instance, 'teardown_method'): + instance.teardown_method() + + check(cls, method_name) + + @pytest.mark.parametrize('cls,method_name', cases_64bit()) + def test_resiliency_limit_10(self, cls, method_name): + self._check_resiliency(cls, method_name, maxval_limit=10) + + @pytest.mark.parametrize('cls,method_name', cases_64bit()) + def test_resiliency_random(self, cls, method_name): + # bsr_matrix.eliminate_zeros relies on csr_matrix constructor + # not making copies of index arrays --- this is not + # necessarily true when we pick the index data type randomly + self._check_resiliency(cls, method_name, random=True) + + @pytest.mark.parametrize('cls,method_name', cases_64bit()) + def test_resiliency_all_32(self, cls, method_name): + self._check_resiliency(cls, method_name, fixed_dtype=np.int32) + + @pytest.mark.parametrize('cls,method_name', cases_64bit()) + def test_resiliency_all_64(self, cls, method_name): + self._check_resiliency(cls, method_name, fixed_dtype=np.int64) + + @pytest.mark.parametrize('cls,method_name', cases_64bit()) + def test_no_64(self, cls, method_name): + self._check_resiliency(cls, method_name, assert_32bit=True) + + def test_downcast_intp(self): + # Check that bincount and ufunc.reduceat intp downcasts are + # dealt with. The point here is to trigger points in the code + # that can fail on 32-bit systems when using 64-bit indices, + # due to use of functions that only work with intp-size + # indices. 
+ + @with_64bit_maxval_limit(fixed_dtype=np.int64, + downcast_maxval=1) + def check_limited(): + # These involve indices larger than `downcast_maxval` + a = csc_matrix([[1, 2], [3, 4], [5, 6]]) + assert_raises(AssertionError, a.getnnz, axis=1) + assert_raises(AssertionError, a.sum, axis=0) + + a = csr_matrix([[1, 2, 3], [3, 4, 6]]) + assert_raises(AssertionError, a.getnnz, axis=0) + + a = coo_matrix([[1, 2, 3], [3, 4, 5]]) + assert_raises(AssertionError, a.getnnz, axis=0) + + @with_64bit_maxval_limit(fixed_dtype=np.int64) + def check_unlimited(): + # These involve indices larger than `downcast_maxval` + a = csc_matrix([[1, 2], [3, 4], [5, 6]]) + a.getnnz(axis=1) + a.sum(axis=0) + + a = csr_matrix([[1, 2, 3], [3, 4, 6]]) + a.getnnz(axis=0) + + a = coo_matrix([[1, 2, 3], [3, 4, 5]]) + a.getnnz(axis=0) + + check_limited() + check_unlimited() diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_common1d.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_common1d.py new file mode 100644 index 0000000000000000000000000000000000000000..a80d26d739299b22508ad73de5bdd25474145e5d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_common1d.py @@ -0,0 +1,441 @@ +"""Test of 1D aspects of sparse array classes""" + +import pytest + +import numpy as np + +import scipy as sp +from scipy.sparse._sputils import supported_dtypes, matrix +from scipy._lib._util import ComplexWarning + + +sup_complex = np.testing.suppress_warnings() +sup_complex.filter(ComplexWarning) + + +spcreators = [sp.sparse.coo_array, sp.sparse.dok_array] +math_dtypes = [np.int64, np.float64, np.complex128] + + +@pytest.fixture +def dat1d(): + return np.array([3, 0, 1, 0], 'd') + + +@pytest.fixture +def datsp_math_dtypes(dat1d): + dat_dtypes = {dtype: dat1d.astype(dtype) for dtype in math_dtypes} + return { + sp: [(dtype, dat, sp(dat)) for dtype, dat in dat_dtypes.items()] + for sp in spcreators + } + + 
+@pytest.mark.parametrize("spcreator", spcreators) +class TestCommon1D: + """test common functionality shared by 1D sparse formats""" + + def test_create_empty(self, spcreator): + assert np.array_equal(spcreator((3,)).toarray(), np.zeros(3)) + assert np.array_equal(spcreator((3,)).nnz, 0) + assert np.array_equal(spcreator((3,)).count_nonzero(), 0) + + def test_invalid_shapes(self, spcreator): + with pytest.raises(ValueError, match='elements cannot be negative'): + spcreator((-3,)) + + def test_repr(self, spcreator, dat1d): + repr(spcreator(dat1d)) + + def test_str(self, spcreator, dat1d): + str(spcreator(dat1d)) + + def test_neg(self, spcreator): + A = np.array([-1, 0, 17, 0, -5, 0, 1, -4, 0, 0, 0, 0], 'd') + assert np.array_equal(-A, (-spcreator(A)).toarray()) + + def test_reshape_1d_tofrom_row_or_column(self, spcreator): + # add a dimension 1d->2d + x = spcreator([1, 0, 7, 0, 0, 0, 0, -3, 0, 0, 0, 5]) + y = x.reshape(1, 12) + desired = [[1, 0, 7, 0, 0, 0, 0, -3, 0, 0, 0, 5]] + assert np.array_equal(y.toarray(), desired) + + # remove a size-1 dimension 2d->1d + x = spcreator(desired) + y = x.reshape(12) + assert np.array_equal(y.toarray(), desired[0]) + y2 = x.reshape((12,)) + assert y.shape == y2.shape + + # make a 2d column into 1d. 
2d->1d + y = x.T.reshape(12) + assert np.array_equal(y.toarray(), desired[0]) + + def test_reshape(self, spcreator): + x = spcreator([1, 0, 7, 0, 0, 0, 0, -3, 0, 0, 0, 5]) + y = x.reshape((4, 3)) + desired = [[1, 0, 7], [0, 0, 0], [0, -3, 0], [0, 0, 5]] + assert np.array_equal(y.toarray(), desired) + + y = x.reshape((12,)) + assert y is x + + y = x.reshape(12) + assert np.array_equal(y.toarray(), x.toarray()) + + def test_sum(self, spcreator): + np.random.seed(1234) + dat_1 = np.array([0, 1, 2, 3, -4, 5, -6, 7, 9]) + dat_2 = np.random.rand(5) + dat_3 = np.array([]) + dat_4 = np.zeros((40,)) + arrays = [dat_1, dat_2, dat_3, dat_4] + + for dat in arrays: + datsp = spcreator(dat) + with np.errstate(over='ignore'): + assert np.isscalar(datsp.sum()) + assert np.allclose(dat.sum(), datsp.sum()) + assert np.allclose(dat.sum(axis=None), datsp.sum(axis=None)) + assert np.allclose(dat.sum(axis=0), datsp.sum(axis=0)) + assert np.allclose(dat.sum(axis=-1), datsp.sum(axis=-1)) + + # test `out` parameter + datsp.sum(axis=0, out=np.zeros(())) + + def test_sum_invalid_params(self, spcreator): + out = np.zeros((3,)) # wrong size for out + dat = np.array([0, 1, 2]) + datsp = spcreator(dat) + + with pytest.raises(ValueError, match='axis must be None, -1 or 0'): + datsp.sum(axis=1) + with pytest.raises(TypeError, match='Tuples are not accepted'): + datsp.sum(axis=(0, 1)) + with pytest.raises(TypeError, match='axis must be an integer'): + datsp.sum(axis=1.5) + with pytest.raises(ValueError, match='dimensions do not match'): + datsp.sum(axis=0, out=out) + + def test_numpy_sum(self, spcreator): + dat = np.array([0, 1, 2]) + datsp = spcreator(dat) + + dat_sum = np.sum(dat) + datsp_sum = np.sum(datsp) + + assert np.allclose(dat_sum, datsp_sum) + + def test_mean(self, spcreator): + dat = np.array([0, 1, 2]) + datsp = spcreator(dat) + + assert np.allclose(dat.mean(), datsp.mean()) + assert np.isscalar(datsp.mean(axis=None)) + assert np.allclose(dat.mean(axis=None), datsp.mean(axis=None)) + 
assert np.allclose(dat.mean(axis=0), datsp.mean(axis=0)) + assert np.allclose(dat.mean(axis=-1), datsp.mean(axis=-1)) + + with pytest.raises(ValueError, match='axis'): + datsp.mean(axis=1) + with pytest.raises(ValueError, match='axis'): + datsp.mean(axis=-2) + + def test_mean_invalid_params(self, spcreator): + out = np.asarray(np.zeros((1, 3))) + dat = np.array([[0, 1, 2], [3, -4, 5], [-6, 7, 9]]) + + if spcreator._format == 'uni': + with pytest.raises(ValueError, match='zq'): + spcreator(dat) + return + + datsp = spcreator(dat) + with pytest.raises(ValueError, match='axis out of range'): + datsp.mean(axis=3) + with pytest.raises(TypeError, match='Tuples are not accepted'): + datsp.mean(axis=(0, 1)) + with pytest.raises(TypeError, match='axis must be an integer'): + datsp.mean(axis=1.5) + with pytest.raises(ValueError, match='dimensions do not match'): + datsp.mean(axis=1, out=out) + + def test_sum_dtype(self, spcreator): + dat = np.array([0, 1, 2]) + datsp = spcreator(dat) + + for dtype in supported_dtypes: + dat_sum = dat.sum(dtype=dtype) + datsp_sum = datsp.sum(dtype=dtype) + + assert np.allclose(dat_sum, datsp_sum) + assert np.array_equal(dat_sum.dtype, datsp_sum.dtype) + + def test_mean_dtype(self, spcreator): + dat = np.array([0, 1, 2]) + datsp = spcreator(dat) + + for dtype in supported_dtypes: + dat_mean = dat.mean(dtype=dtype) + datsp_mean = datsp.mean(dtype=dtype) + + assert np.allclose(dat_mean, datsp_mean) + assert np.array_equal(dat_mean.dtype, datsp_mean.dtype) + + def test_mean_out(self, spcreator): + dat = np.array([0, 1, 2]) + datsp = spcreator(dat) + + dat_out = np.array([0]) + datsp_out = np.array([0]) + + dat.mean(out=dat_out, keepdims=True) + datsp.mean(out=datsp_out) + assert np.allclose(dat_out, datsp_out) + + dat.mean(axis=0, out=dat_out, keepdims=True) + datsp.mean(axis=0, out=datsp_out) + assert np.allclose(dat_out, datsp_out) + + def test_numpy_mean(self, spcreator): + dat = np.array([0, 1, 2]) + datsp = spcreator(dat) + + dat_mean = 
np.mean(dat) + datsp_mean = np.mean(datsp) + + assert np.allclose(dat_mean, datsp_mean) + assert np.array_equal(dat_mean.dtype, datsp_mean.dtype) + + @sup_complex + def test_from_array(self, spcreator): + A = np.array([2, 3, 4]) + assert np.array_equal(spcreator(A).toarray(), A) + + A = np.array([1.0 + 3j, 0, -1]) + assert np.array_equal(spcreator(A).toarray(), A) + assert np.array_equal(spcreator(A, dtype='int16').toarray(), A.astype('int16')) + + @sup_complex + def test_from_list(self, spcreator): + A = [2, 3, 4] + assert np.array_equal(spcreator(A).toarray(), A) + + A = [1.0 + 3j, 0, -1] + assert np.array_equal(spcreator(A).toarray(), np.array(A)) + assert np.array_equal( + spcreator(A, dtype='int16').toarray(), np.array(A).astype('int16') + ) + + @sup_complex + def test_from_sparse(self, spcreator): + D = np.array([1, 0, 0]) + S = sp.sparse.coo_array(D) + assert np.array_equal(spcreator(S).toarray(), D) + S = spcreator(D) + assert np.array_equal(spcreator(S).toarray(), D) + + D = np.array([1.0 + 3j, 0, -1]) + S = sp.sparse.coo_array(D) + assert np.array_equal(spcreator(S).toarray(), D) + assert np.array_equal(spcreator(S, dtype='int16').toarray(), D.astype('int16')) + S = spcreator(D) + assert np.array_equal(spcreator(S).toarray(), D) + assert np.array_equal(spcreator(S, dtype='int16').toarray(), D.astype('int16')) + + def test_toarray(self, spcreator, dat1d): + datsp = spcreator(dat1d) + # Check C- or F-contiguous (default). + chk = datsp.toarray() + assert np.array_equal(chk, dat1d) + assert chk.flags.c_contiguous == chk.flags.f_contiguous + + # Check C-contiguous (with arg). + chk = datsp.toarray(order='C') + assert np.array_equal(chk, dat1d) + assert chk.flags.c_contiguous + assert chk.flags.f_contiguous + + # Check F-contiguous (with arg). + chk = datsp.toarray(order='F') + assert np.array_equal(chk, dat1d) + assert chk.flags.c_contiguous + assert chk.flags.f_contiguous + + # Check with output arg. 
+ out = np.zeros(datsp.shape, dtype=datsp.dtype) + datsp.toarray(out=out) + assert np.array_equal(out, dat1d) + + # Check that things are fine when we don't initialize with zeros. + out[...] = 1.0 + datsp.toarray(out=out) + assert np.array_equal(out, dat1d) + + # np.dot does not work with sparse matrices (unless scalars) + # so this is testing whether dat1d matches datsp.toarray() + a = np.array([1.0, 2.0, 3.0, 4.0]) + dense_dot_dense = np.dot(a, dat1d) + check = np.dot(a, datsp.toarray()) + assert np.array_equal(dense_dot_dense, check) + + b = np.array([1.0, 2.0, 3.0, 4.0]) + dense_dot_dense = np.dot(dat1d, b) + check = np.dot(datsp.toarray(), b) + assert np.array_equal(dense_dot_dense, check) + + # Check bool data works. + spbool = spcreator(dat1d, dtype=bool) + arrbool = dat1d.astype(bool) + assert np.array_equal(spbool.toarray(), arrbool) + + def test_add(self, spcreator, datsp_math_dtypes): + for dtype, dat, datsp in datsp_math_dtypes[spcreator]: + a = dat.copy() + a[0] = 2.0 + b = datsp + c = b + a + assert np.array_equal(c, b.toarray() + a) + + # test broadcasting + # Note: cant add nonzero scalar to sparray. 
Can add len 1 array + c = b + a[0:1] + assert np.array_equal(c, b.toarray() + a[0]) + + def test_radd(self, spcreator, datsp_math_dtypes): + for dtype, dat, datsp in datsp_math_dtypes[spcreator]: + a = dat.copy() + a[0] = 2.0 + b = datsp + c = a + b + assert np.array_equal(c, a + b.toarray()) + + def test_rsub(self, spcreator, datsp_math_dtypes): + for dtype, dat, datsp in datsp_math_dtypes[spcreator]: + if dtype == np.dtype('bool'): + # boolean array subtraction deprecated in 1.9.0 + continue + + assert np.array_equal((dat - datsp), [0, 0, 0, 0]) + assert np.array_equal((datsp - dat), [0, 0, 0, 0]) + assert np.array_equal((0 - datsp).toarray(), -dat) + + A = spcreator([1, -4, 0, 2], dtype='d') + assert np.array_equal((dat - A), dat - A.toarray()) + assert np.array_equal((A - dat), A.toarray() - dat) + assert np.array_equal(A.toarray() - datsp, A.toarray() - dat) + assert np.array_equal(datsp - A.toarray(), dat - A.toarray()) + + # test broadcasting + assert np.array_equal(dat[:1] - datsp, dat[:1] - dat) + + def test_matvec(self, spcreator): + A = np.array([2, 0, 3.0]) + Asp = spcreator(A) + col = np.array([[1, 2, 3]]).T + + assert np.allclose(Asp @ col, Asp.toarray() @ col) + + assert (A @ np.array([1, 2, 3])).shape == () + assert Asp @ np.array([1, 2, 3]) == 11 + assert (Asp @ np.array([1, 2, 3])).shape == () + assert (Asp @ np.array([[1], [2], [3]])).shape == () + # check result type + assert isinstance(Asp @ matrix([[1, 2, 3]]).T, np.ndarray) + assert (Asp @ np.array([[1, 2, 3]]).T).shape == () + + # ensure exception is raised for improper dimensions + bad_vecs = [np.array([1, 2]), np.array([1, 2, 3, 4]), np.array([[1], [2]])] + for x in bad_vecs: + with pytest.raises(ValueError, match='dimension mismatch'): + Asp.__matmul__(x) + + # The current relationship between sparse matrix products and array + # products is as follows: + dot_result = np.dot(Asp.toarray(), [1, 2, 3]) + assert np.allclose(Asp @ np.array([1, 2, 3]), dot_result) + assert np.allclose(Asp @ 
[[1], [2], [3]], dot_result.T) + # Note that the result of Asp @ x is dense if x has a singleton dimension. + + def test_rmatvec(self, spcreator, dat1d): + M = spcreator(dat1d) + assert np.allclose([1, 2, 3, 4] @ M, np.dot([1, 2, 3, 4], M.toarray())) + row = np.array([[1, 2, 3, 4]]) + assert np.allclose(row @ M, row @ M.toarray()) + + def test_transpose(self, spcreator, dat1d): + for A in [dat1d, np.array([])]: + B = spcreator(A) + assert np.array_equal(B.toarray(), A) + assert np.array_equal(B.transpose().toarray(), A) + assert np.array_equal(B.dtype, A.dtype) + + def test_add_dense_to_sparse(self, spcreator, datsp_math_dtypes): + for dtype, dat, datsp in datsp_math_dtypes[spcreator]: + sum1 = dat + datsp + assert np.array_equal(sum1, dat + dat) + sum2 = datsp + dat + assert np.array_equal(sum2, dat + dat) + + def test_iterator(self, spcreator): + # test that __iter__ is compatible with NumPy + B = np.arange(5) + A = spcreator(B) + + if A.format not in ['coo', 'dia', 'bsr']: + for x, y in zip(A, B): + assert np.array_equal(x, y) + + def test_resize(self, spcreator): + # resize(shape) resizes the matrix in-place + D = np.array([1, 0, 3, 4]) + S = spcreator(D) + assert S.resize((3,)) is None + assert np.array_equal(S.toarray(), [1, 0, 3]) + S.resize((5,)) + assert np.array_equal(S.toarray(), [1, 0, 3, 0, 0]) + + +@pytest.mark.parametrize("spcreator", [sp.sparse.dok_array]) +class TestGetSet1D: + def test_getelement(self, spcreator): + D = np.array([4, 3, 0]) + A = spcreator(D) + + N = D.shape[0] + for j in range(-N, N): + assert np.array_equal(A[j], D[j]) + + for ij in [3, -4]: + with pytest.raises( + (IndexError, TypeError), match='index value out of bounds' + ): + A.__getitem__(ij) + + # single element tuples unwrapped + assert A[(0,)] == 4 + + with pytest.raises(IndexError, match='index value out of bounds'): + A.__getitem__((4,)) + + def test_setelement(self, spcreator): + dtype = np.float64 + A = spcreator((12,), dtype=dtype) + with 
np.testing.suppress_warnings() as sup: + sup.filter( + sp.sparse.SparseEfficiencyWarning, + "Changing the sparsity structure of a cs[cr]_matrix is expensive", + ) + A[0] = dtype(0) + A[1] = dtype(3) + A[8] = dtype(9.0) + A[-2] = dtype(7) + A[5] = 9 + + A[-9,] = dtype(8) + A[1,] = dtype(5) # overwrite using 1-tuple index + + for ij in [13, -14, (13,), (14,)]: + with pytest.raises(IndexError, match='index value out of bounds'): + A.__setitem__(ij, 123.0) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_construct.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_construct.py new file mode 100644 index 0000000000000000000000000000000000000000..ff174158cb99356733a0535cf38a43216a995895 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_construct.py @@ -0,0 +1,764 @@ +"""test sparse matrix construction functions""" + +import numpy as np +from numpy import array +from numpy.testing import (assert_equal, assert_, + assert_array_equal, assert_array_almost_equal_nulp) +import pytest +from pytest import raises as assert_raises +from scipy._lib._testutils import check_free_memory +from scipy._lib._util import check_random_state + +from scipy.sparse import (csr_matrix, coo_matrix, + csr_array, coo_array, + sparray, spmatrix, + _construct as construct) +from scipy.sparse._construct import rand as sprand + +sparse_formats = ['csr','csc','coo','bsr','dia','lil','dok'] + +#TODO check whether format=XXX is respected + + +def _sprandn(m, n, density=0.01, format="coo", dtype=None, random_state=None): + # Helper function for testing. + random_state = check_random_state(random_state) + data_rvs = random_state.standard_normal + return construct.random(m, n, density, format, dtype, + random_state, data_rvs) + + +def _sprandn_array(m, n, density=0.01, format="coo", dtype=None, random_state=None): + # Helper function for testing. 
+ random_state = check_random_state(random_state) + data_sampler = random_state.standard_normal + return construct.random_array((m, n), density=density, format=format, dtype=dtype, + random_state=random_state, data_sampler=data_sampler) + + +class TestConstructUtils: + def test_spdiags(self): + diags1 = array([[1, 2, 3, 4, 5]]) + diags2 = array([[1, 2, 3, 4, 5], + [6, 7, 8, 9,10]]) + diags3 = array([[1, 2, 3, 4, 5], + [6, 7, 8, 9,10], + [11,12,13,14,15]]) + + cases = [] + cases.append((diags1, 0, 1, 1, [[1]])) + cases.append((diags1, [0], 1, 1, [[1]])) + cases.append((diags1, [0], 2, 1, [[1],[0]])) + cases.append((diags1, [0], 1, 2, [[1,0]])) + cases.append((diags1, [1], 1, 2, [[0,2]])) + cases.append((diags1,[-1], 1, 2, [[0,0]])) + cases.append((diags1, [0], 2, 2, [[1,0],[0,2]])) + cases.append((diags1,[-1], 2, 2, [[0,0],[1,0]])) + cases.append((diags1, [3], 2, 2, [[0,0],[0,0]])) + cases.append((diags1, [0], 3, 4, [[1,0,0,0],[0,2,0,0],[0,0,3,0]])) + cases.append((diags1, [1], 3, 4, [[0,2,0,0],[0,0,3,0],[0,0,0,4]])) + cases.append((diags1, [2], 3, 5, [[0,0,3,0,0],[0,0,0,4,0],[0,0,0,0,5]])) + + cases.append((diags2, [0,2], 3, 3, [[1,0,8],[0,2,0],[0,0,3]])) + cases.append((diags2, [-1,0], 3, 4, [[6,0,0,0],[1,7,0,0],[0,2,8,0]])) + cases.append((diags2, [2,-3], 6, 6, [[0,0,3,0,0,0], + [0,0,0,4,0,0], + [0,0,0,0,5,0], + [6,0,0,0,0,0], + [0,7,0,0,0,0], + [0,0,8,0,0,0]])) + + cases.append((diags3, [-1,0,1], 6, 6, [[6,12, 0, 0, 0, 0], + [1, 7,13, 0, 0, 0], + [0, 2, 8,14, 0, 0], + [0, 0, 3, 9,15, 0], + [0, 0, 0, 4,10, 0], + [0, 0, 0, 0, 5, 0]])) + cases.append((diags3, [-4,2,-1], 6, 5, [[0, 0, 8, 0, 0], + [11, 0, 0, 9, 0], + [0,12, 0, 0,10], + [0, 0,13, 0, 0], + [1, 0, 0,14, 0], + [0, 2, 0, 0,15]])) + cases.append((diags3, [-1, 1, 2], len(diags3[0]), len(diags3[0]), + [[0, 7, 13, 0, 0], + [1, 0, 8, 14, 0], + [0, 2, 0, 9, 15], + [0, 0, 3, 0, 10], + [0, 0, 0, 4, 0]])) + + for d, o, m, n, result in cases: + if len(d[0]) == m and m == n: + assert_equal(construct.spdiags(d, 
o).toarray(), result) + assert_equal(construct.spdiags(d, o, m, n).toarray(), result) + assert_equal(construct.spdiags(d, o, (m, n)).toarray(), result) + + def test_diags(self): + a = array([1, 2, 3, 4, 5]) + b = array([6, 7, 8, 9, 10]) + c = array([11, 12, 13, 14, 15]) + + cases = [] + cases.append((a[:1], 0, (1, 1), [[1]])) + cases.append(([a[:1]], [0], (1, 1), [[1]])) + cases.append(([a[:1]], [0], (2, 1), [[1],[0]])) + cases.append(([a[:1]], [0], (1, 2), [[1,0]])) + cases.append(([a[:1]], [1], (1, 2), [[0,1]])) + cases.append(([a[:2]], [0], (2, 2), [[1,0],[0,2]])) + cases.append(([a[:1]],[-1], (2, 2), [[0,0],[1,0]])) + cases.append(([a[:3]], [0], (3, 4), [[1,0,0,0],[0,2,0,0],[0,0,3,0]])) + cases.append(([a[:3]], [1], (3, 4), [[0,1,0,0],[0,0,2,0],[0,0,0,3]])) + cases.append(([a[:1]], [-2], (3, 5), [[0,0,0,0,0],[0,0,0,0,0],[1,0,0,0,0]])) + cases.append(([a[:2]], [-1], (3, 5), [[0,0,0,0,0],[1,0,0,0,0],[0,2,0,0,0]])) + cases.append(([a[:3]], [0], (3, 5), [[1,0,0,0,0],[0,2,0,0,0],[0,0,3,0,0]])) + cases.append(([a[:3]], [1], (3, 5), [[0,1,0,0,0],[0,0,2,0,0],[0,0,0,3,0]])) + cases.append(([a[:3]], [2], (3, 5), [[0,0,1,0,0],[0,0,0,2,0],[0,0,0,0,3]])) + cases.append(([a[:2]], [3], (3, 5), [[0,0,0,1,0],[0,0,0,0,2],[0,0,0,0,0]])) + cases.append(([a[:1]], [4], (3, 5), [[0,0,0,0,1],[0,0,0,0,0],[0,0,0,0,0]])) + cases.append(([a[:1]], [-4], (5, 3), [[0,0,0],[0,0,0],[0,0,0],[0,0,0],[1,0,0]])) + cases.append(([a[:2]], [-3], (5, 3), [[0,0,0],[0,0,0],[0,0,0],[1,0,0],[0,2,0]])) + cases.append(([a[:3]], [-2], (5, 3), [[0,0,0],[0,0,0],[1,0,0],[0,2,0],[0,0,3]])) + cases.append(([a[:3]], [-1], (5, 3), [[0,0,0],[1,0,0],[0,2,0],[0,0,3],[0,0,0]])) + cases.append(([a[:3]], [0], (5, 3), [[1,0,0],[0,2,0],[0,0,3],[0,0,0],[0,0,0]])) + cases.append(([a[:2]], [1], (5, 3), [[0,1,0],[0,0,2],[0,0,0],[0,0,0],[0,0,0]])) + cases.append(([a[:1]], [2], (5, 3), [[0,0,1],[0,0,0],[0,0,0],[0,0,0],[0,0,0]])) + + cases.append(([a[:3],b[:1]], [0,2], (3, 3), [[1,0,6],[0,2,0],[0,0,3]])) + 
cases.append(([a[:2],b[:3]], [-1,0], (3, 4), [[6,0,0,0],[1,7,0,0],[0,2,8,0]])) + cases.append(([a[:4],b[:3]], [2,-3], (6, 6), [[0,0,1,0,0,0], + [0,0,0,2,0,0], + [0,0,0,0,3,0], + [6,0,0,0,0,4], + [0,7,0,0,0,0], + [0,0,8,0,0,0]])) + + cases.append(([a[:4],b,c[:4]], [-1,0,1], (5, 5), [[6,11, 0, 0, 0], + [1, 7,12, 0, 0], + [0, 2, 8,13, 0], + [0, 0, 3, 9,14], + [0, 0, 0, 4,10]])) + cases.append(([a[:2],b[:3],c], [-4,2,-1], (6, 5), [[0, 0, 6, 0, 0], + [11, 0, 0, 7, 0], + [0,12, 0, 0, 8], + [0, 0,13, 0, 0], + [1, 0, 0,14, 0], + [0, 2, 0, 0,15]])) + + # too long arrays are OK + cases.append(([a], [0], (1, 1), [[1]])) + cases.append(([a[:3],b], [0,2], (3, 3), [[1, 0, 6], [0, 2, 0], [0, 0, 3]])) + cases.append(( + np.array([[1, 2, 3], [4, 5, 6]]), + [0,-1], + (3, 3), + [[1, 0, 0], [4, 2, 0], [0, 5, 3]] + )) + + # scalar case: broadcasting + cases.append(([1,-2,1], [1,0,-1], (3, 3), [[-2, 1, 0], + [1, -2, 1], + [0, 1, -2]])) + + for d, o, shape, result in cases: + err_msg = f"{d!r} {o!r} {shape!r} {result!r}" + assert_equal(construct.diags(d, offsets=o, shape=shape).toarray(), + result, err_msg=err_msg) + + if (shape[0] == shape[1] + and hasattr(d[0], '__len__') + and len(d[0]) <= max(shape)): + # should be able to find the shape automatically + assert_equal(construct.diags(d, offsets=o).toarray(), result, + err_msg=err_msg) + + def test_diags_default(self): + a = array([1, 2, 3, 4, 5]) + assert_equal(construct.diags(a).toarray(), np.diag(a)) + + def test_diags_default_bad(self): + a = array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6]]) + assert_raises(ValueError, construct.diags, a) + + def test_diags_bad(self): + a = array([1, 2, 3, 4, 5]) + b = array([6, 7, 8, 9, 10]) + c = array([11, 12, 13, 14, 15]) + + cases = [] + cases.append(([a[:0]], 0, (1, 1))) + cases.append(([a[:4],b,c[:3]], [-1,0,1], (5, 5))) + cases.append(([a[:2],c,b[:3]], [-4,2,-1], (6, 5))) + cases.append(([a[:2],c,b[:3]], [-4,2,-1], None)) + cases.append(([], [-4,2,-1], None)) + cases.append(([1], [-5], (4, 4))) + 
cases.append(([a], 0, None)) + + for d, o, shape in cases: + assert_raises(ValueError, construct.diags, d, offsets=o, shape=shape) + + assert_raises(TypeError, construct.diags, [[None]], offsets=[0]) + + def test_diags_vs_diag(self): + # Check that + # + # diags([a, b, ...], [i, j, ...]) == diag(a, i) + diag(b, j) + ... + # + + np.random.seed(1234) + + for n_diags in [1, 2, 3, 4, 5, 10]: + n = 1 + n_diags//2 + np.random.randint(0, 10) + + offsets = np.arange(-n+1, n-1) + np.random.shuffle(offsets) + offsets = offsets[:n_diags] + + diagonals = [np.random.rand(n - abs(q)) for q in offsets] + + mat = construct.diags(diagonals, offsets=offsets) + dense_mat = sum([np.diag(x, j) for x, j in zip(diagonals, offsets)]) + + assert_array_almost_equal_nulp(mat.toarray(), dense_mat) + + if len(offsets) == 1: + mat = construct.diags(diagonals[0], offsets=offsets[0]) + dense_mat = np.diag(diagonals[0], offsets[0]) + assert_array_almost_equal_nulp(mat.toarray(), dense_mat) + + def test_diags_dtype(self): + x = construct.diags([2.2], offsets=[0], shape=(2, 2), dtype=int) + assert_equal(x.dtype, int) + assert_equal(x.toarray(), [[2, 0], [0, 2]]) + + def test_diags_one_diagonal(self): + d = list(range(5)) + for k in range(-5, 6): + assert_equal(construct.diags(d, offsets=k).toarray(), + construct.diags([d], offsets=[k]).toarray()) + + def test_diags_empty(self): + x = construct.diags([]) + assert_equal(x.shape, (0, 0)) + + @pytest.mark.parametrize("identity", [construct.identity, construct.eye_array]) + def test_identity(self, identity): + assert_equal(identity(1).toarray(), [[1]]) + assert_equal(identity(2).toarray(), [[1,0],[0,1]]) + + I = identity(3, dtype='int8', format='dia') + assert_equal(I.dtype, np.dtype('int8')) + assert_equal(I.format, 'dia') + + for fmt in sparse_formats: + I = identity(3, format=fmt) + assert_equal(I.format, fmt) + assert_equal(I.toarray(), [[1,0,0],[0,1,0],[0,0,1]]) + + @pytest.mark.parametrize("eye", [construct.eye, construct.eye_array]) + def 
test_eye(self, eye): + assert_equal(eye(1,1).toarray(), [[1]]) + assert_equal(eye(2,3).toarray(), [[1,0,0],[0,1,0]]) + assert_equal(eye(3,2).toarray(), [[1,0],[0,1],[0,0]]) + assert_equal(eye(3,3).toarray(), [[1,0,0],[0,1,0],[0,0,1]]) + + assert_equal(eye(3,3,dtype='int16').dtype, np.dtype('int16')) + + for m in [3, 5]: + for n in [3, 5]: + for k in range(-5,6): + # scipy.sparse.eye deviates from np.eye here. np.eye will + # create arrays of all 0's when the diagonal offset is + # greater than the size of the array. For sparse arrays + # this makes less sense, especially as it results in dia + # arrays with negative diagonals. Therefore sp.sparse.eye + # validates that diagonal offsets fall within the shape of + # the array. See gh-18555. + if (k > 0 and k > n) or (k < 0 and abs(k) > m): + with pytest.raises( + ValueError, match="Offset.*out of bounds" + ): + eye(m, n, k=k) + + else: + assert_equal( + eye(m, n, k=k).toarray(), + np.eye(m, n, k=k) + ) + if m == n: + assert_equal( + eye(m, k=k).toarray(), + np.eye(m, n, k=k) + ) + + @pytest.mark.parametrize("eye", [construct.eye, construct.eye_array]) + def test_eye_one(self, eye): + assert_equal(eye(1).toarray(), [[1]]) + assert_equal(eye(2).toarray(), [[1,0],[0,1]]) + + I = eye(3, dtype='int8', format='dia') + assert_equal(I.dtype, np.dtype('int8')) + assert_equal(I.format, 'dia') + + for fmt in sparse_formats: + I = eye(3, format=fmt) + assert_equal(I.format, fmt) + assert_equal(I.toarray(), [[1,0,0],[0,1,0],[0,0,1]]) + + def test_eye_array_vs_matrix(self): + assert isinstance(construct.eye_array(3), sparray) + assert not isinstance(construct.eye(3), sparray) + + def test_kron(self): + cases = [] + + cases.append(array([[0]])) + cases.append(array([[-1]])) + cases.append(array([[4]])) + cases.append(array([[10]])) + cases.append(array([[0],[0]])) + cases.append(array([[0,0]])) + cases.append(array([[1,2],[3,4]])) + cases.append(array([[0,2],[5,0]])) + cases.append(array([[0,2,-6],[8,0,14]])) + 
cases.append(array([[5,4],[0,0],[6,0]])) + cases.append(array([[5,4,4],[1,0,0],[6,0,8]])) + cases.append(array([[0,1,0,2,0,5,8]])) + cases.append(array([[0.5,0.125,0,3.25],[0,2.5,0,0]])) + + # test all cases with some formats + for a in cases: + ca = csr_array(a) + for b in cases: + cb = csr_array(b) + expected = np.kron(a, b) + for fmt in sparse_formats[1:4]: + result = construct.kron(ca, cb, format=fmt) + assert_equal(result.format, fmt) + assert_array_equal(result.toarray(), expected) + assert isinstance(result, sparray) + + # test one case with all formats + a = cases[-1] + b = cases[-3] + ca = csr_array(a) + cb = csr_array(b) + + expected = np.kron(a, b) + for fmt in sparse_formats: + result = construct.kron(ca, cb, format=fmt) + assert_equal(result.format, fmt) + assert_array_equal(result.toarray(), expected) + assert isinstance(result, sparray) + + # check that spmatrix returned when both inputs are spmatrix + result = construct.kron(csr_matrix(a), csr_matrix(b), format=fmt) + assert_equal(result.format, fmt) + assert_array_equal(result.toarray(), expected) + assert isinstance(result, spmatrix) + + def test_kron_large(self): + n = 2**16 + a = construct.diags_array([1], shape=(1, n), offsets=n-1) + b = construct.diags_array([1], shape=(n, 1), offsets=1-n) + + construct.kron(a, a) + construct.kron(b, b) + + def test_kronsum(self): + cases = [] + + cases.append(array([[0]])) + cases.append(array([[-1]])) + cases.append(array([[4]])) + cases.append(array([[10]])) + cases.append(array([[1,2],[3,4]])) + cases.append(array([[0,2],[5,0]])) + cases.append(array([[0,2,-6],[8,0,14],[0,3,0]])) + cases.append(array([[1,0,0],[0,5,-1],[4,-2,8]])) + + # test all cases with default format + for a in cases: + for b in cases: + result = construct.kronsum(csr_array(a), csr_array(b)).toarray() + expected = (np.kron(np.eye(b.shape[0]), a) + + np.kron(b, np.eye(a.shape[0]))) + assert_array_equal(result, expected) + + # check that spmatrix returned when both inputs are spmatrix + 
result = construct.kronsum(csr_matrix(a), csr_matrix(b)).toarray() + assert_array_equal(result, expected) + + @pytest.mark.parametrize("coo_cls", [coo_matrix, coo_array]) + def test_vstack(self, coo_cls): + A = coo_cls([[1,2],[3,4]]) + B = coo_cls([[5,6]]) + + expected = array([[1, 2], + [3, 4], + [5, 6]]) + assert_equal(construct.vstack([A, B]).toarray(), expected) + assert_equal(construct.vstack([A, B], dtype=np.float32).dtype, + np.float32) + + assert_equal(construct.vstack([A.todok(), B.todok()]).toarray(), expected) + + assert_equal(construct.vstack([A.tocsr(), B.tocsr()]).toarray(), + expected) + result = construct.vstack([A.tocsr(), B.tocsr()], + format="csr", dtype=np.float32) + assert_equal(result.dtype, np.float32) + assert_equal(result.indices.dtype, np.int32) + assert_equal(result.indptr.dtype, np.int32) + + assert_equal(construct.vstack([A.tocsc(), B.tocsc()]).toarray(), + expected) + result = construct.vstack([A.tocsc(), B.tocsc()], + format="csc", dtype=np.float32) + assert_equal(result.dtype, np.float32) + assert_equal(result.indices.dtype, np.int32) + assert_equal(result.indptr.dtype, np.int32) + + def test_vstack_matrix_or_array(self): + A = [[1,2],[3,4]] + B = [[5,6]] + assert isinstance(construct.vstack([coo_array(A), coo_array(B)]), sparray) + assert isinstance(construct.vstack([coo_array(A), coo_matrix(B)]), sparray) + assert isinstance(construct.vstack([coo_matrix(A), coo_array(B)]), sparray) + assert isinstance(construct.vstack([coo_matrix(A), coo_matrix(B)]), spmatrix) + + @pytest.mark.parametrize("coo_cls", [coo_matrix, coo_array]) + def test_hstack(self, coo_cls): + A = coo_cls([[1,2],[3,4]]) + B = coo_cls([[5],[6]]) + + expected = array([[1, 2, 5], + [3, 4, 6]]) + assert_equal(construct.hstack([A, B]).toarray(), expected) + assert_equal(construct.hstack([A, B], dtype=np.float32).dtype, + np.float32) + + assert_equal(construct.hstack([A.todok(), B.todok()]).toarray(), expected) + + assert_equal(construct.hstack([A.tocsc(), 
B.tocsc()]).toarray(), + expected) + assert_equal(construct.hstack([A.tocsc(), B.tocsc()], + dtype=np.float32).dtype, + np.float32) + assert_equal(construct.hstack([A.tocsr(), B.tocsr()]).toarray(), + expected) + assert_equal(construct.hstack([A.tocsr(), B.tocsr()], + dtype=np.float32).dtype, + np.float32) + + def test_hstack_matrix_or_array(self): + A = [[1,2],[3,4]] + B = [[5],[6]] + assert isinstance(construct.hstack([coo_array(A), coo_array(B)]), sparray) + assert isinstance(construct.hstack([coo_array(A), coo_matrix(B)]), sparray) + assert isinstance(construct.hstack([coo_matrix(A), coo_array(B)]), sparray) + assert isinstance(construct.hstack([coo_matrix(A), coo_matrix(B)]), spmatrix) + + @pytest.mark.parametrize("block_array", (construct.bmat, construct.block_array)) + def test_block_creation(self, block_array): + + A = coo_array([[1, 2], [3, 4]]) + B = coo_array([[5],[6]]) + C = coo_array([[7]]) + D = coo_array((0, 0)) + + expected = array([[1, 2, 5], + [3, 4, 6], + [0, 0, 7]]) + assert_equal(block_array([[A, B], [None, C]]).toarray(), expected) + E = csr_array((1, 2), dtype=np.int32) + assert_equal(block_array([[A.tocsr(), B.tocsr()], + [E, C.tocsr()]]).toarray(), + expected) + assert_equal(block_array([[A.tocsc(), B.tocsc()], + [E.tocsc(), C.tocsc()]]).toarray(), + expected) + + expected = array([[1, 2, 0], + [3, 4, 0], + [0, 0, 7]]) + assert_equal(block_array([[A, None], [None, C]]).toarray(), expected) + assert_equal(block_array([[A.tocsr(), E.T.tocsr()], + [E, C.tocsr()]]).toarray(), + expected) + assert_equal(block_array([[A.tocsc(), E.T.tocsc()], + [E.tocsc(), C.tocsc()]]).toarray(), + expected) + + Z = csr_array((1, 1), dtype=np.int32) + expected = array([[0, 5], + [0, 6], + [7, 0]]) + assert_equal(block_array([[None, B], [C, None]]).toarray(), expected) + assert_equal(block_array([[E.T.tocsr(), B.tocsr()], + [C.tocsr(), Z]]).toarray(), + expected) + assert_equal(block_array([[E.T.tocsc(), B.tocsc()], + [C.tocsc(), Z.tocsc()]]).toarray(), + 
expected) + + expected = np.empty((0, 0)) + assert_equal(block_array([[None, None]]).toarray(), expected) + assert_equal(block_array([[None, D], [D, None]]).toarray(), + expected) + + # test bug reported in gh-5976 + expected = array([[7]]) + assert_equal(block_array([[None, D], [C, None]]).toarray(), + expected) + + # test failure cases + with assert_raises(ValueError) as excinfo: + block_array([[A], [B]]) + excinfo.match(r'Got blocks\[1,0\]\.shape\[1\] == 1, expected 2') + + with assert_raises(ValueError) as excinfo: + block_array([[A.tocsr()], [B.tocsr()]]) + excinfo.match(r'incompatible dimensions for axis 1') + + with assert_raises(ValueError) as excinfo: + block_array([[A.tocsc()], [B.tocsc()]]) + excinfo.match(r'Mismatching dimensions along axis 1: ({1, 2}|{2, 1})') + + with assert_raises(ValueError) as excinfo: + block_array([[A, C]]) + excinfo.match(r'Got blocks\[0,1\]\.shape\[0\] == 1, expected 2') + + with assert_raises(ValueError) as excinfo: + block_array([[A.tocsr(), C.tocsr()]]) + excinfo.match(r'Mismatching dimensions along axis 0: ({1, 2}|{2, 1})') + + with assert_raises(ValueError) as excinfo: + block_array([[A.tocsc(), C.tocsc()]]) + excinfo.match(r'incompatible dimensions for axis 0') + + def test_block_return_type(self): + block = construct.block_array + + # csr format ensures we hit _compressed_sparse_stack + # shape of F,G ensure we hit _stack_along_minor_axis + # list version ensure we hit the path with neither helper function + Fl, Gl = [[1, 2],[3, 4]], [[7], [5]] + Fm, Gm = csr_matrix(Fl), csr_matrix(Gl) + assert isinstance(block([[None, Fl], [Gl, None]], format="csr"), sparray) + assert isinstance(block([[None, Fm], [Gm, None]], format="csr"), sparray) + assert isinstance(block([[Fm, Gm]], format="csr"), sparray) + + def test_bmat_return_type(self): + """This can be removed after sparse matrix is removed""" + bmat = construct.bmat + # check return type. 
if any input _is_array output array, else matrix + Fl, Gl = [[1, 2],[3, 4]], [[7], [5]] + Fm, Gm = csr_matrix(Fl), csr_matrix(Gl) + Fa, Ga = csr_array(Fl), csr_array(Gl) + assert isinstance(bmat([[Fa, Ga]], format="csr"), sparray) + assert isinstance(bmat([[Fm, Gm]], format="csr"), spmatrix) + assert isinstance(bmat([[None, Fa], [Ga, None]], format="csr"), sparray) + assert isinstance(bmat([[None, Fm], [Ga, None]], format="csr"), sparray) + assert isinstance(bmat([[None, Fm], [Gm, None]], format="csr"), spmatrix) + assert isinstance(bmat([[None, Fl], [Gl, None]], format="csr"), spmatrix) + + # type returned by _compressed_sparse_stack (all csr) + assert isinstance(bmat([[Ga, Ga]], format="csr"), sparray) + assert isinstance(bmat([[Gm, Ga]], format="csr"), sparray) + assert isinstance(bmat([[Ga, Gm]], format="csr"), sparray) + assert isinstance(bmat([[Gm, Gm]], format="csr"), spmatrix) + # shape is 2x2 so no _stack_along_minor_axis + assert isinstance(bmat([[Fa, Fm]], format="csr"), sparray) + assert isinstance(bmat([[Fm, Fm]], format="csr"), spmatrix) + + # type returned by _compressed_sparse_stack (all csc) + assert isinstance(bmat([[Gm.tocsc(), Ga.tocsc()]], format="csc"), sparray) + assert isinstance(bmat([[Gm.tocsc(), Gm.tocsc()]], format="csc"), spmatrix) + # shape is 2x2 so no _stack_along_minor_axis + assert isinstance(bmat([[Fa.tocsc(), Fm.tocsc()]], format="csr"), sparray) + assert isinstance(bmat([[Fm.tocsc(), Fm.tocsc()]], format="csr"), spmatrix) + + # type returned when mixed input + assert isinstance(bmat([[Gl, Ga]], format="csr"), sparray) + assert isinstance(bmat([[Gm.tocsc(), Ga]], format="csr"), sparray) + assert isinstance(bmat([[Gm.tocsc(), Gm]], format="csr"), spmatrix) + assert isinstance(bmat([[Gm, Gm]], format="csc"), spmatrix) + + @pytest.mark.slow + @pytest.mark.xfail_on_32bit("Can't create large array for test") + def test_concatenate_int32_overflow(self): + """ test for indptr overflow when concatenating matrices """ + 
check_free_memory(30000) + + n = 33000 + A = csr_array(np.ones((n, n), dtype=bool)) + B = A.copy() + C = construct._compressed_sparse_stack((A, B), axis=0, + return_spmatrix=False) + + assert_(np.all(np.equal(np.diff(C.indptr), n))) + assert_equal(C.indices.dtype, np.int64) + assert_equal(C.indptr.dtype, np.int64) + + def test_block_diag_basic(self): + """ basic test for block_diag """ + A = coo_array([[1,2],[3,4]]) + B = coo_array([[5],[6]]) + C = coo_array([[7]]) + + expected = array([[1, 2, 0, 0], + [3, 4, 0, 0], + [0, 0, 5, 0], + [0, 0, 6, 0], + [0, 0, 0, 7]]) + + assert_equal(construct.block_diag((A, B, C)).toarray(), expected) + + def test_block_diag_scalar_1d_args(self): + """ block_diag with scalar and 1d arguments """ + # one 1d matrix and a scalar + assert_array_equal(construct.block_diag([[2,3], 4]).toarray(), + [[2, 3, 0], [0, 0, 4]]) + # 1d sparse arrays + A = coo_array([1,0,3]) + B = coo_array([0,4]) + assert_array_equal(construct.block_diag([A, B]).toarray(), + [[1, 0, 3, 0, 0], [0, 0, 0, 0, 4]]) + + + def test_block_diag_1(self): + """ block_diag with one matrix """ + assert_equal(construct.block_diag([[1, 0]]).toarray(), + array([[1, 0]])) + assert_equal(construct.block_diag([[[1, 0]]]).toarray(), + array([[1, 0]])) + assert_equal(construct.block_diag([[[1], [0]]]).toarray(), + array([[1], [0]])) + # just on scalar + assert_equal(construct.block_diag([1]).toarray(), + array([[1]])) + + def test_block_diag_sparse_arrays(self): + """ block_diag with sparse arrays """ + + A = coo_array([[1, 2, 3]], shape=(1, 3)) + B = coo_array([[4, 5]], shape=(1, 2)) + assert_equal(construct.block_diag([A, B]).toarray(), + array([[1, 2, 3, 0, 0], [0, 0, 0, 4, 5]])) + + A = coo_array([[1], [2], [3]], shape=(3, 1)) + B = coo_array([[4], [5]], shape=(2, 1)) + assert_equal(construct.block_diag([A, B]).toarray(), + array([[1, 0], [2, 0], [3, 0], [0, 4], [0, 5]])) + + def test_block_diag_return_type(self): + A, B = coo_array([[1, 2, 3]]), coo_matrix([[2, 3, 4]]) + assert 
isinstance(construct.block_diag([A, A]), sparray) + assert isinstance(construct.block_diag([A, B]), sparray) + assert isinstance(construct.block_diag([B, A]), sparray) + assert isinstance(construct.block_diag([B, B]), spmatrix) + + def test_random_sampling(self): + # Simple sanity checks for sparse random sampling. + for f in sprand, _sprandn: + for t in [np.float32, np.float64, np.longdouble, + np.int32, np.int64, np.complex64, np.complex128]: + x = f(5, 10, density=0.1, dtype=t) + assert_equal(x.dtype, t) + assert_equal(x.shape, (5, 10)) + assert_equal(x.nnz, 5) + + x1 = f(5, 10, density=0.1, random_state=4321) + assert_equal(x1.dtype, np.float64) + + x2 = f(5, 10, density=0.1, + random_state=np.random.RandomState(4321)) + + assert_array_equal(x1.data, x2.data) + assert_array_equal(x1.row, x2.row) + assert_array_equal(x1.col, x2.col) + + for density in [0.0, 0.1, 0.5, 1.0]: + x = f(5, 10, density=density) + assert_equal(x.nnz, int(density * np.prod(x.shape))) + + for fmt in ['coo', 'csc', 'csr', 'lil']: + x = f(5, 10, format=fmt) + assert_equal(x.format, fmt) + + assert_raises(ValueError, lambda: f(5, 10, 1.1)) + assert_raises(ValueError, lambda: f(5, 10, -0.1)) + + def test_rand(self): + # Simple distributional checks for sparse.rand. + random_states = [None, 4321, np.random.RandomState()] + try: + gen = np.random.default_rng() + random_states.append(gen) + except AttributeError: + pass + + for random_state in random_states: + x = sprand(10, 20, density=0.5, dtype=np.float64, + random_state=random_state) + assert_(np.all(np.less_equal(0, x.data))) + assert_(np.all(np.less_equal(x.data, 1))) + + def test_randn(self): + # Simple distributional checks for sparse.randn. + # Statistically, some of these should be negative + # and some should be greater than 1. 
+ random_states = [None, 4321, np.random.RandomState()] + try: + gen = np.random.default_rng() + random_states.append(gen) + except AttributeError: + pass + + for rs in random_states: + x = _sprandn(10, 20, density=0.5, dtype=np.float64, random_state=rs) + assert_(np.any(np.less(x.data, 0))) + assert_(np.any(np.less(1, x.data))) + x = _sprandn_array(10, 20, density=0.5, dtype=np.float64, random_state=rs) + assert_(np.any(np.less(x.data, 0))) + assert_(np.any(np.less(1, x.data))) + + def test_random_accept_str_dtype(self): + # anything that np.dtype can convert to a dtype should be accepted + # for the dtype + construct.random(10, 10, dtype='d') + construct.random_array((10, 10), dtype='d') + + def test_random_sparse_matrix_returns_correct_number_of_non_zero_elements(self): + # A 10 x 10 matrix, with density of 12.65%, should have 13 nonzero elements. + # 10 x 10 x 0.1265 = 12.65, which should be rounded up to 13, not 12. + sparse_matrix = construct.random(10, 10, density=0.1265) + assert_equal(sparse_matrix.count_nonzero(),13) + # check random_array + sparse_array = construct.random_array((10, 10), density=0.1265) + assert_equal(sparse_array.count_nonzero(),13) + assert isinstance(sparse_array, sparray) + # check big size + shape = (2**33, 2**33) + sparse_array = construct.random_array(shape, density=2.7105e-17) + assert_equal(sparse_array.count_nonzero(),2000) + + +def test_diags_array(): + """Tests of diags_array that do not rely on diags wrapper.""" + diag = np.arange(1, 5) + + assert_array_equal(construct.diags_array(diag).toarray(), np.diag(diag)) + + assert_array_equal( + construct.diags_array(diag, offsets=2).toarray(), np.diag(diag, k=2) + ) + + assert_array_equal( + construct.diags_array(diag, offsets=2, shape=(4, 4)).toarray(), + np.diag(diag, k=2)[:4, :4] + ) + + # Offset outside bounds when shape specified + with pytest.raises(ValueError, match=".*out of bounds"): + construct.diags(np.arange(1, 5), 5, shape=(4, 4)) diff --git 
a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_coo.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_coo.py new file mode 100644 index 0000000000000000000000000000000000000000..b9c80e9e16d61fdd4519b53af77bde8571c7d975 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_coo.py @@ -0,0 +1,274 @@ +import numpy as np +import pytest +from scipy.sparse import coo_array + + +def test_shape_constructor(): + empty1d = coo_array((3,)) + assert empty1d.shape == (3,) + assert np.array_equal(empty1d.toarray(), np.zeros((3,))) + + empty2d = coo_array((3, 2)) + assert empty2d.shape == (3, 2) + assert np.array_equal(empty2d.toarray(), np.zeros((3, 2))) + + with pytest.raises(TypeError, match='invalid input format'): + coo_array((3, 2, 2)) + + +def test_dense_constructor(): + res1d = coo_array([1, 2, 3]) + assert res1d.shape == (3,) + assert np.array_equal(res1d.toarray(), np.array([1, 2, 3])) + + res2d = coo_array([[1, 2, 3], [4, 5, 6]]) + assert res2d.shape == (2, 3) + assert np.array_equal(res2d.toarray(), np.array([[1, 2, 3], [4, 5, 6]])) + + with pytest.raises(ValueError, match='shape must be a 1- or 2-tuple'): + coo_array([[[3]], [[4]]]) + + +def test_dense_constructor_with_shape(): + res1d = coo_array([1, 2, 3], shape=(3,)) + assert res1d.shape == (3,) + assert np.array_equal(res1d.toarray(), np.array([1, 2, 3])) + + res2d = coo_array([[1, 2, 3], [4, 5, 6]], shape=(2, 3)) + assert res2d.shape == (2, 3) + assert np.array_equal(res2d.toarray(), np.array([[1, 2, 3], [4, 5, 6]])) + + with pytest.raises(ValueError, match='shape must be a 1- or 2-tuple'): + coo_array([[[3]], [[4]]], shape=(2, 1, 1)) + + +def test_dense_constructor_with_inconsistent_shape(): + with pytest.raises(ValueError, match='inconsistent shapes'): + coo_array([1, 2, 3], shape=(4,)) + + with pytest.raises(ValueError, match='inconsistent shapes'): + coo_array([1, 2, 3], shape=(3, 1)) + + with pytest.raises(ValueError, match='inconsistent 
shapes'): + coo_array([[1, 2, 3]], shape=(3,)) + + with pytest.raises(ValueError, + match='axis 0 index 2 exceeds matrix dimension 2'): + coo_array(([1], ([2],)), shape=(2,)) + + with pytest.raises(ValueError, match='negative axis 0 index: -1'): + coo_array(([1], ([-1],))) + + +def test_1d_sparse_constructor(): + empty1d = coo_array((3,)) + res = coo_array(empty1d) + assert res.shape == (3,) + assert np.array_equal(res.toarray(), np.zeros((3,))) + + +def test_1d_tuple_constructor(): + res = coo_array(([9,8], ([1,2],))) + assert res.shape == (3,) + assert np.array_equal(res.toarray(), np.array([0, 9, 8])) + + +def test_1d_tuple_constructor_with_shape(): + res = coo_array(([9,8], ([1,2],)), shape=(4,)) + assert res.shape == (4,) + assert np.array_equal(res.toarray(), np.array([0, 9, 8, 0])) + +def test_non_subscriptability(): + coo_2d = coo_array((2, 2)) + + with pytest.raises(TypeError, + match="'coo_array' object does not support item assignment"): + coo_2d[0, 0] = 1 + + with pytest.raises(TypeError, + match="'coo_array' object is not subscriptable"): + coo_2d[0, :] + +def test_reshape(): + arr1d = coo_array([1, 0, 3]) + assert arr1d.shape == (3,) + + col_vec = arr1d.reshape((3, 1)) + assert col_vec.shape == (3, 1) + assert np.array_equal(col_vec.toarray(), np.array([[1], [0], [3]])) + + row_vec = arr1d.reshape((1, 3)) + assert row_vec.shape == (1, 3) + assert np.array_equal(row_vec.toarray(), np.array([[1, 0, 3]])) + + arr2d = coo_array([[1, 2, 0], [0, 0, 3]]) + assert arr2d.shape == (2, 3) + + flat = arr2d.reshape((6,)) + assert flat.shape == (6,) + assert np.array_equal(flat.toarray(), np.array([1, 2, 0, 0, 0, 3])) + + +def test_nnz(): + arr1d = coo_array([1, 0, 3]) + assert arr1d.shape == (3,) + assert arr1d.nnz == 2 + + arr2d = coo_array([[1, 2, 0], [0, 0, 3]]) + assert arr2d.shape == (2, 3) + assert arr2d.nnz == 3 + + +def test_transpose(): + arr1d = coo_array([1, 0, 3]).T + assert arr1d.shape == (3,) + assert np.array_equal(arr1d.toarray(), np.array([1, 0, 
3])) + + arr2d = coo_array([[1, 2, 0], [0, 0, 3]]).T + assert arr2d.shape == (3, 2) + assert np.array_equal(arr2d.toarray(), np.array([[1, 0], [2, 0], [0, 3]])) + + +def test_transpose_with_axis(): + arr1d = coo_array([1, 0, 3]).transpose(axes=(0,)) + assert arr1d.shape == (3,) + assert np.array_equal(arr1d.toarray(), np.array([1, 0, 3])) + + arr2d = coo_array([[1, 2, 0], [0, 0, 3]]).transpose(axes=(0, 1)) + assert arr2d.shape == (2, 3) + assert np.array_equal(arr2d.toarray(), np.array([[1, 2, 0], [0, 0, 3]])) + + with pytest.raises(ValueError, match="axes don't match matrix dimensions"): + coo_array([1, 0, 3]).transpose(axes=(0, 1)) + + with pytest.raises(ValueError, match="repeated axis in transpose"): + coo_array([[1, 2, 0], [0, 0, 3]]).transpose(axes=(1, 1)) + + +def test_1d_row_and_col(): + res = coo_array([1, -2, -3]) + assert np.array_equal(res.col, np.array([0, 1, 2])) + assert np.array_equal(res.row, np.zeros_like(res.col)) + assert res.row.dtype == res.col.dtype + assert res.row.flags.writeable is False + + res.col = [1, 2, 3] + assert len(res.coords) == 1 + assert np.array_equal(res.col, np.array([1, 2, 3])) + assert res.row.dtype == res.col.dtype + + with pytest.raises(ValueError, match="cannot set row attribute"): + res.row = [1, 2, 3] + + +def test_1d_toformats(): + res = coo_array([1, -2, -3]) + for f in [res.tocsc, res.tocsr, res.todia, res.tolil, res.tobsr]: + with pytest.raises(ValueError, match='Cannot convert'): + f() + for f in [res.tocoo, res.todok]: + assert np.array_equal(f().toarray(), res.toarray()) + + +@pytest.mark.parametrize('arg', [1, 2, 4, 5, 8]) +def test_1d_resize(arg: int): + den = np.array([1, -2, -3]) + res = coo_array(den) + den.resize(arg, refcheck=False) + res.resize(arg) + assert res.shape == den.shape + assert np.array_equal(res.toarray(), den) + + +@pytest.mark.parametrize('arg', zip([1, 2, 3, 4], [1, 2, 3, 4])) +def test_1d_to_2d_resize(arg: tuple[int, int]): + den = np.array([1, 0, 3]) + res = coo_array(den) + + 
den.resize(arg, refcheck=False) + res.resize(arg) + assert res.shape == den.shape + assert np.array_equal(res.toarray(), den) + + +@pytest.mark.parametrize('arg', [1, 4, 6, 8]) +def test_2d_to_1d_resize(arg: int): + den = np.array([[1, 0, 3], [4, 0, 0]]) + res = coo_array(den) + den.resize(arg, refcheck=False) + res.resize(arg) + assert res.shape == den.shape + assert np.array_equal(res.toarray(), den) + + +def test_sum_duplicates(): + arr1d = coo_array(([2, 2, 2], ([1, 0, 1],))) + assert arr1d.nnz == 3 + assert np.array_equal(arr1d.toarray(), np.array([2, 4])) + arr1d.sum_duplicates() + assert arr1d.nnz == 2 + assert np.array_equal(arr1d.toarray(), np.array([2, 4])) + + +def test_eliminate_zeros(): + arr1d = coo_array(([0, 0, 1], ([1, 0, 1],))) + assert arr1d.nnz == 3 + assert arr1d.count_nonzero() == 1 + assert np.array_equal(arr1d.toarray(), np.array([0, 1])) + arr1d.eliminate_zeros() + assert arr1d.nnz == 1 + assert arr1d.count_nonzero() == 1 + assert np.array_equal(arr1d.toarray(), np.array([0, 1])) + assert np.array_equal(arr1d.col, np.array([1])) + assert np.array_equal(arr1d.row, np.array([0])) + + +def test_1d_add_dense(): + den_a = np.array([0, -2, -3, 0]) + den_b = np.array([0, 1, 2, 3]) + exp = den_a + den_b + res = coo_array(den_a) + den_b + assert type(res) == type(exp) + assert np.array_equal(res, exp) + + +def test_1d_add_sparse(): + den_a = np.array([0, -2, -3, 0]) + den_b = np.array([0, 1, 2, 3]) + # Currently this routes through CSR format, so 1d sparse addition + # isn't supported. 
+ with pytest.raises(ValueError, + match='Cannot convert a 1d sparse array'): + coo_array(den_a) + coo_array(den_b) + + +def test_1d_matmul_vector(): + den_a = np.array([0, -2, -3, 0]) + den_b = np.array([0, 1, 2, 3]) + exp = den_a @ den_b + res = coo_array(den_a) @ den_b + assert np.ndim(res) == 0 + assert np.array_equal(res, exp) + + +def test_1d_matmul_multivector(): + den = np.array([0, -2, -3, 0]) + other = np.array([[0, 1, 2, 3], [3, 2, 1, 0]]).T + exp = den @ other + res = coo_array(den) @ other + assert type(res) == type(exp) + assert np.array_equal(res, exp) + + +def test_2d_matmul_multivector(): + den = np.array([[0, 1, 2, 3], [3, 2, 1, 0]]) + arr2d = coo_array(den) + exp = den @ den.T + res = arr2d @ arr2d.T + assert np.array_equal(res.toarray(), exp) + + +def test_1d_diagonal(): + den = np.array([0, -2, -3, 0]) + with pytest.raises(ValueError, match='diagonal requires two dimensions'): + coo_array(den).diagonal() diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_csc.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_csc.py new file mode 100644 index 0000000000000000000000000000000000000000..6313751e41899ae7c5daf01fbdbbacdc1f303fa1 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_csc.py @@ -0,0 +1,98 @@ +import numpy as np +from numpy.testing import assert_array_almost_equal, assert_ +from scipy.sparse import csr_matrix, csc_matrix, lil_matrix + +import pytest + + +def test_csc_getrow(): + N = 10 + np.random.seed(0) + X = np.random.random((N, N)) + X[X > 0.7] = 0 + Xcsc = csc_matrix(X) + + for i in range(N): + arr_row = X[i:i + 1, :] + csc_row = Xcsc.getrow(i) + + assert_array_almost_equal(arr_row, csc_row.toarray()) + assert_(type(csc_row) is csr_matrix) + + +def test_csc_getcol(): + N = 10 + np.random.seed(0) + X = np.random.random((N, N)) + X[X > 0.7] = 0 + Xcsc = csc_matrix(X) + + for i in range(N): + arr_col = X[:, i:i + 1] + csc_col = Xcsc.getcol(i) + + 
assert_array_almost_equal(arr_col, csc_col.toarray()) + assert_(type(csc_col) is csc_matrix) + +@pytest.mark.parametrize("matrix_input, axis, expected_shape", + [(csc_matrix([[1, 0], + [0, 0], + [0, 2]]), + 0, (0, 2)), + (csc_matrix([[1, 0], + [0, 0], + [0, 2]]), + 1, (3, 0)), + (csc_matrix([[1, 0], + [0, 0], + [0, 2]]), + 'both', (0, 0)), + (csc_matrix([[0, 1, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 2, 3, 0, 1]]), + 0, (0, 6))]) +def test_csc_empty_slices(matrix_input, axis, expected_shape): + # see gh-11127 for related discussion + slice_1 = matrix_input.toarray().shape[0] - 1 + slice_2 = slice_1 + slice_3 = slice_2 - 1 + + if axis == 0: + actual_shape_1 = matrix_input[slice_1:slice_2, :].toarray().shape + actual_shape_2 = matrix_input[slice_1:slice_3, :].toarray().shape + elif axis == 1: + actual_shape_1 = matrix_input[:, slice_1:slice_2].toarray().shape + actual_shape_2 = matrix_input[:, slice_1:slice_3].toarray().shape + elif axis == 'both': + actual_shape_1 = matrix_input[slice_1:slice_2, slice_1:slice_2].toarray().shape + actual_shape_2 = matrix_input[slice_1:slice_3, slice_1:slice_3].toarray().shape + + assert actual_shape_1 == expected_shape + assert actual_shape_1 == actual_shape_2 + + +@pytest.mark.parametrize('ax', (-2, -1, 0, 1, None)) +def test_argmax_overflow(ax): + # See gh-13646: Windows integer overflow for large sparse matrices. 
+ dim = (100000, 100000) + A = lil_matrix(dim) + A[-2, -2] = 42 + A[-3, -3] = 0.1234 + A = csc_matrix(A) + idx = A.argmax(axis=ax) + + if ax is None: + # idx is a single flattened index + # that we need to convert to a 2d index pair; + # can't do this with np.unravel_index because + # the dimensions are too large + ii = idx % dim[0] + jj = idx // dim[0] + else: + # idx is an array of size of A.shape[ax]; + # check the max index to make sure no overflows + # we encountered + assert np.count_nonzero(idx) == A.nnz + ii, jj = np.max(idx), np.argmax(idx) + + assert A[ii, jj] == A[-2, -2] diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_csr.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_csr.py new file mode 100644 index 0000000000000000000000000000000000000000..57101c5d89f342149d56ab295aefa26ff5b497d0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_csr.py @@ -0,0 +1,184 @@ +import numpy as np +from numpy.testing import assert_array_almost_equal, assert_ +from scipy.sparse import csr_matrix, csc_matrix, csr_array, csc_array, hstack +from scipy import sparse +import pytest + + +def _check_csr_rowslice(i, sl, X, Xcsr): + np_slice = X[i, sl] + csr_slice = Xcsr[i, sl] + assert_array_almost_equal(np_slice, csr_slice.toarray()[0]) + assert_(type(csr_slice) is csr_matrix) + + +def test_csr_rowslice(): + N = 10 + np.random.seed(0) + X = np.random.random((N, N)) + X[X > 0.7] = 0 + Xcsr = csr_matrix(X) + + slices = [slice(None, None, None), + slice(None, None, -1), + slice(1, -2, 2), + slice(-2, 1, -2)] + + for i in range(N): + for sl in slices: + _check_csr_rowslice(i, sl, X, Xcsr) + + +def test_csr_getrow(): + N = 10 + np.random.seed(0) + X = np.random.random((N, N)) + X[X > 0.7] = 0 + Xcsr = csr_matrix(X) + + for i in range(N): + arr_row = X[i:i + 1, :] + csr_row = Xcsr.getrow(i) + + assert_array_almost_equal(arr_row, csr_row.toarray()) + assert_(type(csr_row) is csr_matrix) + + +def 
test_csr_getcol(): + N = 10 + np.random.seed(0) + X = np.random.random((N, N)) + X[X > 0.7] = 0 + Xcsr = csr_matrix(X) + + for i in range(N): + arr_col = X[:, i:i + 1] + csr_col = Xcsr.getcol(i) + + assert_array_almost_equal(arr_col, csr_col.toarray()) + assert_(type(csr_col) is csr_matrix) + +@pytest.mark.parametrize("matrix_input, axis, expected_shape", + [(csr_matrix([[1, 0, 0, 0], + [0, 0, 0, 0], + [0, 2, 3, 0]]), + 0, (0, 4)), + (csr_matrix([[1, 0, 0, 0], + [0, 0, 0, 0], + [0, 2, 3, 0]]), + 1, (3, 0)), + (csr_matrix([[1, 0, 0, 0], + [0, 0, 0, 0], + [0, 2, 3, 0]]), + 'both', (0, 0)), + (csr_matrix([[0, 1, 0, 0, 0], + [0, 0, 0, 0, 0], + [0, 0, 2, 3, 0]]), + 0, (0, 5))]) +def test_csr_empty_slices(matrix_input, axis, expected_shape): + # see gh-11127 for related discussion + slice_1 = matrix_input.toarray().shape[0] - 1 + slice_2 = slice_1 + slice_3 = slice_2 - 1 + + if axis == 0: + actual_shape_1 = matrix_input[slice_1:slice_2, :].toarray().shape + actual_shape_2 = matrix_input[slice_1:slice_3, :].toarray().shape + elif axis == 1: + actual_shape_1 = matrix_input[:, slice_1:slice_2].toarray().shape + actual_shape_2 = matrix_input[:, slice_1:slice_3].toarray().shape + elif axis == 'both': + actual_shape_1 = matrix_input[slice_1:slice_2, slice_1:slice_2].toarray().shape + actual_shape_2 = matrix_input[slice_1:slice_3, slice_1:slice_3].toarray().shape + + assert actual_shape_1 == expected_shape + assert actual_shape_1 == actual_shape_2 + + +def test_csr_bool_indexing(): + data = csr_matrix([[0, 1, 2], [3, 4, 5], [6, 7, 8]]) + list_indices1 = [False, True, False] + array_indices1 = np.array(list_indices1) + list_indices2 = [[False, True, False], [False, True, False], [False, True, False]] + array_indices2 = np.array(list_indices2) + list_indices3 = ([False, True, False], [False, True, False]) + array_indices3 = (np.array(list_indices3[0]), np.array(list_indices3[1])) + slice_list1 = data[list_indices1].toarray() + slice_array1 = data[array_indices1].toarray() + 
slice_list2 = data[list_indices2] + slice_array2 = data[array_indices2] + slice_list3 = data[list_indices3] + slice_array3 = data[array_indices3] + assert (slice_list1 == slice_array1).all() + assert (slice_list2 == slice_array2).all() + assert (slice_list3 == slice_array3).all() + + +def test_csr_hstack_int64(): + """ + Tests if hstack properly promotes to indices and indptr arrays to np.int64 + when using np.int32 during concatenation would result in either array + overflowing. + """ + max_int32 = np.iinfo(np.int32).max + + # First case: indices would overflow with int32 + data = [1.0] + row = [0] + + max_indices_1 = max_int32 - 1 + max_indices_2 = 3 + + # Individual indices arrays are representable with int32 + col_1 = [max_indices_1 - 1] + col_2 = [max_indices_2 - 1] + + X_1 = csr_matrix((data, (row, col_1))) + X_2 = csr_matrix((data, (row, col_2))) + + assert max(max_indices_1 - 1, max_indices_2 - 1) < max_int32 + assert X_1.indices.dtype == X_1.indptr.dtype == np.int32 + assert X_2.indices.dtype == X_2.indptr.dtype == np.int32 + + # ... but when concatenating their CSR matrices, the resulting indices + # array can't be represented with int32 and must be promoted to int64. + X_hs = hstack([X_1, X_2], format="csr") + + assert X_hs.indices.max() == max_indices_1 + max_indices_2 - 1 + assert max_indices_1 + max_indices_2 - 1 > max_int32 + assert X_hs.indices.dtype == X_hs.indptr.dtype == np.int64 + + # Even if the matrices are empty, we must account for their size + # contribution so that we may safely set the final elements. + X_1_empty = csr_matrix(X_1.shape) + X_2_empty = csr_matrix(X_2.shape) + X_hs_empty = hstack([X_1_empty, X_2_empty], format="csr") + + assert X_hs_empty.shape == X_hs.shape + assert X_hs_empty.indices.dtype == np.int64 + + # Should be just small enough to stay in int32 after stack. 
Note that + # we theoretically could support indices.max() == max_int32, but due to an + # edge-case in the underlying sparsetools code + # (namely the `coo_tocsr` routine), + # we require that max(X_hs_32.shape) < max_int32 as well. + # Hence we can only support max_int32 - 1. + col_3 = [max_int32 - max_indices_1 - 1] + X_3 = csr_matrix((data, (row, col_3))) + X_hs_32 = hstack([X_1, X_3], format="csr") + assert X_hs_32.indices.dtype == np.int32 + assert X_hs_32.indices.max() == max_int32 - 1 + +@pytest.mark.parametrize("cls", [csr_matrix, csr_array, csc_matrix, csc_array]) +def test_mixed_index_dtype_int_indexing(cls): + # https://github.com/scipy/scipy/issues/20182 + rng = np.random.default_rng(0) + base_mtx = cls(sparse.random(50, 50, random_state=rng, density=0.1)) + indptr_64bit = base_mtx.copy() + indices_64bit = base_mtx.copy() + indptr_64bit.indptr = base_mtx.indptr.astype(np.int64) + indices_64bit.indices = base_mtx.indices.astype(np.int64) + + for mtx in [base_mtx, indptr_64bit, indices_64bit]: + np.testing.assert_array_equal(mtx[[1,2], :].toarray(), base_mtx[[1, 2], :].toarray()) + np.testing.assert_array_equal(mtx[:, [1, 2]].toarray(), base_mtx[:, [1, 2]].toarray()) \ No newline at end of file diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_deprecations.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_deprecations.py new file mode 100644 index 0000000000000000000000000000000000000000..c73b9bc4bb4783d5b2cf4331dadeb038f9deae54 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_deprecations.py @@ -0,0 +1,31 @@ +import scipy as sp +import pytest + + +def test_array_api_deprecations(): + X = sp.sparse.csr_array([ + [1,2,3], + [4,0,6] + ]) + msg = "1.14.0" + + with pytest.deprecated_call(match=msg): + X.get_shape() + + with pytest.deprecated_call(match=msg): + X.set_shape((2,3)) + + with pytest.deprecated_call(match=msg): + X.asfptype() + + with 
pytest.deprecated_call(match=msg): + X.getmaxprint() + + with pytest.deprecated_call(match=msg): + X.getH() + + with pytest.deprecated_call(match=msg): + X.getcol(1).todense() + + with pytest.deprecated_call(match=msg): + X.getrow(1).todense() diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_dok.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_dok.py new file mode 100644 index 0000000000000000000000000000000000000000..65db3b6ddecd9288cca95762dea334b47fac00cc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_dok.py @@ -0,0 +1,203 @@ +import pytest +import numpy as np +from numpy.testing import assert_equal +import scipy as sp +from scipy.sparse import dok_array, dok_matrix + + +@pytest.fixture +def d(): + return {(0, 1): 1, (0, 2): 2} + +@pytest.fixture +def A(): + return np.array([[0, 1, 2], [0, 0, 0], [0, 0, 0]]) + +@pytest.fixture(params=[dok_array, dok_matrix]) +def Asp(request): + A = request.param((3, 3)) + A[(0, 1)] = 1 + A[(0, 2)] = 2 + yield A + +# Note: __iter__ and comparison dunders act like ndarrays for DOK, not dict. +# Dunders reversed, or, ror, ior work as dict for dok_matrix, raise for dok_array +# All other dict methods on DOK format act like dict methods (with extra checks). 
+ +# Start of tests +################ +def test_dict_methods_covered(d, Asp): + d_methods = set(dir(d)) - {"__class_getitem__"} + asp_methods = set(dir(Asp)) + assert d_methods < asp_methods + +def test_clear(d, Asp): + assert d.items() == Asp.items() + d.clear() + Asp.clear() + assert d.items() == Asp.items() + +def test_copy(d, Asp): + assert d.items() == Asp.items() + dd = d.copy() + asp = Asp.copy() + assert dd.items() == asp.items() + assert asp.items() == Asp.items() + asp[(0, 1)] = 3 + assert Asp[(0, 1)] == 1 + +def test_fromkeys_default(): + # test with default value + edges = [(0, 2), (1, 0), (2, 1)] + Xdok = dok_array.fromkeys(edges) + X = [[0, 0, 1], [1, 0, 0], [0, 1, 0]] + assert_equal(Xdok.toarray(), X) + +def test_fromkeys_positional(): + # test with positional value + edges = [(0, 2), (1, 0), (2, 1)] + Xdok = dok_array.fromkeys(edges, -1) + X = [[0, 0, -1], [-1, 0, 0], [0, -1, 0]] + assert_equal(Xdok.toarray(), X) + +def test_fromkeys_iterator(): + it = ((a, a % 2) for a in range(4)) + Xdok = dok_array.fromkeys(it) + X = [[1, 0], [0, 1], [1, 0], [0, 1]] + assert_equal(Xdok.toarray(), X) + +def test_get(d, Asp): + assert Asp.get((0, 1)) == d.get((0, 1)) + assert Asp.get((0, 0), 99) == d.get((0, 0), 99) + with pytest.raises(IndexError, match="out of bounds"): + Asp.get((0, 4), 99) + +def test_items(d, Asp): + assert Asp.items() == d.items() + +def test_keys(d, Asp): + assert Asp.keys() == d.keys() + +def test_pop(d, Asp): + assert d.pop((0, 1)) == 1 + assert Asp.pop((0, 1)) == 1 + assert d.items() == Asp.items() + +def test_popitem(d, Asp): + assert d.popitem() == Asp.popitem() + assert d.items() == Asp.items() + +def test_setdefault(d, Asp): + assert Asp.setdefault((0, 1), 4) == 1 + assert Asp.setdefault((2, 2), 4) == 4 + d.setdefault((0, 1), 4) + d.setdefault((2, 2), 4) + assert d.items() == Asp.items() + +def test_update(d, Asp): + with pytest.raises(NotImplementedError): + Asp.update(Asp) + +def test_values(d, Asp): + # Note: dict.values are 
strange: d={1: 1}; d.values() == d.values() is False + # Using list(d.values()) makes them comparable. + assert list(Asp.values()) == list(d.values()) + +def test_dunder_getitem(d, Asp): + assert Asp[(0, 1)] == d[(0, 1)] + +def test_dunder_setitem(d, Asp): + Asp[(1, 1)] = 5 + d[(1, 1)] = 5 + assert d.items() == Asp.items() + +def test_dunder_delitem(d, Asp): + del Asp[(0, 1)] + del d[(0, 1)] + assert d.items() == Asp.items() + +def test_dunder_contains(d, Asp): + assert ((0, 1) in d) == ((0, 1) in Asp) + assert ((0, 0) in d) == ((0, 0) in Asp) + +def test_dunder_len(d, Asp): + assert len(d) == len(Asp) + +# Note: dunders reversed, or, ror, ior work as dict for dok_matrix, raise for dok_array +def test_dunder_reversed(d, Asp): + if isinstance(Asp, dok_array): + with pytest.raises(TypeError): + list(reversed(Asp)) + else: + list(reversed(Asp)) == list(reversed(d)) + +def test_dunder_ior(d, Asp): + if isinstance(Asp, dok_array): + with pytest.raises(TypeError): + Asp |= Asp + else: + dd = {(0, 0): 5} + Asp |= dd + assert Asp[(0, 0)] == 5 + d |= dd + assert d.items() == Asp.items() + dd |= Asp + assert dd.items() == Asp.items() + +def test_dunder_or(d, Asp): + if isinstance(Asp, dok_array): + with pytest.raises(TypeError): + Asp | Asp + else: + assert d | d == Asp | d + assert d | d == Asp | Asp + +def test_dunder_ror(d, Asp): + if isinstance(Asp, dok_array): + with pytest.raises(TypeError): + Asp | Asp + with pytest.raises(TypeError): + d | Asp + else: + assert Asp.__ror__(d) == Asp.__ror__(Asp) + assert d.__ror__(d) == Asp.__ror__(d) + assert d | Asp + +# Note: comparison dunders, e.g. 
==, >=, etc follow np.array not dict +def test_dunder_eq(A, Asp): + with np.testing.suppress_warnings() as sup: + sup.filter(sp.sparse.SparseEfficiencyWarning) + assert (Asp == Asp).toarray().all() + assert (A == Asp).all() + +def test_dunder_ne(A, Asp): + assert not (Asp != Asp).toarray().any() + assert not (A != Asp).any() + +def test_dunder_lt(A, Asp): + assert not (Asp < Asp).toarray().any() + assert not (A < Asp).any() + +def test_dunder_gt(A, Asp): + assert not (Asp > Asp).toarray().any() + assert not (A > Asp).any() + +def test_dunder_le(A, Asp): + with np.testing.suppress_warnings() as sup: + sup.filter(sp.sparse.SparseEfficiencyWarning) + assert (Asp <= Asp).toarray().all() + assert (A <= Asp).all() + +def test_dunder_ge(A, Asp): + with np.testing.suppress_warnings() as sup: + sup.filter(sp.sparse.SparseEfficiencyWarning) + assert (Asp >= Asp).toarray().all() + assert (A >= Asp).all() + +# Note: iter dunder follows np.array not dict +def test_dunder_iter(A, Asp): + if isinstance(Asp, dok_array): + with pytest.raises(NotImplementedError): + [a.toarray() for a in Asp] + else: + assert all((a == asp).all() for a, asp in zip(A, Asp)) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_extract.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_extract.py new file mode 100644 index 0000000000000000000000000000000000000000..a7c9f68bb2bde76d74ca767abba3c99b89d6e771 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_extract.py @@ -0,0 +1,51 @@ +"""test sparse matrix construction functions""" + +from numpy.testing import assert_equal +from scipy.sparse import csr_matrix, csr_array, sparray + +import numpy as np +from scipy.sparse import _extract + + +class TestExtract: + def setup_method(self): + self.cases = [ + csr_array([[1,2]]), + csr_array([[1,0]]), + csr_array([[0,0]]), + csr_array([[1],[2]]), + csr_array([[1],[0]]), + csr_array([[0],[0]]), + csr_array([[1,2],[3,4]]), + 
csr_array([[0,1],[0,0]]), + csr_array([[0,0],[1,0]]), + csr_array([[0,0],[0,0]]), + csr_array([[1,2,0,0,3],[4,5,0,6,7],[0,0,8,9,0]]), + csr_array([[1,2,0,0,3],[4,5,0,6,7],[0,0,8,9,0]]).T, + ] + + def test_find(self): + for A in self.cases: + I,J,V = _extract.find(A) + B = csr_array((V,(I,J)), shape=A.shape) + assert_equal(A.toarray(), B.toarray()) + + def test_tril(self): + for A in self.cases: + B = A.toarray() + for k in [-3,-2,-1,0,1,2,3]: + assert_equal(_extract.tril(A,k=k).toarray(), np.tril(B,k=k)) + + def test_triu(self): + for A in self.cases: + B = A.toarray() + for k in [-3,-2,-1,0,1,2,3]: + assert_equal(_extract.triu(A,k=k).toarray(), np.triu(B,k=k)) + + def test_array_vs_matrix(self): + for A in self.cases: + assert isinstance(_extract.tril(A), sparray) + assert isinstance(_extract.triu(A), sparray) + M = csr_matrix(A) + assert not isinstance(_extract.tril(M), sparray) + assert not isinstance(_extract.triu(M), sparray) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_matrix_io.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_matrix_io.py new file mode 100644 index 0000000000000000000000000000000000000000..90b4ea64a8928073eb5dd3f1b2752379f57327d9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_matrix_io.py @@ -0,0 +1,109 @@ +import os +import numpy as np +import tempfile + +from pytest import raises as assert_raises +from numpy.testing import assert_equal, assert_ + +from scipy.sparse import (sparray, csc_matrix, csr_matrix, bsr_matrix, dia_matrix, + coo_matrix, dok_matrix, csr_array, save_npz, load_npz) + + +DATA_DIR = os.path.join(os.path.dirname(__file__), 'data') + + +def _save_and_load(matrix): + fd, tmpfile = tempfile.mkstemp(suffix='.npz') + os.close(fd) + try: + save_npz(tmpfile, matrix) + loaded_matrix = load_npz(tmpfile) + finally: + os.remove(tmpfile) + return loaded_matrix + +def _check_save_and_load(dense_matrix): + for matrix_class in [csc_matrix, 
csr_matrix, bsr_matrix, dia_matrix, coo_matrix]: + matrix = matrix_class(dense_matrix) + loaded_matrix = _save_and_load(matrix) + assert_(type(loaded_matrix) is matrix_class) + assert_(loaded_matrix.shape == dense_matrix.shape) + assert_(loaded_matrix.dtype == dense_matrix.dtype) + assert_equal(loaded_matrix.toarray(), dense_matrix) + +def test_save_and_load_random(): + N = 10 + np.random.seed(0) + dense_matrix = np.random.random((N, N)) + dense_matrix[dense_matrix > 0.7] = 0 + _check_save_and_load(dense_matrix) + +def test_save_and_load_empty(): + dense_matrix = np.zeros((4,6)) + _check_save_and_load(dense_matrix) + +def test_save_and_load_one_entry(): + dense_matrix = np.zeros((4,6)) + dense_matrix[1,2] = 1 + _check_save_and_load(dense_matrix) + +def test_sparray_vs_spmatrix(): + #save/load matrix + fd, tmpfile = tempfile.mkstemp(suffix='.npz') + os.close(fd) + try: + save_npz(tmpfile, csr_matrix([[1.2, 0, 0.9], [0, 0.3, 0]])) + loaded_matrix = load_npz(tmpfile) + finally: + os.remove(tmpfile) + + #save/load array + fd, tmpfile = tempfile.mkstemp(suffix='.npz') + os.close(fd) + try: + save_npz(tmpfile, csr_array([[1.2, 0, 0.9], [0, 0.3, 0]])) + loaded_array = load_npz(tmpfile) + finally: + os.remove(tmpfile) + + assert not isinstance(loaded_matrix, sparray) + assert isinstance(loaded_array, sparray) + assert_(loaded_matrix.dtype == loaded_array.dtype) + assert_equal(loaded_matrix.toarray(), loaded_array.toarray()) + +def test_malicious_load(): + class Executor: + def __reduce__(self): + return (assert_, (False, 'unexpected code execution')) + + fd, tmpfile = tempfile.mkstemp(suffix='.npz') + os.close(fd) + try: + np.savez(tmpfile, format=Executor()) + + # Should raise a ValueError, not execute code + assert_raises(ValueError, load_npz, tmpfile) + finally: + os.remove(tmpfile) + + +def test_py23_compatibility(): + # Try loading files saved on Python 2 and Python 3. They are not + # the same, since files saved with SciPy versions < 1.0.0 may + # contain unicode. 
+ + a = load_npz(os.path.join(DATA_DIR, 'csc_py2.npz')) + b = load_npz(os.path.join(DATA_DIR, 'csc_py3.npz')) + c = csc_matrix([[0]]) + + assert_equal(a.toarray(), c.toarray()) + assert_equal(b.toarray(), c.toarray()) + +def test_implemented_error(): + # Attempts to save an unsupported type and checks that an + # NotImplementedError is raised. + + x = dok_matrix((2,3)) + x[0,1] = 1 + + assert_raises(NotImplementedError, save_npz, 'x.npz', x) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_minmax1d.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_minmax1d.py new file mode 100644 index 0000000000000000000000000000000000000000..53e9619314920195ba7083a0c499c3d8ebc32c37 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_minmax1d.py @@ -0,0 +1,82 @@ +"""Test of min-max 1D features of sparse array classes""" + +import pytest + +import numpy as np + +from numpy.testing import assert_equal, assert_array_equal + +from scipy.sparse import coo_array +from scipy.sparse._sputils import isscalarlike + + +def toarray(a): + if isinstance(a, np.ndarray) or isscalarlike(a): + return a + return a.toarray() + + +formats_for_minmax = [coo_array] + + +@pytest.mark.parametrize("spcreator", formats_for_minmax) +class Test_MinMaxMixin1D: + def test_minmax(self, spcreator): + D = np.arange(5) + X = spcreator(D) + + assert_equal(X.min(), 0) + assert_equal(X.max(), 4) + assert_equal((-X).min(), -4) + assert_equal((-X).max(), 0) + + + def test_minmax_axis(self, spcreator): + D = np.arange(50) + X = spcreator(D) + + for axis in [0, -1]: + assert_array_equal( + toarray(X.max(axis=axis)), D.max(axis=axis, keepdims=True) + ) + assert_array_equal( + toarray(X.min(axis=axis)), D.min(axis=axis, keepdims=True) + ) + for axis in [-2, 1]: + with pytest.raises(ValueError, match="axis out of range"): + X.min(axis=axis) + with pytest.raises(ValueError, match="axis out of range"): + X.max(axis=axis) + + + def 
test_numpy_minmax(self, spcreator): + dat = np.array([0, 1, 2]) + datsp = spcreator(dat) + assert_array_equal(np.min(datsp), np.min(dat)) + assert_array_equal(np.max(datsp), np.max(dat)) + + + def test_argmax(self, spcreator): + D1 = np.array([-1, 5, 2, 3]) + D2 = np.array([0, 0, -1, -2]) + D3 = np.array([-1, -2, -3, -4]) + D4 = np.array([1, 2, 3, 4]) + D5 = np.array([1, 2, 0, 0]) + + for D in [D1, D2, D3, D4, D5]: + mat = spcreator(D) + + assert_equal(mat.argmax(), np.argmax(D)) + assert_equal(mat.argmin(), np.argmin(D)) + + assert_equal(mat.argmax(axis=0), np.argmax(D, axis=0)) + assert_equal(mat.argmin(axis=0), np.argmin(D, axis=0)) + + D6 = np.empty((0,)) + + for axis in [None, 0]: + mat = spcreator(D6) + with pytest.raises(ValueError, match="to an empty matrix"): + mat.argmin(axis=axis) + with pytest.raises(ValueError, match="to an empty matrix"): + mat.argmax(axis=axis) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_sparsetools.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_sparsetools.py new file mode 100644 index 0000000000000000000000000000000000000000..6a8b94796116a22c210104fc446c5a17045ed21c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_sparsetools.py @@ -0,0 +1,339 @@ +import sys +import os +import gc +import threading + +import numpy as np +from numpy.testing import assert_equal, assert_, assert_allclose +from scipy.sparse import (_sparsetools, coo_matrix, csr_matrix, csc_matrix, + bsr_matrix, dia_matrix) +from scipy.sparse._sputils import supported_dtypes +from scipy._lib._testutils import check_free_memory + +import pytest +from pytest import raises as assert_raises + + +def int_to_int8(n): + """ + Wrap an integer to the interval [-128, 127]. 
+ """ + return (n + 128) % 256 - 128 + + +def test_exception(): + assert_raises(MemoryError, _sparsetools.test_throw_error) + + +def test_threads(): + # Smoke test for parallel threaded execution; doesn't actually + # check that code runs in parallel, but just that it produces + # expected results. + nthreads = 10 + niter = 100 + + n = 20 + a = csr_matrix(np.ones([n, n])) + bres = [] + + class Worker(threading.Thread): + def run(self): + b = a.copy() + for j in range(niter): + _sparsetools.csr_plus_csr(n, n, + a.indptr, a.indices, a.data, + a.indptr, a.indices, a.data, + b.indptr, b.indices, b.data) + bres.append(b) + + threads = [Worker() for _ in range(nthreads)] + for thread in threads: + thread.start() + for thread in threads: + thread.join() + + for b in bres: + assert_(np.all(b.toarray() == 2)) + + +def test_regression_std_vector_dtypes(): + # Regression test for gh-3780, checking the std::vector typemaps + # in sparsetools.cxx are complete. + for dtype in supported_dtypes: + ad = np.array([[1, 2], [3, 4]]).astype(dtype) + a = csr_matrix(ad, dtype=dtype) + + # getcol is one function using std::vector typemaps, and should not fail + assert_equal(a.getcol(0).toarray(), ad[:, :1]) + + +@pytest.mark.slow +@pytest.mark.xfail_on_32bit("Can't create large array for test") +def test_nnz_overflow(): + # Regression test for gh-7230 / gh-7871, checking that coo_toarray + # with nnz > int32max doesn't overflow. + nnz = np.iinfo(np.int32).max + 1 + # Ensure ~20 GB of RAM is free to run this test. + check_free_memory((4 + 4 + 1) * nnz / 1e6 + 0.5) + + # Use nnz duplicate entries to keep the dense version small. + row = np.zeros(nnz, dtype=np.int32) + col = np.zeros(nnz, dtype=np.int32) + data = np.zeros(nnz, dtype=np.int8) + data[-1] = 4 + s = coo_matrix((data, (row, col)), shape=(1, 1), copy=False) + # Sums nnz duplicates to produce a 1x1 array containing 4. 
+ d = s.toarray() + + assert_allclose(d, [[4]]) + + +@pytest.mark.skipif( + not (sys.platform.startswith('linux') and np.dtype(np.intp).itemsize >= 8), + reason="test requires 64-bit Linux" +) +class TestInt32Overflow: + """ + Some of the sparsetools routines use dense 2D matrices whose + total size is not bounded by the nnz of the sparse matrix. These + routines used to suffer from int32 wraparounds; here, we try to + check that the wraparounds don't occur any more. + """ + # choose n large enough + n = 50000 + + def setup_method(self): + assert self.n**2 > np.iinfo(np.int32).max + + # check there's enough memory even if everything is run at the + # same time + try: + parallel_count = int(os.environ.get('PYTEST_XDIST_WORKER_COUNT', '1')) + except ValueError: + parallel_count = np.inf + + check_free_memory(3000 * parallel_count) + + def teardown_method(self): + gc.collect() + + def test_coo_todense(self): + # Check *_todense routines (cf. gh-2179) + # + # All of them in the end call coo_matrix.todense + + n = self.n + + i = np.array([0, n-1]) + j = np.array([0, n-1]) + data = np.array([1, 2], dtype=np.int8) + m = coo_matrix((data, (i, j))) + + r = m.todense() + assert_equal(r[0,0], 1) + assert_equal(r[-1,-1], 2) + del r + gc.collect() + + @pytest.mark.slow + def test_matvecs(self): + # Check *_matvecs routines + n = self.n + + i = np.array([0, n-1]) + j = np.array([0, n-1]) + data = np.array([1, 2], dtype=np.int8) + m = coo_matrix((data, (i, j))) + + b = np.ones((n, n), dtype=np.int8) + for sptype in (csr_matrix, csc_matrix, bsr_matrix): + m2 = sptype(m) + r = m2.dot(b) + assert_equal(r[0,0], 1) + assert_equal(r[-1,-1], 2) + del r + gc.collect() + + del b + gc.collect() + + @pytest.mark.slow + def test_dia_matvec(self): + # Check: huge dia_matrix _matvec + n = self.n + data = np.ones((n, n), dtype=np.int8) + offsets = np.arange(n) + m = dia_matrix((data, offsets), shape=(n, n)) + v = np.ones(m.shape[1], dtype=np.int8) + r = m.dot(v) + assert_equal(r[0], 
int_to_int8(n)) + del data, offsets, m, v, r + gc.collect() + + _bsr_ops = [pytest.param("matmat", marks=pytest.mark.xslow), + pytest.param("matvecs", marks=pytest.mark.xslow), + "matvec", + "diagonal", + "sort_indices", + pytest.param("transpose", marks=pytest.mark.xslow)] + + @pytest.mark.slow + @pytest.mark.parametrize("op", _bsr_ops) + def test_bsr_1_block(self, op): + # Check: huge bsr_matrix (1-block) + # + # The point here is that indices inside a block may overflow. + + def get_matrix(): + n = self.n + data = np.ones((1, n, n), dtype=np.int8) + indptr = np.array([0, 1], dtype=np.int32) + indices = np.array([0], dtype=np.int32) + m = bsr_matrix((data, indices, indptr), blocksize=(n, n), copy=False) + del data, indptr, indices + return m + + gc.collect() + try: + getattr(self, "_check_bsr_" + op)(get_matrix) + finally: + gc.collect() + + @pytest.mark.slow + @pytest.mark.parametrize("op", _bsr_ops) + def test_bsr_n_block(self, op): + # Check: huge bsr_matrix (n-block) + # + # The point here is that while indices within a block don't + # overflow, accumulators across many block may. 
+ + def get_matrix(): + n = self.n + data = np.ones((n, n, 1), dtype=np.int8) + indptr = np.array([0, n], dtype=np.int32) + indices = np.arange(n, dtype=np.int32) + m = bsr_matrix((data, indices, indptr), blocksize=(n, 1), copy=False) + del data, indptr, indices + return m + + gc.collect() + try: + getattr(self, "_check_bsr_" + op)(get_matrix) + finally: + gc.collect() + + def _check_bsr_matvecs(self, m): # skip name check + m = m() + n = self.n + + # _matvecs + r = m.dot(np.ones((n, 2), dtype=np.int8)) + assert_equal(r[0, 0], int_to_int8(n)) + + def _check_bsr_matvec(self, m): # skip name check + m = m() + n = self.n + + # _matvec + r = m.dot(np.ones((n,), dtype=np.int8)) + assert_equal(r[0], int_to_int8(n)) + + def _check_bsr_diagonal(self, m): # skip name check + m = m() + n = self.n + + # _diagonal + r = m.diagonal() + assert_equal(r, np.ones(n)) + + def _check_bsr_sort_indices(self, m): # skip name check + # _sort_indices + m = m() + m.sort_indices() + + def _check_bsr_transpose(self, m): # skip name check + # _transpose + m = m() + m.transpose() + + def _check_bsr_matmat(self, m): # skip name check + m = m() + n = self.n + + # _bsr_matmat + m2 = bsr_matrix(np.ones((n, 2), dtype=np.int8), blocksize=(m.blocksize[1], 2)) + m.dot(m2) # shouldn't SIGSEGV + del m2 + + # _bsr_matmat + m2 = bsr_matrix(np.ones((2, n), dtype=np.int8), blocksize=(2, m.blocksize[0])) + m2.dot(m) # shouldn't SIGSEGV + + +@pytest.mark.skip(reason="64-bit indices in sparse matrices not available") +def test_csr_matmat_int64_overflow(): + n = 3037000500 + assert n**2 > np.iinfo(np.int64).max + + # the test would take crazy amounts of memory + check_free_memory(n * (8*2 + 1) * 3 / 1e6) + + # int64 overflow + data = np.ones((n,), dtype=np.int8) + indptr = np.arange(n+1, dtype=np.int64) + indices = np.zeros(n, dtype=np.int64) + a = csr_matrix((data, indices, indptr)) + b = a.T + + assert_raises(RuntimeError, a.dot, b) + + +def test_upcast(): + a0 = csr_matrix([[np.pi, np.pi*1j], [3, 4]], 
dtype=complex) + b0 = np.array([256+1j, 2**32], dtype=complex) + + for a_dtype in supported_dtypes: + for b_dtype in supported_dtypes: + msg = f"({a_dtype!r}, {b_dtype!r})" + + if np.issubdtype(a_dtype, np.complexfloating): + a = a0.copy().astype(a_dtype) + else: + a = a0.real.copy().astype(a_dtype) + + if np.issubdtype(b_dtype, np.complexfloating): + b = b0.copy().astype(b_dtype) + else: + with np.errstate(invalid="ignore"): + # Casting a large value (2**32) to int8 causes a warning in + # numpy >1.23 + b = b0.real.copy().astype(b_dtype) + + if not (a_dtype == np.bool_ and b_dtype == np.bool_): + c = np.zeros((2,), dtype=np.bool_) + assert_raises(ValueError, _sparsetools.csr_matvec, + 2, 2, a.indptr, a.indices, a.data, b, c) + + if ((np.issubdtype(a_dtype, np.complexfloating) and + not np.issubdtype(b_dtype, np.complexfloating)) or + (not np.issubdtype(a_dtype, np.complexfloating) and + np.issubdtype(b_dtype, np.complexfloating))): + c = np.zeros((2,), dtype=np.float64) + assert_raises(ValueError, _sparsetools.csr_matvec, + 2, 2, a.indptr, a.indices, a.data, b, c) + + c = np.zeros((2,), dtype=np.result_type(a_dtype, b_dtype)) + _sparsetools.csr_matvec(2, 2, a.indptr, a.indices, a.data, b, c) + assert_allclose(c, np.dot(a.toarray(), b), err_msg=msg) + + +def test_endianness(): + d = np.ones((3,4)) + offsets = [-1,0,1] + + a = dia_matrix((d.astype('f8'), offsets), (4, 4)) + v = np.arange(4) + + assert_allclose(a.dot(v), [1, 3, 6, 5]) + assert_allclose(b.dot(v), [1, 3, 6, 5]) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_spfuncs.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_spfuncs.py new file mode 100644 index 0000000000000000000000000000000000000000..75bc2d92c369be5799a904bc0938617f30321f12 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_spfuncs.py @@ -0,0 +1,97 @@ +from numpy import array, kron, diag +from numpy.testing import assert_, assert_equal + +from scipy.sparse 
import _spfuncs as spfuncs +from scipy.sparse import csr_matrix, csc_matrix, bsr_matrix +from scipy.sparse._sparsetools import (csr_scale_rows, csr_scale_columns, + bsr_scale_rows, bsr_scale_columns) + + +class TestSparseFunctions: + def test_scale_rows_and_cols(self): + D = array([[1, 0, 0, 2, 3], + [0, 4, 0, 5, 0], + [0, 0, 6, 7, 0]]) + + #TODO expose through function + S = csr_matrix(D) + v = array([1,2,3]) + csr_scale_rows(3,5,S.indptr,S.indices,S.data,v) + assert_equal(S.toarray(), diag(v)@D) + + S = csr_matrix(D) + v = array([1,2,3,4,5]) + csr_scale_columns(3,5,S.indptr,S.indices,S.data,v) + assert_equal(S.toarray(), D@diag(v)) + + # blocks + E = kron(D,[[1,2],[3,4]]) + S = bsr_matrix(E,blocksize=(2,2)) + v = array([1,2,3,4,5,6]) + bsr_scale_rows(3,5,2,2,S.indptr,S.indices,S.data,v) + assert_equal(S.toarray(), diag(v)@E) + + S = bsr_matrix(E,blocksize=(2,2)) + v = array([1,2,3,4,5,6,7,8,9,10]) + bsr_scale_columns(3,5,2,2,S.indptr,S.indices,S.data,v) + assert_equal(S.toarray(), E@diag(v)) + + E = kron(D,[[1,2,3],[4,5,6]]) + S = bsr_matrix(E,blocksize=(2,3)) + v = array([1,2,3,4,5,6]) + bsr_scale_rows(3,5,2,3,S.indptr,S.indices,S.data,v) + assert_equal(S.toarray(), diag(v)@E) + + S = bsr_matrix(E,blocksize=(2,3)) + v = array([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]) + bsr_scale_columns(3,5,2,3,S.indptr,S.indices,S.data,v) + assert_equal(S.toarray(), E@diag(v)) + + def test_estimate_blocksize(self): + mats = [] + mats.append([[0,1],[1,0]]) + mats.append([[1,1,0],[0,0,1],[1,0,1]]) + mats.append([[0],[0],[1]]) + mats = [array(x) for x in mats] + + blks = [] + blks.append([[1]]) + blks.append([[1,1],[1,1]]) + blks.append([[1,1],[0,1]]) + blks.append([[1,1,0],[1,0,1],[1,1,1]]) + blks = [array(x) for x in blks] + + for A in mats: + for B in blks: + X = kron(A,B) + r,c = spfuncs.estimate_blocksize(X) + assert_(r >= B.shape[0]) + assert_(c >= B.shape[1]) + + def test_count_blocks(self): + def gold(A,bs): + R,C = bs + I,J = A.nonzero() + return len(set(zip(I//R,J//C))) + + 
mats = [] + mats.append([[0]]) + mats.append([[1]]) + mats.append([[1,0]]) + mats.append([[1,1]]) + mats.append([[0,1],[1,0]]) + mats.append([[1,1,0],[0,0,1],[1,0,1]]) + mats.append([[0],[0],[1]]) + + for A in mats: + for B in mats: + X = kron(A,B) + Y = csr_matrix(X) + for R in range(1,6): + for C in range(1,6): + assert_equal(spfuncs.count_blocks(Y, (R, C)), gold(X, (R, C))) + + X = kron([[1,1,0],[0,0,1],[1,0,1]],[[1,1]]) + Y = csc_matrix(X) + assert_equal(spfuncs.count_blocks(X, (1, 2)), gold(X, (1, 2))) + assert_equal(spfuncs.count_blocks(Y, (1, 2)), gold(X, (1, 2))) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_sputils.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_sputils.py new file mode 100644 index 0000000000000000000000000000000000000000..4545b49bea2cce465ae039ea7fc0f5a48b3da140 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/test_sputils.py @@ -0,0 +1,196 @@ +"""unit tests for sparse utility functions""" + +import numpy as np +from numpy.testing import assert_equal +from pytest import raises as assert_raises +from scipy.sparse import _sputils as sputils +from scipy.sparse._sputils import matrix + + +class TestSparseUtils: + + def test_upcast(self): + assert_equal(sputils.upcast('intc'), np.intc) + assert_equal(sputils.upcast('int32', 'float32'), np.float64) + assert_equal(sputils.upcast('bool', complex, float), np.complex128) + assert_equal(sputils.upcast('i', 'd'), np.float64) + + def test_getdtype(self): + A = np.array([1], dtype='int8') + + assert_equal(sputils.getdtype(None, default=float), float) + assert_equal(sputils.getdtype(None, a=A), np.int8) + + with assert_raises( + ValueError, + match="object dtype is not supported by sparse matrices", + ): + sputils.getdtype("O") + + def test_isscalarlike(self): + assert_equal(sputils.isscalarlike(3.0), True) + assert_equal(sputils.isscalarlike(-4), True) + assert_equal(sputils.isscalarlike(2.5), True) + 
assert_equal(sputils.isscalarlike(1 + 3j), True) + assert_equal(sputils.isscalarlike(np.array(3)), True) + assert_equal(sputils.isscalarlike("16"), True) + + assert_equal(sputils.isscalarlike(np.array([3])), False) + assert_equal(sputils.isscalarlike([[3]]), False) + assert_equal(sputils.isscalarlike((1,)), False) + assert_equal(sputils.isscalarlike((1, 2)), False) + + def test_isintlike(self): + assert_equal(sputils.isintlike(-4), True) + assert_equal(sputils.isintlike(np.array(3)), True) + assert_equal(sputils.isintlike(np.array([3])), False) + with assert_raises( + ValueError, + match="Inexact indices into sparse matrices are not allowed" + ): + sputils.isintlike(3.0) + + assert_equal(sputils.isintlike(2.5), False) + assert_equal(sputils.isintlike(1 + 3j), False) + assert_equal(sputils.isintlike((1,)), False) + assert_equal(sputils.isintlike((1, 2)), False) + + def test_isshape(self): + assert_equal(sputils.isshape((1, 2)), True) + assert_equal(sputils.isshape((5, 2)), True) + + assert_equal(sputils.isshape((1.5, 2)), False) + assert_equal(sputils.isshape((2, 2, 2)), False) + assert_equal(sputils.isshape(([2], 2)), False) + assert_equal(sputils.isshape((-1, 2), nonneg=False),True) + assert_equal(sputils.isshape((2, -1), nonneg=False),True) + assert_equal(sputils.isshape((-1, 2), nonneg=True),False) + assert_equal(sputils.isshape((2, -1), nonneg=True),False) + + assert_equal(sputils.isshape((1.5, 2), allow_1d=True), False) + assert_equal(sputils.isshape(([2], 2), allow_1d=True), False) + assert_equal(sputils.isshape((2, 2, -2), nonneg=True, allow_1d=True), + False) + assert_equal(sputils.isshape((2,), allow_1d=True), True) + assert_equal(sputils.isshape((2, 2,), allow_1d=True), True) + assert_equal(sputils.isshape((2, 2, 2), allow_1d=True), False) + + def test_issequence(self): + assert_equal(sputils.issequence((1,)), True) + assert_equal(sputils.issequence((1, 2, 3)), True) + assert_equal(sputils.issequence([1]), True) + assert_equal(sputils.issequence([1, 2, 
3]), True) + assert_equal(sputils.issequence(np.array([1, 2, 3])), True) + + assert_equal(sputils.issequence(np.array([[1], [2], [3]])), False) + assert_equal(sputils.issequence(3), False) + + def test_ismatrix(self): + assert_equal(sputils.ismatrix(((),)), True) + assert_equal(sputils.ismatrix([[1], [2]]), True) + assert_equal(sputils.ismatrix(np.arange(3)[None]), True) + + assert_equal(sputils.ismatrix([1, 2]), False) + assert_equal(sputils.ismatrix(np.arange(3)), False) + assert_equal(sputils.ismatrix([[[1]]]), False) + assert_equal(sputils.ismatrix(3), False) + + def test_isdense(self): + assert_equal(sputils.isdense(np.array([1])), True) + assert_equal(sputils.isdense(matrix([1])), True) + + def test_validateaxis(self): + assert_raises(TypeError, sputils.validateaxis, (0, 1)) + assert_raises(TypeError, sputils.validateaxis, 1.5) + assert_raises(ValueError, sputils.validateaxis, 3) + + # These function calls should not raise errors + for axis in (-2, -1, 0, 1, None): + sputils.validateaxis(axis) + + def test_get_index_dtype(self): + imax = np.int64(np.iinfo(np.int32).max) + too_big = imax + 1 + + # Check that uint32's with no values too large doesn't return + # int64 + a1 = np.ones(90, dtype='uint32') + a2 = np.ones(90, dtype='uint32') + assert_equal( + np.dtype(sputils.get_index_dtype((a1, a2), check_contents=True)), + np.dtype('int32') + ) + + # Check that if we can not convert but all values are less than or + # equal to max that we can just convert to int32 + a1[-1] = imax + assert_equal( + np.dtype(sputils.get_index_dtype((a1, a2), check_contents=True)), + np.dtype('int32') + ) + + # Check that if it can not convert directly and the contents are + # too large that we return int64 + a1[-1] = too_big + assert_equal( + np.dtype(sputils.get_index_dtype((a1, a2), check_contents=True)), + np.dtype('int64') + ) + + # test that if can not convert and didn't specify to check_contents + # we return int64 + a1 = np.ones(89, dtype='uint32') + a2 = np.ones(89, 
dtype='uint32') + assert_equal( + np.dtype(sputils.get_index_dtype((a1, a2))), + np.dtype('int64') + ) + + # Check that even if we have arrays that can be converted directly + # that if we specify a maxval directly it takes precedence + a1 = np.ones(12, dtype='uint32') + a2 = np.ones(12, dtype='uint32') + assert_equal( + np.dtype(sputils.get_index_dtype( + (a1, a2), maxval=too_big, check_contents=True + )), + np.dtype('int64') + ) + + # Check that an array with a too max size and maxval set + # still returns int64 + a1[-1] = too_big + assert_equal( + np.dtype(sputils.get_index_dtype((a1, a2), maxval=too_big)), + np.dtype('int64') + ) + + def test_check_shape_overflow(self): + new_shape = sputils.check_shape([(10, -1)], (65535, 131070)) + assert_equal(new_shape, (10, 858967245)) + + def test_matrix(self): + a = [[1, 2, 3]] + b = np.array(a) + + assert isinstance(sputils.matrix(a), np.matrix) + assert isinstance(sputils.matrix(b), np.matrix) + + c = sputils.matrix(b) + c[:, :] = 123 + assert_equal(b, a) + + c = sputils.matrix(b, copy=False) + c[:, :] = 123 + assert_equal(b, [[123, 123, 123]]) + + def test_asmatrix(self): + a = [[1, 2, 3]] + b = np.array(a) + + assert isinstance(sputils.asmatrix(a), np.matrix) + assert isinstance(sputils.asmatrix(b), np.matrix) + + c = sputils.asmatrix(b) + c[:, :] = 123 + assert_equal(b, [[123, 123, 123]]) diff --git a/llmeval-env/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so b/llmeval-env/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so new file mode 100644 index 0000000000000000000000000000000000000000..91cb5013e4bc0a14459ffc9485cd4f9b68ac050b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7632e06d78ea6d2e94cc85abd2132718558878974857efa75d081cd98457c289 +size 881666489