diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd308f0c6c821f44d504a4db484d201218c7b203 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_measurements.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_measurements.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f629d6699d58947d159066e3db04c3cd03fc1c52 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_measurements.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_morphology.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_morphology.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1737f8fd5bdb1a128f969bb9a93a830faaf2b8f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_morphology.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_ni_docstrings.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_ni_docstrings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e6717e0272140116a843f0e3d32142905a9d678 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_ni_docstrings.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_ni_support.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_ni_support.cpython-310.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..dc4806910f560f9239262ffae7037e62687f3faa Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_ni_support.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/filters.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/filters.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73ee708555270909bb50e1f0b4a285326a59da37 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/filters.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/fourier.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/fourier.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7cba319e4e0126446585c33cd4e8fafb932d51af Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/fourier.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/interpolation.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/interpolation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..26d73cecff998381abe531771f505831d219af8a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/interpolation.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/measurements.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/measurements.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f26d5d21a6db26779c4385ab32d90bc6b12aa81 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/measurements.cpython-310.pyc 
differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/morphology.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/morphology.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2554ee904a7c74d903d343b68cff8dde785d1518 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/morphology.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__init__.py b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ec3dd05726c906ec0d887b72e1de8dd28de00299 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__init__.py @@ -0,0 +1,324 @@ +""" +===================================== +Sparse matrices (:mod:`scipy.sparse`) +===================================== + +.. currentmodule:: scipy.sparse + +.. toctree:: + :hidden: + + sparse.csgraph + sparse.linalg + +SciPy 2-D sparse array package for numeric data. + +.. note:: + + This package is switching to an array interface, compatible with + NumPy arrays, from the older matrix interface. We recommend that + you use the array objects (`bsr_array`, `coo_array`, etc.) for + all new work. + + When using the array interface, please note that: + + - ``x * y`` no longer performs matrix multiplication, but + element-wise multiplication (just like with NumPy arrays). To + make code work with both arrays and matrices, use ``x @ y`` for + matrix multiplication. + - Operations such as `sum`, that used to produce dense matrices, now + produce arrays, whose multiplication behavior differs similarly. + - Sparse arrays currently must be two-dimensional. This also means + that all *slicing* operations on these objects must produce + two-dimensional results, or they will result in an error. This + will be addressed in a future version. 
+ + The construction utilities (`eye`, `kron`, `random`, `diags`, etc.) + have not yet been ported, but their results can be wrapped into arrays:: + + A = csr_array(eye(3)) + +Contents +======== + +Sparse array classes +-------------------- + +.. autosummary:: + :toctree: generated/ + + bsr_array - Block Sparse Row array + coo_array - A sparse array in COOrdinate format + csc_array - Compressed Sparse Column array + csr_array - Compressed Sparse Row array + dia_array - Sparse array with DIAgonal storage + dok_array - Dictionary Of Keys based sparse array + lil_array - Row-based list of lists sparse array + sparray - Sparse array base class + +Sparse matrix classes +--------------------- + +.. autosummary:: + :toctree: generated/ + + bsr_matrix - Block Sparse Row matrix + coo_matrix - A sparse matrix in COOrdinate format + csc_matrix - Compressed Sparse Column matrix + csr_matrix - Compressed Sparse Row matrix + dia_matrix - Sparse matrix with DIAgonal storage + dok_matrix - Dictionary Of Keys based sparse matrix + lil_matrix - Row-based list of lists sparse matrix + spmatrix - Sparse matrix base class + +Functions +--------- + +Building sparse arrays: + +.. autosummary:: + :toctree: generated/ + + diags_array - Return a sparse array from diagonals + eye_array - Sparse MxN array whose k-th diagonal is all ones + random_array - Random values in a given shape array + block_array - Build a sparse array from sub-blocks + +Building sparse matrices: + +.. autosummary:: + :toctree: generated/ + + eye - Sparse MxN matrix whose k-th diagonal is all ones + identity - Identity matrix in sparse matrix format + diags - Return a sparse matrix from diagonals + spdiags - Return a sparse matrix from diagonals + bmat - Build a sparse matrix from sparse sub-blocks + random - Random values in a given shape matrix + rand - Random values in a given shape matrix (old interface) + +Building larger structures from smaller (array or matrix) + +.. 
autosummary:: + :toctree: generated/ + + kron - kronecker product of two sparse matrices + kronsum - kronecker sum of sparse matrices + block_diag - Build a block diagonal sparse matrix + tril - Lower triangular portion of a matrix in sparse format + triu - Upper triangular portion of a matrix in sparse format + hstack - Stack sparse matrices horizontally (column wise) + vstack - Stack sparse matrices vertically (row wise) + +Save and load sparse matrices: + +.. autosummary:: + :toctree: generated/ + + save_npz - Save a sparse matrix/array to a file using ``.npz`` format. + load_npz - Load a sparse matrix/array from a file using ``.npz`` format. + +Sparse tools: + +.. autosummary:: + :toctree: generated/ + + find + +Identifying sparse arrays: + +- use `isinstance(A, sp.sparse.sparray)` to check whether an array or matrix. +- use `A.format == 'csr'` to check the sparse format + +Identifying sparse matrices: + +.. autosummary:: + :toctree: generated/ + + issparse + isspmatrix + isspmatrix_csc + isspmatrix_csr + isspmatrix_bsr + isspmatrix_lil + isspmatrix_dok + isspmatrix_coo + isspmatrix_dia + +Submodules +---------- + +.. autosummary:: + + csgraph - Compressed sparse graph routines + linalg - sparse linear algebra routines + +Exceptions +---------- + +.. autosummary:: + :toctree: generated/ + + SparseEfficiencyWarning + SparseWarning + + +Usage information +================= + +There are seven available sparse array types: + + 1. `csc_array`: Compressed Sparse Column format + 2. `csr_array`: Compressed Sparse Row format + 3. `bsr_array`: Block Sparse Row format + 4. `lil_array`: List of Lists format + 5. `dok_array`: Dictionary of Keys format + 6. `coo_array`: COOrdinate format (aka IJV, triplet format) + 7. `dia_array`: DIAgonal format + +To construct an array efficiently, use either `dok_array` or `lil_array`. +The `lil_array` class supports basic slicing and fancy indexing with a +similar syntax to NumPy arrays. 
As illustrated below, the COO format +may also be used to efficiently construct arrays. Despite their +similarity to NumPy arrays, it is **strongly discouraged** to use NumPy +functions directly on these arrays because NumPy may not properly convert +them for computations, leading to unexpected (and incorrect) results. If you +do want to apply a NumPy function to these arrays, first check if SciPy has +its own implementation for the given sparse array class, or **convert the +sparse array to a NumPy array** (e.g., using the ``toarray`` method of the +class) first before applying the method. + +To perform manipulations such as multiplication or inversion, first +convert the array to either CSC or CSR format. The `lil_array` format is +row-based, so conversion to CSR is efficient, whereas conversion to CSC +is less so. + +All conversions among the CSR, CSC, and COO formats are efficient, +linear-time operations. + +Matrix vector product +--------------------- +To do a vector product between a sparse array and a vector simply use +the array ``dot`` method, as described in its docstring: + +>>> import numpy as np +>>> from scipy.sparse import csr_array +>>> A = csr_array([[1, 2, 0], [0, 0, 3], [4, 0, 5]]) +>>> v = np.array([1, 0, -1]) +>>> A.dot(v) +array([ 1, -3, -1], dtype=int64) + +.. warning:: As of NumPy 1.7, ``np.dot`` is not aware of sparse arrays, + therefore using it will result on unexpected results or errors. + The corresponding dense array should be obtained first instead: + + >>> np.dot(A.toarray(), v) + array([ 1, -3, -1], dtype=int64) + + but then all the performance advantages would be lost. + +The CSR format is especially suitable for fast matrix vector products. 
+ +Example 1 +--------- +Construct a 1000x1000 `lil_array` and add some values to it: + +>>> from scipy.sparse import lil_array +>>> from scipy.sparse.linalg import spsolve +>>> from numpy.linalg import solve, norm +>>> from numpy.random import rand + +>>> A = lil_array((1000, 1000)) +>>> A[0, :100] = rand(100) +>>> A[1, 100:200] = A[0, :100] +>>> A.setdiag(rand(1000)) + +Now convert it to CSR format and solve A x = b for x: + +>>> A = A.tocsr() +>>> b = rand(1000) +>>> x = spsolve(A, b) + +Convert it to a dense array and solve, and check that the result +is the same: + +>>> x_ = solve(A.toarray(), b) + +Now we can compute norm of the error with: + +>>> err = norm(x-x_) +>>> err < 1e-10 +True + +It should be small :) + + +Example 2 +--------- + +Construct an array in COO format: + +>>> from scipy import sparse +>>> from numpy import array +>>> I = array([0,3,1,0]) +>>> J = array([0,3,1,2]) +>>> V = array([4,5,7,9]) +>>> A = sparse.coo_array((V,(I,J)),shape=(4,4)) + +Notice that the indices do not need to be sorted. + +Duplicate (i,j) entries are summed when converting to CSR or CSC. + +>>> I = array([0,0,1,3,1,0,0]) +>>> J = array([0,2,1,3,1,0,0]) +>>> V = array([1,1,1,1,1,1,1]) +>>> B = sparse.coo_array((V,(I,J)),shape=(4,4)).tocsr() + +This is useful for constructing finite-element stiffness and mass matrices. + +Further details +--------------- + +CSR column indices are not necessarily sorted. Likewise for CSC row +indices. Use the ``.sorted_indices()`` and ``.sort_indices()`` methods when +sorted indices are required (e.g., when passing data to other libraries). + +""" + +# Original code by Travis Oliphant. +# Modified and extended by Ed Schofield, Robert Cimrman, +# Nathan Bell, and Jake Vanderplas. 
+ +import warnings as _warnings + +from ._base import * +from ._csr import * +from ._csc import * +from ._lil import * +from ._dok import * +from ._coo import * +from ._dia import * +from ._bsr import * +from ._construct import * +from ._extract import * +from ._matrix import spmatrix +from ._matrix_io import * + +# For backward compatibility with v0.19. +from . import csgraph + +# Deprecated namespaces, to be removed in v2.0.0 +from . import ( + base, bsr, compressed, construct, coo, csc, csr, data, dia, dok, extract, + lil, sparsetools, sputils +) + +__all__ = [s for s in dir() if not s.startswith('_')] + +# Filter PendingDeprecationWarning for np.matrix introduced with numpy 1.15 +msg = 'the matrix subclass is not the recommended way' +_warnings.filterwarnings('ignore', message=msg) + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed083429f9af20cb9c423743f0734a502ffd00ef Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_base.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93fd6a5fe30c2caf932434f0f3f5b13216d30678 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_base.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_bsr.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_bsr.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..23536f95bc454ad8f3215c45ef7e2e5c4eab9f9c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_bsr.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_compressed.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_compressed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..716a64ee85886ee902a38d86eb92252dbb928a25 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_compressed.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_construct.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_construct.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..771b0bf1ed6b7c1e77ad88f8d2b929afe2428f5b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_construct.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_coo.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_coo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..957c9843618da9591c53fb3945966ba02250fe50 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_coo.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_csc.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_csc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab4951ef7c5a99ddb4336efe8578be2f6e6bb1f0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_csc.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_csr.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_csr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31b15cba55db4c818878b31c9f050cdf83516261 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_csr.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_data.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_data.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c2f3ee073a46b15e09fda4600f7fc7e3853ff3ce Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_data.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_dia.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_dia.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..213b9b47001400e0c849ea670df3877a63105d24 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_dia.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_dok.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_dok.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc043391ff77a1c246008af221396fd1f799c448 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_dok.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_extract.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_extract.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f8783fb1eea0d2ab2a629fd76d49221c121a7405 Binary files 
/dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_extract.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_index.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_index.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4702077c46b0db6e871fd32831135606437f8eca Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_index.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_lil.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_lil.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dceb7b96cf7a5a5985342c5cb7d6758da4e809bc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_lil.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_matrix.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_matrix.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6af2f293529e34d5df979556650fb60d5884bbd2 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_matrix.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_matrix_io.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_matrix_io.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..886c2b7bb6f7efb6610babecf9a900142bf97279 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_matrix_io.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_spfuncs.cpython-310.pyc 
b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_spfuncs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a782c7696a143e469f1a04dd34d65370129f63ad Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_spfuncs.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_sputils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_sputils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b3437f19bed9bab558f4168afe33178d750193c0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_sputils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/base.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf16061f55e1279fa348d65b626c002ac4663822 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/base.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/bsr.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/bsr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..11e27e40b7450a1ae7da4c8eb41fc677fbdfee6a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/bsr.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/compressed.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/compressed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bdfc2d9c4698dd57a94377e716d4748ebfdbfabc Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/compressed.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/construct.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/construct.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd3685b9fef94d6d19ec882db12dca7c382f197b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/construct.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/coo.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/coo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..851c859446caae8eeb2eefdf30d0c14cdf1eb733 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/coo.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/csc.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/csc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0719782ca66733269c9af2187b6c8872e9c39f6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/csc.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/csr.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/csr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b70f20ea0384bdcc27eae81e7c7af96525e5363f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/csr.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/data.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/data.cpython-310.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..712fbabe45928ce53f039ac1cd74c559decd69bf Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/data.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/dia.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/dia.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14559d517727e9085e233443af72644578e02e91 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/dia.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/dok.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/dok.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..04d385f22499813776dae9b460e0c3a4c7c89d46 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/dok.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/extract.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/extract.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..38f032444e631da369be761215d25a0825064986 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/extract.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/lil.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/lil.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..296118747befe97fc105c49ed440cddef6a1701d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/lil.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/sparsetools.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/sparsetools.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe24a6e03b320868461d6b38ec1ce0df0a9cacfc Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/sparsetools.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/spfuncs.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/spfuncs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c00e0447e962600fce1d40dff3a925ac8898ae5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/spfuncs.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/sputils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/sputils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..af6cef238cc977d2d7831f1018ee25b148a70edf Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/sputils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_base.py b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..80a25aa002d9866900e3a68504813c4e7c6cd680 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_base.py @@ -0,0 +1,1568 @@ +"""Base class for sparse matrices""" +from warnings import warn + +import numpy as np +from scipy._lib._util import VisibleDeprecationWarning + +from ._sputils import (asmatrix, check_reshape_kwargs, check_shape, + get_sum_dtype, isdense, isscalarlike, + matrix, validateaxis,) + +from ._matrix import spmatrix + +__all__ = ['isspmatrix', 'issparse', 
'sparray', + 'SparseWarning', 'SparseEfficiencyWarning'] + + +class SparseWarning(Warning): + pass + + +class SparseFormatWarning(SparseWarning): + pass + + +class SparseEfficiencyWarning(SparseWarning): + pass + + +# The formats that we might potentially understand. +_formats = {'csc': [0, "Compressed Sparse Column"], + 'csr': [1, "Compressed Sparse Row"], + 'dok': [2, "Dictionary Of Keys"], + 'lil': [3, "List of Lists"], + 'dod': [4, "Dictionary of Dictionaries"], + 'sss': [5, "Symmetric Sparse Skyline"], + 'coo': [6, "COOrdinate"], + 'lba': [7, "Linpack BAnded"], + 'egd': [8, "Ellpack-itpack Generalized Diagonal"], + 'dia': [9, "DIAgonal"], + 'bsr': [10, "Block Sparse Row"], + 'msr': [11, "Modified compressed Sparse Row"], + 'bsc': [12, "Block Sparse Column"], + 'msc': [13, "Modified compressed Sparse Column"], + 'ssk': [14, "Symmetric SKyline"], + 'nsk': [15, "Nonsymmetric SKyline"], + 'jad': [16, "JAgged Diagonal"], + 'uss': [17, "Unsymmetric Sparse Skyline"], + 'vbr': [18, "Variable Block Row"], + 'und': [19, "Undefined"] + } + + +# These univariate ufuncs preserve zeros. +_ufuncs_with_fixed_point_at_zero = frozenset([ + np.sin, np.tan, np.arcsin, np.arctan, np.sinh, np.tanh, np.arcsinh, + np.arctanh, np.rint, np.sign, np.expm1, np.log1p, np.deg2rad, + np.rad2deg, np.floor, np.ceil, np.trunc, np.sqrt]) + + +MAXPRINT = 50 + + +class _spbase: + """ This class provides a base class for all sparse arrays. It + cannot be instantiated. Most of the work is provided by subclasses. 
+ """ + + __array_priority__ = 10.1 + _format = 'und' # undefined + + @property + def ndim(self) -> int: + return len(self._shape) + + @property + def _shape_as_2d(self): + s = self._shape + return (1, s[-1]) if len(s) == 1 else s + + @property + def _bsr_container(self): + from ._bsr import bsr_array + return bsr_array + + @property + def _coo_container(self): + from ._coo import coo_array + return coo_array + + @property + def _csc_container(self): + from ._csc import csc_array + return csc_array + + @property + def _csr_container(self): + from ._csr import csr_array + return csr_array + + @property + def _dia_container(self): + from ._dia import dia_array + return dia_array + + @property + def _dok_container(self): + from ._dok import dok_array + return dok_array + + @property + def _lil_container(self): + from ._lil import lil_array + return lil_array + + def __init__(self, maxprint=MAXPRINT): + self._shape = None + if self.__class__.__name__ == '_spbase': + raise ValueError("This class is not intended" + " to be instantiated directly.") + self.maxprint = maxprint + + # Use this in 1.14.0 and later: + # + # @property + # def shape(self): + # return self._shape + + def reshape(self, *args, **kwargs): + """reshape(self, shape, order='C', copy=False) + + Gives a new shape to a sparse array/matrix without changing its data. + + Parameters + ---------- + shape : length-2 tuple of ints + The new shape should be compatible with the original shape. + order : {'C', 'F'}, optional + Read the elements using this index order. 'C' means to read and + write the elements using C-like index order; e.g., read entire first + row, then second row, etc. 'F' means to read and write the elements + using Fortran-like index order; e.g., read entire first column, then + second column, etc. + copy : bool, optional + Indicates whether or not attributes of self should be copied + whenever possible. 
The degree to which attributes are copied varies + depending on the type of sparse array being used. + + Returns + ------- + reshaped : sparse array/matrix + A sparse array/matrix with the given `shape`, not necessarily of the same + format as the current object. + + See Also + -------- + numpy.reshape : NumPy's implementation of 'reshape' for ndarrays + """ + # If the shape already matches, don't bother doing an actual reshape + # Otherwise, the default is to convert to COO and use its reshape + is_array = isinstance(self, sparray) + shape = check_shape(args, self.shape, allow_1d=is_array) + order, copy = check_reshape_kwargs(kwargs) + if shape == self.shape: + if copy: + return self.copy() + else: + return self + + return self.tocoo(copy=copy).reshape(shape, order=order, copy=False) + + def resize(self, shape): + """Resize the array/matrix in-place to dimensions given by ``shape`` + + Any elements that lie within the new shape will remain at the same + indices, while non-zero elements lying outside the new shape are + removed. + + Parameters + ---------- + shape : (int, int) + number of rows and columns in the new array/matrix + + Notes + ----- + The semantics are not identical to `numpy.ndarray.resize` or + `numpy.resize`. Here, the same data will be maintained at each index + before and after reshape, if that index is within the new bounds. In + numpy, resizing maintains contiguity of the array, moving elements + around in the logical array but not within a flattened representation. + + We give no guarantees about whether the underlying data attributes + (arrays, etc.) will be modified in place or replaced with new objects. + """ + # As an inplace operation, this requires implementation in each format. + raise NotImplementedError( + f'{type(self).__name__}.resize is not implemented') + + def astype(self, dtype, casting='unsafe', copy=True): + """Cast the array/matrix elements to a specified type. 
@classmethod
def _ascontainer(cls, X, **kwargs):
    """Wrap X as an ndarray (array API) or np.matrix (matrix API)."""
    if issubclass(cls, sparray):
        return np.asarray(X, **kwargs)
    return asmatrix(X, **kwargs)

@classmethod
def _container(cls, X, **kwargs):
    """Like `_ascontainer`, but always creates a new container."""
    if issubclass(cls, sparray):
        return np.array(X, **kwargs)
    return matrix(X, **kwargs)

def _asfptype(self):
    """Upcast array to a floating point format (if necessary)"""
    fp_types = ['f', 'd', 'F', 'D']

    if self.dtype.char in fp_types:
        return self

    # Pick the smallest floating type that can represent the current dtype.
    for fp_type in fp_types:
        if self.dtype <= np.dtype(fp_type):
            return self.astype(fp_type)

    raise TypeError('cannot upcast [%s] to a floating '
                    'point format' % self.dtype.name)

def __iter__(self):
    """Iterate over the first axis, yielding one row (or element) at a time."""
    for row_idx in range(self.shape[0]):
        yield self[row_idx]

def _getmaxprint(self):
    """Maximum number of elements to display when printed."""
    return self.maxprint

def count_nonzero(self):
    """Number of non-zero entries, equivalent to

    np.count_nonzero(a.toarray())

    Unlike the nnz property, which return the number of stored
    entries (the length of the data attribute), this method counts the
    actual number of non-zero entries in data.
    """
    raise NotImplementedError("count_nonzero not implemented for %s." %
                              self.__class__.__name__)

def _getnnz(self, axis=None):
    """Number of stored values, including explicit zeros.

    Parameters
    ----------
    axis : None, 0, or 1
        Select between the number of values across the whole array, in
        each column, or in each row.

    See also
    --------
    count_nonzero : Number of non-zero entries
    """
    raise NotImplementedError("getnnz not implemented for %s." %
                              self.__class__.__name__)

@property
def nnz(self) -> int:
    """Number of stored values, including explicit zeros.

    See also
    --------
    count_nonzero : Number of non-zero entries
    """
    return self._getnnz()

@property
def size(self) -> int:
    """Number of stored values.

    See also
    --------
    count_nonzero : Number of non-zero values.
    """
    return self._getnnz()

@property
def format(self) -> str:
    """Format string for matrix."""
    return self._format

@property
def A(self) -> np.ndarray:
    """DEPRECATED: Return a dense array.

    .. deprecated:: 1.11.0

       `.A` is deprecated and will be removed in v1.14.0.
       Use `.toarray()` instead.
    """
    # Only the new array API warns; spmatrix keeps the legacy behavior.
    if isinstance(self, sparray):
        message = ("`.A` is deprecated and will be removed in v1.14.0. "
                   "Use `.toarray()` instead.")
        warn(VisibleDeprecationWarning(message), stacklevel=2)
    return self.toarray()

@property
def T(self):
    """Transpose."""
    return self.transpose()

@property
def H(self):
    """DEPRECATED: Returns the (complex) conjugate transpose.

    .. deprecated:: 1.11.0

       `.H` is deprecated and will be removed in v1.14.0.
       Please use `.T.conjugate()` instead.
    """
    if isinstance(self, sparray):
        message = ("`.H` is deprecated and will be removed in v1.14.0. "
                   "Please use `.T.conjugate()` instead.")
        warn(VisibleDeprecationWarning(message), stacklevel=2)
    return self.T.conjugate()
" + "Please use `.T.conjugate()` instead.") + warn(VisibleDeprecationWarning(message), stacklevel=2) + return self.T.conjugate() + + @property + def real(self): + return self._real() + + @property + def imag(self): + return self._imag() + + def __repr__(self): + _, format_name = _formats[self.format] + sparse_cls = 'array' if isinstance(self, sparray) else 'matrix' + shape_str = 'x'.join(str(x) for x in self.shape) + return ( + f"<{shape_str} sparse {sparse_cls} of type '{self.dtype.type}'\n" + f"\twith {self.nnz} stored elements in {format_name} format>" + ) + + def __str__(self): + maxprint = self._getmaxprint() + + A = self.tocoo() + + # helper function, outputs "(i,j) v" + def tostr(row, col, data): + triples = zip(list(zip(row, col)), data) + return '\n'.join([(' {}\t{}'.format(*t)) for t in triples]) + + if self.nnz > maxprint: + half = maxprint // 2 + out = tostr(A.row[:half], A.col[:half], A.data[:half]) + out += "\n :\t:\n" + half = maxprint - maxprint//2 + out += tostr(A.row[-half:], A.col[-half:], A.data[-half:]) + else: + out = tostr(A.row, A.col, A.data) + + return out + + def __bool__(self): # Simple -- other ideas? + if self.shape == (1, 1): + return self.nnz != 0 + else: + raise ValueError("The truth value of an array with more than one " + "element is ambiguous. Use a.any() or a.all().") + __nonzero__ = __bool__ + + # What should len(sparse) return? For consistency with dense matrices, + # perhaps it should be the number of rows? But for some uses the number of + # non-zeros is more important. For now, raise an exception! + def __len__(self): + raise TypeError("sparse array length is ambiguous; use getnnz()" + " or shape[0]") + + def asformat(self, format, copy=False): + """Return this array/matrix in the passed format. + + Parameters + ---------- + format : {str, None} + The desired sparse format ("csr", "csc", "lil", "dok", "array", ...) + or None for no conversion. 
+ copy : bool, optional + If True, the result is guaranteed to not share data with self. + + Returns + ------- + A : This array/matrix in the passed format. + """ + if format is None or format == self.format: + if copy: + return self.copy() + else: + return self + else: + try: + convert_method = getattr(self, 'to' + format) + except AttributeError as e: + raise ValueError(f'Format {format} is unknown.') from e + + # Forward the copy kwarg, if it's accepted. + try: + return convert_method(copy=copy) + except TypeError: + return convert_method() + + ################################################################### + # NOTE: All arithmetic operations use csr_matrix by default. + # Therefore a new sparse array format just needs to define a + # .tocsr() method to provide arithmetic support. Any of these + # methods can be overridden for efficiency. + #################################################################### + + def multiply(self, other): + """Point-wise multiplication by another array/matrix.""" + return self.tocsr().multiply(other) + + def maximum(self, other): + """Element-wise maximum between this and another array/matrix.""" + return self.tocsr().maximum(other) + + def minimum(self, other): + """Element-wise minimum between this and another array/matrix.""" + return self.tocsr().minimum(other) + + def dot(self, other): + """Ordinary dot product + + Examples + -------- + >>> import numpy as np + >>> from scipy.sparse import csr_array + >>> A = csr_array([[1, 2, 0], [0, 0, 3], [4, 0, 5]]) + >>> v = np.array([1, 0, -1]) + >>> A.dot(v) + array([ 1, -3, -1], dtype=int64) + + """ + if np.isscalar(other): + return self * other + else: + return self @ other + + def power(self, n, dtype=None): + """Element-wise power.""" + return self.tocsr().power(n, dtype=dtype) + + def __eq__(self, other): + return self.tocsr().__eq__(other) + + def __ne__(self, other): + return self.tocsr().__ne__(other) + + def __lt__(self, other): + return self.tocsr().__lt__(other) + + 
def __gt__(self, other):
    # Comparisons delegate to the CSR implementation.
    return self.tocsr().__gt__(other)

def __le__(self, other):
    return self.tocsr().__le__(other)

def __ge__(self, other):
    return self.tocsr().__ge__(other)

def __abs__(self):
    return abs(self.tocsr())

def __round__(self, ndigits=0):
    return round(self.tocsr(), ndigits=ndigits)

def _add_sparse(self, other):
    # sparse + sparse: handled by CSR.
    return self.tocsr()._add_sparse(other)

def _add_dense(self, other):
    # sparse + dense: handled by COO (returns a dense result).
    return self.tocoo()._add_dense(other)

def _sub_sparse(self, other):
    return self.tocsr()._sub_sparse(other)

def _sub_dense(self, other):
    return self.todense() - other

def _rsub_dense(self, other):
    # note: this can't be replaced by other + (-self) for unsigned types
    return other - self.todense()

def __add__(self, other):  # self + other
    if isscalarlike(other):
        if other == 0:
            return self.copy()
        # Now we would add this scalar to every element.
        raise NotImplementedError('adding a nonzero scalar to a '
                                  'sparse array is not supported')
    elif issparse(other):
        if other.shape != self.shape:
            raise ValueError("inconsistent shapes")
        return self._add_sparse(other)
    elif isdense(other):
        other = np.broadcast_to(other, self.shape)
        return self._add_dense(other)
    else:
        return NotImplemented

def __radd__(self,other):  # other + self
    return self.__add__(other)

def __sub__(self, other):  # self - other
    if isscalarlike(other):
        if other == 0:
            return self.copy()
        raise NotImplementedError('subtracting a nonzero scalar from a '
                                  'sparse array is not supported')
    elif issparse(other):
        if other.shape != self.shape:
            raise ValueError("inconsistent shapes")
        return self._sub_sparse(other)
    elif isdense(other):
        other = np.broadcast_to(other, self.shape)
        return self._sub_dense(other)
    else:
        return NotImplemented

def __rsub__(self,other):  # other - self
    if isscalarlike(other):
        if other == 0:
            return -self.copy()
        raise NotImplementedError('subtracting a sparse array from a '
                                  'nonzero scalar is not supported')
    elif isdense(other):
        other = np.broadcast_to(other, self.shape)
        return self._rsub_dense(other)
    else:
        return NotImplemented

def _matmul_dispatch(self, other):
    """np.array-like matmul & `np.matrix`-like mul, i.e. `dot` or `NotImplemented`

    interpret other and call one of the following
    self._mul_scalar()
    self._matmul_vector()
    self._matmul_multivector()
    self._matmul_sparse()
    """
    # This method has to be different from `__matmul__` because it is also
    # called by sparse matrix classes.

    # Currently matrix multiplication is only supported
    # for 2D arrays. Hence we unpacked and use only the
    # two last axes' lengths.
    M, N = self._shape_as_2d

    if other.__class__ is np.ndarray:
        # Fast path for the most common case
        if other.shape == (N,):
            return self._matmul_vector(other)
        elif other.shape == (N, 1):
            # Column vector: multiply against the raveled data, then
            # restore the (M, 1) column shape for 2-D self.
            result = self._matmul_vector(other.ravel())
            if self.ndim == 1:
                return result
            return result.reshape(M, 1)
        elif other.ndim == 2 and other.shape[0] == N:
            return self._matmul_multivector(other)

    if isscalarlike(other):
        # scalar value
        return self._mul_scalar(other)

    if issparse(other):
        if self.shape[-1] != other.shape[0]:
            raise ValueError('dimension mismatch')
        if other.ndim == 1:
            raise ValueError('Cannot yet multiply a 1d sparse array')
        return self._matmul_sparse(other)

    # If it's a list or whatever, treat it like an array
    other_a = np.asanyarray(other)

    if other_a.ndim == 0 and other_a.dtype == np.object_:
        # Not interpretable as an array; return NotImplemented so that
        # other's __rmatmul__ can kick in if that's implemented.
        return NotImplemented

    # Keep `other` as-is if it is already array-like (has a shape);
    # otherwise fall back to the converted array.
    try:
        other.shape
    except AttributeError:
        other = other_a

    if other.ndim == 1 or other.ndim == 2 and other.shape[1] == 1:
        # dense row or column vector
        if other.shape != (N,) and other.shape != (N, 1):
            raise ValueError('dimension mismatch')

        result = self._matmul_vector(np.ravel(other))

        if isinstance(other, np.matrix):
            result = self._ascontainer(result)

        if other.ndim == 2 and other.shape[1] == 1:
            # If 'other' was an (nx1) column vector, reshape the result
            result = result.reshape(-1, 1)

        return result

    elif other.ndim == 2:
        ##
        # dense 2D array or matrix ("multivector")

        if other.shape[0] != N:
            raise ValueError('dimension mismatch')

        result = self._matmul_multivector(np.asarray(other))

        if isinstance(other, np.matrix):
            result = self._ascontainer(result)

        return result

    else:
        raise ValueError('could not interpret dimensions')
def __mul__(self, *args, **kwargs):
    # For sparse arrays `*` is element-wise multiplication, not matmul.
    return self.multiply(*args, **kwargs)

def __rmul__(self, *args, **kwargs):  # other * self
    return self.multiply(*args, **kwargs)

# by default, use CSR for __mul__ handlers
def _mul_scalar(self, other):
    return self.tocsr()._mul_scalar(other)

def _matmul_vector(self, other):
    return self.tocsr()._matmul_vector(other)

def _matmul_multivector(self, other):
    return self.tocsr()._matmul_multivector(other)

def _matmul_sparse(self, other):
    return self.tocsr()._matmul_sparse(other)

def _rmatmul_dispatch(self, other):
    """Handle `other @ self` via (other @ self) == (self.T @ other.T).T."""
    if isscalarlike(other):
        return self._mul_scalar(other)
    else:
        # Don't use asarray unless we have to
        try:
            tr = other.transpose()
        except AttributeError:
            tr = np.asarray(other).transpose()
        ret = self.transpose()._matmul_dispatch(tr)
        if ret is NotImplemented:
            return NotImplemented
        return ret.transpose()

#######################
# matmul (@) operator #
#######################

def __matmul__(self, other):
    if isscalarlike(other):
        raise ValueError("Scalar operands are not allowed, "
                         "use '*' instead")
    return self._matmul_dispatch(other)

def __rmatmul__(self, other):
    if isscalarlike(other):
        raise ValueError("Scalar operands are not allowed, "
                         "use '*' instead")
    return self._rmatmul_dispatch(other)

####################
# Other Arithmetic #
####################

def _divide(self, other, true_divide=False, rdivide=False):
    """Shared implementation of / and legacy div; `rdivide` means other/self."""
    if isscalarlike(other):
        if rdivide:
            # scalar / sparse densifies.
            if true_divide:
                return np.true_divide(other, self.todense())
            else:
                return np.divide(other, self.todense())

        if true_divide and np.can_cast(self.dtype, np.float64):
            return self.astype(np.float64)._mul_scalar(1./other)
        else:
            r = self._mul_scalar(1./other)

            # int / int keeps the original integer dtype (floor-like
            # legacy behavior); anything else keeps the computed dtype.
            scalar_dtype = np.asarray(other).dtype
            if (np.issubdtype(self.dtype, np.integer) and
                    np.issubdtype(scalar_dtype, np.integer)):
                return r.astype(self.dtype)
            else:
                return r

    elif isdense(other):
        if not rdivide:
            # sparse / dense: multiply by the element-wise reciprocal,
            # which preserves sparsity.
            if true_divide:
                recip = np.true_divide(1., other)
            else:
                recip = np.divide(1., other)
            return self.multiply(recip)
        else:
            if true_divide:
                return np.true_divide(other, self.todense())
            else:
                return np.divide(other, self.todense())
    elif issparse(other):
        if rdivide:
            return other._divide(self, true_divide, rdivide=False)

        self_csr = self.tocsr()
        if true_divide and np.can_cast(self.dtype, np.float64):
            return self_csr.astype(np.float64)._divide_sparse(other)
        else:
            return self_csr._divide_sparse(other)
    else:
        return NotImplemented

def __truediv__(self, other):
    return self._divide(other, true_divide=True)

def __div__(self, other):
    # Always do true division
    return self._divide(other, true_divide=True)

def __rtruediv__(self, other):
    # Implementing this as the inverse would be too magical -- bail out
    return NotImplemented

def __rdiv__(self, other):
    # Implementing this as the inverse would be too magical -- bail out
    return NotImplemented

def __neg__(self):
    return -self.tocsr()
def __iadd__(self, other):
    # In-place arithmetic is not supported; Python falls back to __add__.
    return NotImplemented

def __isub__(self, other):
    return NotImplemented

def __imul__(self, other):
    return NotImplemented

def __idiv__(self, other):
    return self.__itruediv__(other)

def __itruediv__(self, other):
    return NotImplemented

def __pow__(self, *args, **kwargs):
    return self.power(*args, **kwargs)

def transpose(self, axes=None, copy=False):
    """
    Reverses the dimensions of the sparse array/matrix.

    Parameters
    ----------
    axes : None, optional
        This argument is in the signature *solely* for NumPy
        compatibility reasons. Do not pass in anything except
        for the default value.
    copy : bool, optional
        Indicates whether or not attributes of `self` should be
        copied whenever possible. The degree of copying varies by
        sparse format.

    Returns
    -------
    p : `self` with the dimensions reversed.

    Notes
    -----
    If `self` is a `csr_array` or a `csc_array`, then this will return a
    `csc_array` or a `csr_array`, respectively.

    See Also
    --------
    numpy.transpose : NumPy's implementation of 'transpose' for ndarrays
    """
    return self.tocsr(copy=copy).transpose(axes=axes, copy=False)

def conjugate(self, copy=True):
    """Element-wise complex conjugation.

    If the array/matrix is of non-complex data type and `copy` is False,
    this method does nothing and the data is not copied.

    Parameters
    ----------
    copy : bool, optional
        If True, the result is guaranteed to not share data with self.

    Returns
    -------
    A : The element-wise complex conjugate.

    """
    if np.issubdtype(self.dtype, np.complexfloating):
        return self.tocsr(copy=copy).conjugate(copy=False)
    # Real-valued data: conjugation is the identity.
    return self.copy() if copy else self

def conj(self, copy=True):
    return self.conjugate(copy=copy)

conj.__doc__ = conjugate.__doc__

def _real(self):
    return self.tocsr()._real()

def _imag(self):
    return self.tocsr()._imag()

def nonzero(self):
    """Nonzero indices of the array/matrix.

    Returns a tuple of arrays (row,col) containing the indices
    of the non-zero elements of the array.

    Examples
    --------
    >>> from scipy.sparse import csr_array
    >>> A = csr_array([[1,2,0],[0,0,3],[4,0,5]])
    >>> A.nonzero()
    (array([0, 0, 1, 2, 2]), array([0, 1, 2, 0, 2]))

    """
    # COO may store explicit zeros, so filter them out of the result.
    coo = self.tocoo()
    keep = coo.data != 0
    return (coo.row[keep], coo.col[keep])

def _getcol(self, j):
    """Returns a copy of column j of the array, as an (m x 1) sparse
    array (column vector).
    """
    if self.ndim == 1:
        raise ValueError("getcol not provided for 1d arrays. Use indexing A[j]")
    # Subclasses should override this method for efficiency.
    # Post-multiply by the j-th standard basis column vector.
    n_cols = self.shape[-1]
    if j < 0:
        j += n_cols
    if not 0 <= j < n_cols:
        raise IndexError("index out of bounds")
    selector = self._csc_container(([1], [[j], [0]]),
                                   shape=(n_cols, 1), dtype=self.dtype)
    return self @ selector

def _getrow(self, i):
    """Returns a copy of row i of the array, as a (1 x n) sparse
    array (row vector).
    """
    if self.ndim == 1:
        raise ValueError("getrow not meaningful for a 1d array")
    # Subclasses should override this method for efficiency.
    # Pre-multiply by the i-th standard basis row vector.
    n_rows = self.shape[0]
    if i < 0:
        i += n_rows
    if not 0 <= i < n_rows:
        raise IndexError("index out of bounds")
    selector = self._csr_container(([1], [[0], [i]]),
                                   shape=(1, n_rows), dtype=self.dtype)
    return selector @ self
# The following dunder methods cannot be implemented.
#
# def __array__(self):
#     # Sparse matrices rely on NumPy wrapping them in object arrays under
#     # the hood to make unary ufuncs work on them. So we cannot raise
#     # TypeError here - which would be handy to not give users object
#     # arrays they probably don't want (they're looking for `.toarray()`).
#     #
#     # Conversion with `toarray()` would also break things because of the
#     # behavior discussed above, plus we want to avoid densification by
#     # accident because that can too easily blow up memory.
#
# def __array_ufunc__(self):
#     # We cannot implement __array_ufunc__ due to mismatching semantics.
#     # See gh-7707 and gh-7349 for details.
#
# def __array_function__(self):
#     # We cannot implement __array_function__ due to mismatching semantics.
#     # See gh-10362 for details.

def todense(self, order=None, out=None):
    """
    Return a dense representation of this sparse array/matrix.

    Parameters
    ----------
    order : {'C', 'F'}, optional
        Whether to store multi-dimensional data in C (row-major)
        or Fortran (column-major) order in memory. The default
        is 'None', which provides no ordering guarantees.
        Cannot be specified in conjunction with the `out` argument.
    out : ndarray, 2-D, optional
        If specified, uses this array (or `numpy.matrix`) as the
        output buffer instead of allocating a new array to return.
        Must have the same shape and dtype as this sparse object.

    Returns
    -------
    arr : numpy.matrix, 2-D
        A matrix (or, for the array API, an ndarray) with the same shape
        and data as this sparse object. If `out` was passed as a plain
        array it is filled in-place and returned wrapped in a
        `numpy.matrix` sharing the same memory.
    """
    return self._ascontainer(self.toarray(order=order, out=out))

def toarray(self, order=None, out=None):
    """
    Return a dense ndarray representation of this sparse array/matrix.

    Parameters
    ----------
    order : {'C', 'F'}, optional
        Whether to store multidimensional data in C (row-major)
        or Fortran (column-major) order in memory. The default
        is 'None', which provides no ordering guarantees.
        Cannot be specified in conjunction with the `out` argument.
    out : ndarray, 2-D, optional
        If specified, uses this array as the output buffer instead of
        allocating a new array. Must have the same shape and dtype as
        this sparse object, and for most formats must be memory
        contiguous (either C or Fortran ordered).

    Returns
    -------
    arr : ndarray, 2-D
        An array with the same shape and data as this sparse object.
        If `out` was passed, the same object is returned after being
        modified in-place.
    """
    # Default implementation: densify via COO.
    return self.tocoo(copy=False).toarray(order=order, out=out)

# Any sparse array format deriving from _spbase must define one of
# tocsr or tocoo. The other conversion methods may be implemented for
# efficiency, but are not required.
def tocsr(self, copy=False):
    """Convert this array/matrix to Compressed Sparse Row format.

    With copy=False, the data/indices may be shared between this array/matrix and
    the resultant csr_array/matrix.
    """
    return self.tocoo(copy=copy).tocsr(copy=False)
+ """ + return self.tocoo(copy=copy).tocsr(copy=False) + + def todok(self, copy=False): + """Convert this array/matrix to Dictionary Of Keys format. + + With copy=False, the data/indices may be shared between this array/matrix and + the resultant dok_array/matrix. + """ + return self.tocoo(copy=copy).todok(copy=False) + + def tocoo(self, copy=False): + """Convert this array/matrix to COOrdinate format. + + With copy=False, the data/indices may be shared between this array/matrix and + the resultant coo_array/matrix. + """ + return self.tocsr(copy=False).tocoo(copy=copy) + + def tolil(self, copy=False): + """Convert this array/matrix to List of Lists format. + + With copy=False, the data/indices may be shared between this array/matrix and + the resultant lil_array/matrix. + """ + return self.tocsr(copy=False).tolil(copy=copy) + + def todia(self, copy=False): + """Convert this array/matrix to sparse DIAgonal format. + + With copy=False, the data/indices may be shared between this array/matrix and + the resultant dia_array/matrix. + """ + return self.tocoo(copy=copy).todia(copy=False) + + def tobsr(self, blocksize=None, copy=False): + """Convert this array/matrix to Block Sparse Row format. + + With copy=False, the data/indices may be shared between this array/matrix and + the resultant bsr_array/matrix. + + When blocksize=(R, C) is provided, it will be used for construction of + the bsr_array/matrix. + """ + return self.tocsr(copy=False).tobsr(blocksize=blocksize, copy=copy) + + def tocsc(self, copy=False): + """Convert this array/matrix to Compressed Sparse Column format. + + With copy=False, the data/indices may be shared between this array/matrix and + the resultant csc_array/matrix. + """ + return self.tocsr(copy=copy).tocsc(copy=False) + + def copy(self): + """Returns a copy of this array/matrix. + + No data/indices will be shared between the returned value and current + array/matrix. 
+ """ + return self.__class__(self, copy=True) + + def sum(self, axis=None, dtype=None, out=None): + """ + Sum the array/matrix elements over a given axis. + + Parameters + ---------- + axis : {-2, -1, 0, 1, None} optional + Axis along which the sum is computed. The default is to + compute the sum of all the array/matrix elements, returning a scalar + (i.e., `axis` = `None`). + dtype : dtype, optional + The type of the returned array/matrix and of the accumulator in which + the elements are summed. The dtype of `a` is used by default + unless `a` has an integer dtype of less precision than the default + platform integer. In that case, if `a` is signed then the platform + integer is used while if `a` is unsigned then an unsigned integer + of the same precision as the platform integer is used. + + .. versionadded:: 0.18.0 + + out : np.matrix, optional + Alternative output matrix in which to place the result. It must + have the same shape as the expected output, but the type of the + output values will be cast if necessary. + + .. versionadded:: 0.18.0 + + Returns + ------- + sum_along_axis : np.matrix + A matrix with the same shape as `self`, with the specified + axis removed. + + See Also + -------- + numpy.matrix.sum : NumPy's implementation of 'sum' for matrices + + """ + validateaxis(axis) + + # Mimic numpy's casting. + res_dtype = get_sum_dtype(self.dtype) + + if self.ndim == 1: + if axis not in (None, -1, 0): + raise ValueError("axis must be None, -1 or 0") + ret = (self @ np.ones(self.shape, dtype=res_dtype)).astype(dtype) + + if out is not None: + if any(dim != 1 for dim in out.shape): + raise ValueError("dimensions do not match") + out[...] = ret + return ret + + # We use multiplication by a matrix of ones to achieve this. + # For some sparse array formats more efficient methods are + # possible -- these should override this function. 
+ M, N = self.shape + + if axis is None: + # sum over rows and columns + return ( + self @ self._ascontainer(np.ones((N, 1), dtype=res_dtype)) + ).sum(dtype=dtype, out=out) + + if axis < 0: + axis += 2 + + # axis = 0 or 1 now + if axis == 0: + # sum over columns + ret = self._ascontainer( + np.ones((1, M), dtype=res_dtype) + ) @ self + else: + # sum over rows + ret = self @ self._ascontainer( + np.ones((N, 1), dtype=res_dtype) + ) + + if out is not None and out.shape != ret.shape: + raise ValueError("dimensions do not match") + + return ret.sum(axis=axis, dtype=dtype, out=out) + + def mean(self, axis=None, dtype=None, out=None): + """ + Compute the arithmetic mean along the specified axis. + + Returns the average of the array/matrix elements. The average is taken + over all elements in the array/matrix by default, otherwise over the + specified axis. `float64` intermediate and return values are used + for integer inputs. + + Parameters + ---------- + axis : {-2, -1, 0, 1, None} optional + Axis along which the mean is computed. The default is to compute + the mean of all elements in the array/matrix (i.e., `axis` = `None`). + dtype : data-type, optional + Type to use in computing the mean. For integer inputs, the default + is `float64`; for floating point inputs, it is the same as the + input dtype. + + .. versionadded:: 0.18.0 + + out : np.matrix, optional + Alternative output matrix in which to place the result. It must + have the same shape as the expected output, but the type of the + output values will be cast if necessary. + + .. 
versionadded:: 0.18.0 + + Returns + ------- + m : np.matrix + + See Also + -------- + numpy.matrix.mean : NumPy's implementation of 'mean' for matrices + + """ + validateaxis(axis) + + res_dtype = self.dtype.type + integral = (np.issubdtype(self.dtype, np.integer) or + np.issubdtype(self.dtype, np.bool_)) + + # output dtype + if dtype is None: + if integral: + res_dtype = np.float64 + else: + res_dtype = np.dtype(dtype).type + + # intermediate dtype for summation + inter_dtype = np.float64 if integral else res_dtype + inter_self = self.astype(inter_dtype) + + if self.ndim == 1: + if axis not in (None, -1, 0): + raise ValueError("axis must be None, -1 or 0") + res = inter_self / self.shape[-1] + return res.sum(dtype=res_dtype, out=out) + + if axis is None: + return (inter_self / (self.shape[0] * self.shape[1]))\ + .sum(dtype=res_dtype, out=out) + + if axis < 0: + axis += 2 + + # axis = 0 or 1 now + if axis == 0: + return (inter_self * (1.0 / self.shape[0])).sum( + axis=0, dtype=res_dtype, out=out) + else: + return (inter_self * (1.0 / self.shape[1])).sum( + axis=1, dtype=res_dtype, out=out) + + def diagonal(self, k=0): + """Returns the kth diagonal of the array/matrix. + + Parameters + ---------- + k : int, optional + Which diagonal to get, corresponding to elements a[i, i+k]. + Default: 0 (the main diagonal). + + .. versionadded:: 1.0 + + See also + -------- + numpy.diagonal : Equivalent numpy function. + + Examples + -------- + >>> from scipy.sparse import csr_array + >>> A = csr_array([[1, 2, 0], [0, 0, 3], [4, 0, 5]]) + >>> A.diagonal() + array([1, 0, 5]) + >>> A.diagonal(k=1) + array([2, 3]) + """ + return self.tocsr().diagonal(k=k) + + def trace(self, offset=0): + """Returns the sum along diagonals of the sparse array/matrix. + + Parameters + ---------- + offset : int, optional + Which diagonal to get, corresponding to elements a[i, i+offset]. + Default: 0 (the main diagonal). 
+ + """ + return self.diagonal(k=offset).sum() + + def setdiag(self, values, k=0): + """ + Set diagonal or off-diagonal elements of the array/matrix. + + Parameters + ---------- + values : array_like + New values of the diagonal elements. + + Values may have any length. If the diagonal is longer than values, + then the remaining diagonal entries will not be set. If values are + longer than the diagonal, then the remaining values are ignored. + + If a scalar value is given, all of the diagonal is set to it. + + k : int, optional + Which off-diagonal to set, corresponding to elements a[i,i+k]. + Default: 0 (the main diagonal). + + """ + M, N = self.shape + if (k > 0 and k >= N) or (k < 0 and -k >= M): + raise ValueError("k exceeds array dimensions") + self._setdiag(np.asarray(values), k) + + def _setdiag(self, values, k): + """This part of the implementation gets overridden by the + different formats. + """ + M, N = self.shape + if k < 0: + if values.ndim == 0: + # broadcast + max_index = min(M+k, N) + for i in range(max_index): + self[i - k, i] = values + else: + max_index = min(M+k, N, len(values)) + if max_index <= 0: + return + for i, v in enumerate(values[:max_index]): + self[i - k, i] = v + else: + if values.ndim == 0: + # broadcast + max_index = min(M, N-k) + for i in range(max_index): + self[i, i + k] = values + else: + max_index = min(M, N-k, len(values)) + if max_index <= 0: + return + for i, v in enumerate(values[:max_index]): + self[i, i + k] = v + + def _process_toarray_args(self, order, out): + if out is not None: + if order is not None: + raise ValueError('order cannot be specified if out ' + 'is not None') + if out.shape != self.shape or out.dtype != self.dtype: + raise ValueError('out array must be same dtype and shape as ' + 'sparse array') + out[...] = 0. 
+ return out + else: + return np.zeros(self.shape, dtype=self.dtype, order=order) + + def _get_index_dtype(self, arrays=(), maxval=None, check_contents=False): + """ + Determine index dtype for array. + + This wraps _sputils.get_index_dtype, providing compatibility for both + array and matrix API sparse matrices. Matrix API sparse matrices would + attempt to downcast the indices - which can be computationally + expensive and undesirable for users. The array API changes this + behaviour. + + See discussion: https://github.com/scipy/scipy/issues/16774 + + The get_index_dtype import is due to implementation details of the test + suite. It allows the decorator ``with_64bit_maxval_limit`` to mock a + lower int32 max value for checks on the matrix API's downcasting + behaviour. + """ + from ._sputils import get_index_dtype + + # Don't check contents for array API + return get_index_dtype(arrays, + maxval, + (check_contents and not isinstance(self, sparray))) + + + ## All methods below are deprecated and should be removed in + ## scipy 1.14.0 + ## + ## Also uncomment the definition of shape above. + + def get_shape(self): + """Get shape of a sparse array/matrix. + + .. deprecated:: 1.11.0 + This method will be removed in SciPy 1.14.0. + Use `X.shape` instead. + """ + msg = ( + "`get_shape` is deprecated and will be removed in v1.14.0; " + "use `X.shape` instead." + ) + warn(msg, DeprecationWarning, stacklevel=2) + + return self._shape + + def set_shape(self, shape): + """See `reshape`. + + .. deprecated:: 1.11.0 + This method will be removed in SciPy 1.14.0. + Use `X.reshape` instead. + """ + msg = ( + "Shape assignment is deprecated and will be removed in v1.14.0; " + "use `reshape` instead." 
+ ) + warn(msg, DeprecationWarning, stacklevel=2) + + # Make sure copy is False since this is in place + # Make sure format is unchanged because we are doing a __dict__ swap + new_self = self.reshape(shape, copy=False).asformat(self.format) + self.__dict__ = new_self.__dict__ + + shape = property( + fget=lambda self: self._shape, + fset=set_shape, + doc="""The shape of the array. + +Note that, starting in SciPy 1.14.0, this property will no longer be +settable. To change the array shape, use `X.reshape` instead. +""" + ) + + def asfptype(self): + """Upcast array/matrix to a floating point format (if necessary) + + .. deprecated:: 1.11.0 + This method is for internal use only, and will be removed from the + public API in SciPy 1.14.0. + """ + msg = ( + "`asfptype` is an internal function, and is deprecated " + "as part of the public API. It will be removed in v1.14.0." + ) + warn(msg, DeprecationWarning, stacklevel=2) + return self._asfptype() + + def getmaxprint(self): + """Maximum number of elements to display when printed. + + .. deprecated:: 1.11.0 + This method is for internal use only, and will be removed from the + public API in SciPy 1.14.0. + """ + msg = ( + "`getmaxprint` is an internal function, and is deprecated " + "as part of the public API. It will be removed in v1.14.0." + ) + warn(msg, DeprecationWarning, stacklevel=2) + return self._getmaxprint() + + def getformat(self): + """Sparse array/matrix storage format. + + .. deprecated:: 1.11.0 + This method will be removed in SciPy 1.14.0. + Use `X.format` instead. + """ + msg = ( + "`getformat` is deprecated and will be removed in v1.14.0; " + "use `X.format` instead." + ) + warn(msg, DeprecationWarning, stacklevel=2) + return self.format + + def getnnz(self, axis=None): + """Number of stored values, including explicit zeros. + + Parameters + ---------- + axis : None, 0, or 1 + Select between the number of values across the whole array/matrix, in + each column, or in each row. 
+ + See also + -------- + count_nonzero : Number of non-zero entries + """ + return self._getnnz(axis=axis) + + def getH(self): + """Return the Hermitian transpose of this array/matrix. + + .. deprecated:: 1.11.0 + This method will be removed in SciPy 1.14.0. + Use `X.conj().T` instead. + """ + msg = ( + "`getH` is deprecated and will be removed in v1.14.0; " + "use `X.conj().T` instead." + ) + warn(msg, DeprecationWarning, stacklevel=2) + return self.conjugate().transpose() + + def getcol(self, j): + """Returns a copy of column j of the array/matrix, as an (m x 1) sparse + array/matrix (column vector). + + .. deprecated:: 1.11.0 + This method will be removed in SciPy 1.14.0. + Use array/matrix indexing instead. + """ + msg = ( + "`getcol` is deprecated and will be removed in v1.14.0; " + f"use `X[:, [{j}]]` instead." + ) + warn(msg, DeprecationWarning, stacklevel=2) + return self._getcol(j) + + def getrow(self, i): + """Returns a copy of row i of the array/matrix, as a (1 x n) sparse + array/matrix (row vector). + + .. deprecated:: 1.11.0 + This method will be removed in SciPy 1.14.0. + Use array/matrix indexing instead. + """ + msg = ( + "`getrow` is deprecated and will be removed in v1.14.0; " + f"use `X[[{i}]]` instead." + ) + warn(msg, DeprecationWarning, stacklevel=2) + return self._getrow(i) + + ## End 1.14.0 deprecated methods + + +class sparray: + """A namespace class to separate sparray from spmatrix""" + pass + +sparray.__doc__ = _spbase.__doc__ + + +def issparse(x): + """Is `x` of a sparse array or sparse matrix type? 
+ + Parameters + ---------- + x + object to check for being a sparse array or sparse matrix + + Returns + ------- + bool + True if `x` is a sparse array or a sparse matrix, False otherwise + + Examples + -------- + >>> import numpy as np + >>> from scipy.sparse import csr_array, csr_matrix, issparse + >>> issparse(csr_matrix([[5]])) + True + >>> issparse(csr_array([[5]])) + True + >>> issparse(np.array([[5]])) + False + >>> issparse(5) + False + """ + return isinstance(x, _spbase) + + +def isspmatrix(x): + """Is `x` of a sparse matrix type? + + Parameters + ---------- + x + object to check for being a sparse matrix + + Returns + ------- + bool + True if `x` is a sparse matrix, False otherwise + + Examples + -------- + >>> import numpy as np + >>> from scipy.sparse import csr_array, csr_matrix, isspmatrix + >>> isspmatrix(csr_matrix([[5]])) + True + >>> isspmatrix(csr_array([[5]])) + False + >>> isspmatrix(np.array([[5]])) + False + >>> isspmatrix(5) + False + """ + return isinstance(x, spmatrix) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_bsr.py b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_bsr.py new file mode 100644 index 0000000000000000000000000000000000000000..8702fcdc9b4583e2c73912fae6153938e1dd026b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_bsr.py @@ -0,0 +1,855 @@ +"""Compressed Block Sparse Row format""" + +__docformat__ = "restructuredtext en" + +__all__ = ['bsr_array', 'bsr_matrix', 'isspmatrix_bsr'] + +from warnings import warn + +import numpy as np + +from scipy._lib._util import copy_if_needed +from ._matrix import spmatrix +from ._data import _data_matrix, _minmax_mixin +from ._compressed import _cs_matrix +from ._base import issparse, _formats, _spbase, sparray +from ._sputils import (isshape, getdtype, getdata, to_native, upcast, + check_shape) +from . 
import _sparsetools +from ._sparsetools import (bsr_matvec, bsr_matvecs, csr_matmat_maxnnz, + bsr_matmat, bsr_transpose, bsr_sort_indices, + bsr_tocsr) + + +class _bsr_base(_cs_matrix, _minmax_mixin): + _format = 'bsr' + + def __init__(self, arg1, shape=None, dtype=None, copy=False, blocksize=None): + _data_matrix.__init__(self) + + if issparse(arg1): + if arg1.format == self.format and copy: + arg1 = arg1.copy() + else: + arg1 = arg1.tobsr(blocksize=blocksize) + self.indptr, self.indices, self.data, self._shape = ( + arg1.indptr, arg1.indices, arg1.data, arg1._shape + ) + + elif isinstance(arg1,tuple): + if isshape(arg1): + # it's a tuple of matrix dimensions (M,N) + self._shape = check_shape(arg1) + M,N = self.shape + # process blocksize + if blocksize is None: + blocksize = (1,1) + else: + if not isshape(blocksize): + raise ValueError('invalid blocksize=%s' % blocksize) + blocksize = tuple(blocksize) + self.data = np.zeros((0,) + blocksize, getdtype(dtype, default=float)) + + R,C = blocksize + if (M % R) != 0 or (N % C) != 0: + raise ValueError('shape must be multiple of blocksize') + + # Select index dtype large enough to pass array and + # scalar parameters to sparsetools + idx_dtype = self._get_index_dtype(maxval=max(M//R, N//C, R, C)) + self.indices = np.zeros(0, dtype=idx_dtype) + self.indptr = np.zeros(M//R + 1, dtype=idx_dtype) + + elif len(arg1) == 2: + # (data,(row,col)) format + coo = self._coo_container(arg1, dtype=dtype, shape=shape) + bsr = coo.tobsr(blocksize=blocksize) + self.indptr, self.indices, self.data, self._shape = ( + bsr.indptr, bsr.indices, bsr.data, bsr._shape + ) + + elif len(arg1) == 3: + # (data,indices,indptr) format + (data, indices, indptr) = arg1 + + # Select index dtype large enough to pass array and + # scalar parameters to sparsetools + maxval = 1 + if shape is not None: + maxval = max(shape) + if blocksize is not None: + maxval = max(maxval, max(blocksize)) + idx_dtype = self._get_index_dtype((indices, indptr), maxval=maxval, 
+ check_contents=True) + if not copy: + copy = copy_if_needed + self.indices = np.array(indices, copy=copy, dtype=idx_dtype) + self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype) + self.data = getdata(data, copy=copy, dtype=dtype) + if self.data.ndim != 3: + raise ValueError( + f'BSR data must be 3-dimensional, got shape={self.data.shape}' + ) + if blocksize is not None: + if not isshape(blocksize): + raise ValueError(f'invalid blocksize={blocksize}') + if tuple(blocksize) != self.data.shape[1:]: + raise ValueError('mismatching blocksize={} vs {}'.format( + blocksize, self.data.shape[1:])) + else: + raise ValueError('unrecognized bsr_array constructor usage') + else: + # must be dense + try: + arg1 = np.asarray(arg1) + except Exception as e: + raise ValueError("unrecognized form for" + " %s_matrix constructor" % self.format) from e + arg1 = self._coo_container( + arg1, dtype=dtype + ).tobsr(blocksize=blocksize) + self.indptr, self.indices, self.data, self._shape = ( + arg1.indptr, arg1.indices, arg1.data, arg1._shape + ) + + if shape is not None: + self._shape = check_shape(shape) + else: + if self.shape is None: + # shape not already set, try to infer dimensions + try: + M = len(self.indptr) - 1 + N = self.indices.max() + 1 + except Exception as e: + raise ValueError('unable to infer matrix dimensions') from e + else: + R,C = self.blocksize + self._shape = check_shape((M*R,N*C)) + + if self.shape is None: + if shape is None: + # TODO infer shape here + raise ValueError('need to infer shape') + else: + self._shape = check_shape(shape) + + if dtype is not None: + self.data = self.data.astype(dtype, copy=False) + + self.check_format(full_check=False) + + def check_format(self, full_check=True): + """Check whether the array/matrix respects the BSR format. + + Parameters + ---------- + full_check : bool, optional + If `True`, run rigorous check, scanning arrays for valid values. 
+ Note that activating those check might copy arrays for casting, + modifying indices and index pointers' inplace. + If `False`, run basic checks on attributes. O(1) operations. + Default is `True`. + """ + M,N = self.shape + R,C = self.blocksize + + # index arrays should have integer data types + if self.indptr.dtype.kind != 'i': + warn(f"indptr array has non-integer dtype ({self.indptr.dtype.name})", + stacklevel=2) + if self.indices.dtype.kind != 'i': + warn(f"indices array has non-integer dtype ({self.indices.dtype.name})", + stacklevel=2) + + # check array shapes + if self.indices.ndim != 1 or self.indptr.ndim != 1: + raise ValueError("indices, and indptr should be 1-D") + if self.data.ndim != 3: + raise ValueError("data should be 3-D") + + # check index pointer + if (len(self.indptr) != M//R + 1): + raise ValueError("index pointer size (%d) should be (%d)" % + (len(self.indptr), M//R + 1)) + if (self.indptr[0] != 0): + raise ValueError("index pointer should start with 0") + + # check index and data arrays + if (len(self.indices) != len(self.data)): + raise ValueError("indices and data should have the same size") + if (self.indptr[-1] > len(self.indices)): + raise ValueError("Last value of index pointer should be less than " + "the size of index and data arrays") + + self.prune() + + if full_check: + # check format validity (more expensive) + if self.nnz > 0: + if self.indices.max() >= N//C: + raise ValueError("column index values must be < %d (now max %d)" + % (N//C, self.indices.max())) + if self.indices.min() < 0: + raise ValueError("column index values must be >= 0") + if np.diff(self.indptr).min() < 0: + raise ValueError("index pointer values must form a " + "non-decreasing sequence") + + idx_dtype = self._get_index_dtype((self.indices, self.indptr)) + self.indptr = np.asarray(self.indptr, dtype=idx_dtype) + self.indices = np.asarray(self.indices, dtype=idx_dtype) + self.data = to_native(self.data) + # if not self.has_sorted_indices(): + # warn('Indices 
were not in sorted order. Sorting indices.') + # self.sort_indices(check_first=False) + + @property + def blocksize(self) -> tuple: + """Block size of the matrix.""" + return self.data.shape[1:] + + def _getnnz(self, axis=None): + if axis is not None: + raise NotImplementedError("_getnnz over an axis is not implemented " + "for BSR format") + R,C = self.blocksize + return int(self.indptr[-1] * R * C) + + _getnnz.__doc__ = _spbase._getnnz.__doc__ + + def __repr__(self): + _, fmt = _formats[self.format] + sparse_cls = 'array' if isinstance(self, sparray) else 'matrix' + shape_str = 'x'.join(str(x) for x in self.shape) + blksz = 'x'.join(str(x) for x in self.blocksize) + return ( + f"<{shape_str} sparse {sparse_cls} of type '{self.dtype.type}'\n" + f"\twith {self.nnz} stored elements (blocksize = {blksz}) in {fmt} format>" + ) + + def diagonal(self, k=0): + rows, cols = self.shape + if k <= -rows or k >= cols: + return np.empty(0, dtype=self.data.dtype) + R, C = self.blocksize + y = np.zeros(min(rows + min(k, 0), cols - max(k, 0)), + dtype=upcast(self.dtype)) + _sparsetools.bsr_diagonal(k, rows // R, cols // C, R, C, + self.indptr, self.indices, + np.ravel(self.data), y) + return y + + diagonal.__doc__ = _spbase.diagonal.__doc__ + + ########################## + # NotImplemented methods # + ########################## + + def __getitem__(self,key): + raise NotImplementedError + + def __setitem__(self,key,val): + raise NotImplementedError + + ###################### + # Arithmetic methods # + ###################### + + def _add_dense(self, other): + return self.tocoo(copy=False)._add_dense(other) + + def _matmul_vector(self, other): + M,N = self.shape + R,C = self.blocksize + + result = np.zeros(self.shape[0], dtype=upcast(self.dtype, other.dtype)) + + bsr_matvec(M//R, N//C, R, C, + self.indptr, self.indices, self.data.ravel(), + other, result) + + return result + + def _matmul_multivector(self,other): + R,C = self.blocksize + M,N = self.shape + n_vecs = other.shape[1] # 
number of column vectors + + result = np.zeros((M,n_vecs), dtype=upcast(self.dtype,other.dtype)) + + bsr_matvecs(M//R, N//C, n_vecs, R, C, + self.indptr, self.indices, self.data.ravel(), + other.ravel(), result.ravel()) + + return result + + def _matmul_sparse(self, other): + M, K1 = self.shape + K2, N = other.shape + + R,n = self.blocksize + + # convert to this format + if other.format == "bsr": + C = other.blocksize[1] + else: + C = 1 + + if other.format == "csr" and n == 1: + other = other.tobsr(blocksize=(n,C), copy=False) # lightweight conversion + else: + other = other.tobsr(blocksize=(n,C)) + + idx_dtype = self._get_index_dtype((self.indptr, self.indices, + other.indptr, other.indices)) + + bnnz = csr_matmat_maxnnz(M//R, N//C, + self.indptr.astype(idx_dtype), + self.indices.astype(idx_dtype), + other.indptr.astype(idx_dtype), + other.indices.astype(idx_dtype)) + + idx_dtype = self._get_index_dtype((self.indptr, self.indices, + other.indptr, other.indices), + maxval=bnnz) + indptr = np.empty(self.indptr.shape, dtype=idx_dtype) + indices = np.empty(bnnz, dtype=idx_dtype) + data = np.empty(R*C*bnnz, dtype=upcast(self.dtype,other.dtype)) + + bsr_matmat(bnnz, M//R, N//C, R, C, n, + self.indptr.astype(idx_dtype), + self.indices.astype(idx_dtype), + np.ravel(self.data), + other.indptr.astype(idx_dtype), + other.indices.astype(idx_dtype), + np.ravel(other.data), + indptr, + indices, + data) + + data = data.reshape(-1,R,C) + + # TODO eliminate zeros + + return self._bsr_container( + (data, indices, indptr), shape=(M, N), blocksize=(R, C) + ) + + ###################### + # Conversion methods # + ###################### + + def tobsr(self, blocksize=None, copy=False): + """Convert this array/matrix into Block Sparse Row Format. + + With copy=False, the data/indices may be shared between this + array/matrix and the resultant bsr_array/bsr_matrix. + + If blocksize=(R, C) is provided, it will be used for determining + block size of the bsr_array/bsr_matrix. 
+ """ + if blocksize not in [None, self.blocksize]: + return self.tocsr().tobsr(blocksize=blocksize) + if copy: + return self.copy() + else: + return self + + def tocsr(self, copy=False): + M, N = self.shape + R, C = self.blocksize + nnz = self.nnz + idx_dtype = self._get_index_dtype((self.indptr, self.indices), + maxval=max(nnz, N)) + indptr = np.empty(M + 1, dtype=idx_dtype) + indices = np.empty(nnz, dtype=idx_dtype) + data = np.empty(nnz, dtype=upcast(self.dtype)) + + bsr_tocsr(M // R, # n_brow + N // C, # n_bcol + R, C, + self.indptr.astype(idx_dtype, copy=False), + self.indices.astype(idx_dtype, copy=False), + self.data, + indptr, + indices, + data) + return self._csr_container((data, indices, indptr), shape=self.shape) + + tocsr.__doc__ = _spbase.tocsr.__doc__ + + def tocsc(self, copy=False): + return self.tocsr(copy=False).tocsc(copy=copy) + + tocsc.__doc__ = _spbase.tocsc.__doc__ + + def tocoo(self, copy=True): + """Convert this array/matrix to COOrdinate format. + + When copy=False the data array will be shared between + this array/matrix and the resultant coo_array/coo_matrix. 
+ """ + + M,N = self.shape + R,C = self.blocksize + + indptr_diff = np.diff(self.indptr) + if indptr_diff.dtype.itemsize > np.dtype(np.intp).itemsize: + # Check for potential overflow + indptr_diff_limited = indptr_diff.astype(np.intp) + if np.any(indptr_diff_limited != indptr_diff): + raise ValueError("Matrix too big to convert") + indptr_diff = indptr_diff_limited + + idx_dtype = self._get_index_dtype(maxval=max(M, N)) + row = (R * np.arange(M//R, dtype=idx_dtype)).repeat(indptr_diff) + row = row.repeat(R*C).reshape(-1,R,C) + row += np.tile(np.arange(R, dtype=idx_dtype).reshape(-1,1), (1,C)) + row = row.reshape(-1) + + col = ((C * self.indices).astype(idx_dtype, copy=False) + .repeat(R*C).reshape(-1,R,C)) + col += np.tile(np.arange(C, dtype=idx_dtype), (R,1)) + col = col.reshape(-1) + + data = self.data.reshape(-1) + + if copy: + data = data.copy() + + return self._coo_container( + (data, (row, col)), shape=self.shape + ) + + def toarray(self, order=None, out=None): + return self.tocoo(copy=False).toarray(order=order, out=out) + + toarray.__doc__ = _spbase.toarray.__doc__ + + def transpose(self, axes=None, copy=False): + if axes is not None and axes != (1, 0): + raise ValueError("Sparse matrices do not support " + "an 'axes' parameter because swapping " + "dimensions is the only logical permutation.") + + R, C = self.blocksize + M, N = self.shape + NBLK = self.nnz//(R*C) + + if self.nnz == 0: + return self._bsr_container((N, M), blocksize=(C, R), + dtype=self.dtype, copy=copy) + + indptr = np.empty(N//C + 1, dtype=self.indptr.dtype) + indices = np.empty(NBLK, dtype=self.indices.dtype) + data = np.empty((NBLK, C, R), dtype=self.data.dtype) + + bsr_transpose(M//R, N//C, R, C, + self.indptr, self.indices, self.data.ravel(), + indptr, indices, data.ravel()) + + return self._bsr_container((data, indices, indptr), + shape=(N, M), copy=copy) + + transpose.__doc__ = _spbase.transpose.__doc__ + + ############################################################## + # methods 
that examine or modify the internal data structure # + ############################################################## + + def eliminate_zeros(self): + """Remove zero elements in-place.""" + + if not self.nnz: + return # nothing to do + + R,C = self.blocksize + M,N = self.shape + + mask = (self.data != 0).reshape(-1,R*C).sum(axis=1) # nonzero blocks + + nonzero_blocks = mask.nonzero()[0] + + self.data[:len(nonzero_blocks)] = self.data[nonzero_blocks] + + # modifies self.indptr and self.indices *in place* + _sparsetools.csr_eliminate_zeros(M//R, N//C, self.indptr, + self.indices, mask) + self.prune() + + def sum_duplicates(self): + """Eliminate duplicate array/matrix entries by adding them together + + The is an *in place* operation + """ + if self.has_canonical_format: + return + self.sort_indices() + R, C = self.blocksize + M, N = self.shape + + # port of _sparsetools.csr_sum_duplicates + n_row = M // R + nnz = 0 + row_end = 0 + for i in range(n_row): + jj = row_end + row_end = self.indptr[i+1] + while jj < row_end: + j = self.indices[jj] + x = self.data[jj] + jj += 1 + while jj < row_end and self.indices[jj] == j: + x += self.data[jj] + jj += 1 + self.indices[nnz] = j + self.data[nnz] = x + nnz += 1 + self.indptr[i+1] = nnz + + self.prune() # nnz may have changed + self.has_canonical_format = True + + def sort_indices(self): + """Sort the indices of this array/matrix *in place* + """ + if self.has_sorted_indices: + return + + R,C = self.blocksize + M,N = self.shape + + bsr_sort_indices(M//R, N//C, R, C, self.indptr, self.indices, self.data.ravel()) + + self.has_sorted_indices = True + + def prune(self): + """Remove empty space after all non-zero elements. 
+ """ + + R,C = self.blocksize + M,N = self.shape + + if len(self.indptr) != M//R + 1: + raise ValueError("index pointer has invalid length") + + bnnz = self.indptr[-1] + + if len(self.indices) < bnnz: + raise ValueError("indices array has too few elements") + if len(self.data) < bnnz: + raise ValueError("data array has too few elements") + + self.data = self.data[:bnnz] + self.indices = self.indices[:bnnz] + + # utility functions + def _binopt(self, other, op, in_shape=None, out_shape=None): + """Apply the binary operation fn to two sparse matrices.""" + + # Ideally we'd take the GCDs of the blocksize dimensions + # and explode self and other to match. + other = self.__class__(other, blocksize=self.blocksize) + + # e.g. bsr_plus_bsr, etc. + fn = getattr(_sparsetools, self.format + op + self.format) + + R,C = self.blocksize + + max_bnnz = len(self.data) + len(other.data) + idx_dtype = self._get_index_dtype((self.indptr, self.indices, + other.indptr, other.indices), + maxval=max_bnnz) + indptr = np.empty(self.indptr.shape, dtype=idx_dtype) + indices = np.empty(max_bnnz, dtype=idx_dtype) + + bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_'] + if op in bool_ops: + data = np.empty(R*C*max_bnnz, dtype=np.bool_) + else: + data = np.empty(R*C*max_bnnz, dtype=upcast(self.dtype,other.dtype)) + + fn(self.shape[0]//R, self.shape[1]//C, R, C, + self.indptr.astype(idx_dtype), + self.indices.astype(idx_dtype), + self.data, + other.indptr.astype(idx_dtype), + other.indices.astype(idx_dtype), + np.ravel(other.data), + indptr, + indices, + data) + + actual_bnnz = indptr[-1] + indices = indices[:actual_bnnz] + data = data[:R*C*actual_bnnz] + + if actual_bnnz < max_bnnz/2: + indices = indices.copy() + data = data.copy() + + data = data.reshape(-1,R,C) + + return self.__class__((data, indices, indptr), shape=self.shape) + + # needed by _data_matrix + def _with_data(self,data,copy=True): + """Returns a matrix with the same sparsity structure as self, + but with different data. 
By default the structure arrays + (i.e. .indptr and .indices) are copied. + """ + if copy: + return self.__class__((data,self.indices.copy(),self.indptr.copy()), + shape=self.shape,dtype=data.dtype) + else: + return self.__class__((data,self.indices,self.indptr), + shape=self.shape,dtype=data.dtype) + +# # these functions are used by the parent class +# # to remove redundancy between bsc_matrix and bsr_matrix +# def _swap(self,x): +# """swap the members of x if this is a column-oriented matrix +# """ +# return (x[0],x[1]) + + +def isspmatrix_bsr(x): + """Is `x` of a bsr_matrix type? + + Parameters + ---------- + x + object to check for being a bsr matrix + + Returns + ------- + bool + True if `x` is a bsr matrix, False otherwise + + Examples + -------- + >>> from scipy.sparse import bsr_array, bsr_matrix, csr_matrix, isspmatrix_bsr + >>> isspmatrix_bsr(bsr_matrix([[5]])) + True + >>> isspmatrix_bsr(bsr_array([[5]])) + False + >>> isspmatrix_bsr(csr_matrix([[5]])) + False + """ + return isinstance(x, bsr_matrix) + + +# This namespace class separates array from matrix with isinstance +class bsr_array(_bsr_base, sparray): + """ + Block Sparse Row format sparse array. + + This can be instantiated in several ways: + bsr_array(D, [blocksize=(R,C)]) + where D is a 2-D ndarray. + + bsr_array(S, [blocksize=(R,C)]) + with another sparse array or matrix S (equivalent to S.tobsr()) + + bsr_array((M, N), [blocksize=(R,C), dtype]) + to construct an empty sparse array with shape (M, N) + dtype is optional, defaulting to dtype='d'. + + bsr_array((data, ij), [blocksize=(R,C), shape=(M, N)]) + where ``data`` and ``ij`` satisfy ``a[ij[0, k], ij[1, k]] = data[k]`` + + bsr_array((data, indices, indptr), [shape=(M, N)]) + is the standard BSR representation where the block column + indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]`` + and their corresponding block values are stored in + ``data[ indptr[i]: indptr[i+1] ]``. 
If the shape parameter is not + supplied, the array dimensions are inferred from the index arrays. + + Attributes + ---------- + dtype : dtype + Data type of the array + shape : 2-tuple + Shape of the array + ndim : int + Number of dimensions (this is always 2) + nnz + size + data + BSR format data array of the array + indices + BSR format index array of the array + indptr + BSR format index pointer array of the array + blocksize + Block size + has_sorted_indices : bool + Whether indices are sorted + has_canonical_format : bool + T + + Notes + ----- + Sparse arrays can be used in arithmetic operations: they support + addition, subtraction, multiplication, division, and matrix power. + + **Summary of BSR format** + + The Block Sparse Row (BSR) format is very similar to the Compressed + Sparse Row (CSR) format. BSR is appropriate for sparse matrices with dense + sub matrices like the last example below. Such sparse block matrices often + arise in vector-valued finite element discretizations. In such cases, BSR is + considerably more efficient than CSR and CSC for many sparse arithmetic + operations. + + **Blocksize** + + The blocksize (R,C) must evenly divide the shape of the sparse array (M,N). + That is, R and C must satisfy the relationship ``M % R = 0`` and + ``N % C = 0``. + + If no blocksize is specified, a simple heuristic is applied to determine + an appropriate blocksize. + + **Canonical Format** + + In canonical format, there are no duplicate blocks and indices are sorted + per row. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.sparse import bsr_array + >>> bsr_array((3, 4), dtype=np.int8).toarray() + array([[0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]], dtype=int8) + + >>> row = np.array([0, 0, 1, 2, 2, 2]) + >>> col = np.array([0, 2, 2, 0, 1, 2]) + >>> data = np.array([1, 2, 3 ,4, 5, 6]) + >>> bsr_array((data, (row, col)), shape=(3, 3)).toarray() + array([[1, 0, 2], + [0, 0, 3], + [4, 5, 6]]) + + >>> indptr = np.array([0, 2, 3, 6]) + >>> indices = np.array([0, 2, 2, 0, 1, 2]) + >>> data = np.array([1, 2, 3, 4, 5, 6]).repeat(4).reshape(6, 2, 2) + >>> bsr_array((data,indices,indptr), shape=(6, 6)).toarray() + array([[1, 1, 0, 0, 2, 2], + [1, 1, 0, 0, 2, 2], + [0, 0, 0, 0, 3, 3], + [0, 0, 0, 0, 3, 3], + [4, 4, 5, 5, 6, 6], + [4, 4, 5, 5, 6, 6]]) + + """ + + +class bsr_matrix(spmatrix, _bsr_base): + """ + Block Sparse Row format sparse matrix. + + This can be instantiated in several ways: + bsr_matrix(D, [blocksize=(R,C)]) + where D is a 2-D ndarray. + + bsr_matrix(S, [blocksize=(R,C)]) + with another sparse array or matrix S (equivalent to S.tobsr()) + + bsr_matrix((M, N), [blocksize=(R,C), dtype]) + to construct an empty sparse matrix with shape (M, N) + dtype is optional, defaulting to dtype='d'. + + bsr_matrix((data, ij), [blocksize=(R,C), shape=(M, N)]) + where ``data`` and ``ij`` satisfy ``a[ij[0, k], ij[1, k]] = data[k]`` + + bsr_matrix((data, indices, indptr), [shape=(M, N)]) + is the standard BSR representation where the block column + indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]`` + and their corresponding block values are stored in + ``data[ indptr[i]: indptr[i+1] ]``. If the shape parameter is not + supplied, the matrix dimensions are inferred from the index arrays. 
+ + Attributes + ---------- + dtype : dtype + Data type of the matrix + shape : 2-tuple + Shape of the matrix + ndim : int + Number of dimensions (this is always 2) + nnz + size + data + BSR format data array of the matrix + indices + BSR format index array of the matrix + indptr + BSR format index pointer array of the matrix + blocksize + Block size + has_sorted_indices : bool + Whether indices are sorted + has_canonical_format : bool + T + + Notes + ----- + Sparse matrices can be used in arithmetic operations: they support + addition, subtraction, multiplication, division, and matrix power. + + **Summary of BSR format** + + The Block Sparse Row (BSR) format is very similar to the Compressed + Sparse Row (CSR) format. BSR is appropriate for sparse matrices with dense + sub matrices like the last example below. Such sparse block matrices often + arise in vector-valued finite element discretizations. In such cases, BSR is + considerably more efficient than CSR and CSC for many sparse arithmetic + operations. + + **Blocksize** + + The blocksize (R,C) must evenly divide the shape of the sparse matrix (M,N). + That is, R and C must satisfy the relationship ``M % R = 0`` and + ``N % C = 0``. + + If no blocksize is specified, a simple heuristic is applied to determine + an appropriate blocksize. + + **Canonical Format** + + In canonical format, there are no duplicate blocks and indices are sorted + per row. 
+ + Examples + -------- + >>> import numpy as np + >>> from scipy.sparse import bsr_matrix + >>> bsr_matrix((3, 4), dtype=np.int8).toarray() + array([[0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]], dtype=int8) + + >>> row = np.array([0, 0, 1, 2, 2, 2]) + >>> col = np.array([0, 2, 2, 0, 1, 2]) + >>> data = np.array([1, 2, 3 ,4, 5, 6]) + >>> bsr_matrix((data, (row, col)), shape=(3, 3)).toarray() + array([[1, 0, 2], + [0, 0, 3], + [4, 5, 6]]) + + >>> indptr = np.array([0, 2, 3, 6]) + >>> indices = np.array([0, 2, 2, 0, 1, 2]) + >>> data = np.array([1, 2, 3, 4, 5, 6]).repeat(4).reshape(6, 2, 2) + >>> bsr_matrix((data,indices,indptr), shape=(6, 6)).toarray() + array([[1, 1, 0, 0, 2, 2], + [1, 1, 0, 0, 2, 2], + [0, 0, 0, 0, 3, 3], + [0, 0, 0, 0, 3, 3], + [4, 4, 5, 5, 6, 6], + [4, 4, 5, 5, 6, 6]]) + + """ + diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_compressed.py b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_compressed.py new file mode 100644 index 0000000000000000000000000000000000000000..dd73fc27b9bf6b35125da7776bd88435ea66b650 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_compressed.py @@ -0,0 +1,1367 @@ +"""Base class for sparse matrix formats using compressed storage.""" +__all__ = [] + +from warnings import warn +import operator + +import numpy as np +from scipy._lib._util import _prune_array, copy_if_needed + +from ._base import _spbase, issparse, SparseEfficiencyWarning +from ._data import _data_matrix, _minmax_mixin +from . 
import _sparsetools +from ._sparsetools import (get_csr_submatrix, csr_sample_offsets, csr_todense, + csr_sample_values, csr_row_index, csr_row_slice, + csr_column_index1, csr_column_index2) +from ._index import IndexMixin +from ._sputils import (upcast, upcast_char, to_native, isdense, isshape, + getdtype, isscalarlike, isintlike, downcast_intp_index, + get_sum_dtype, check_shape, is_pydata_spmatrix) + + +class _cs_matrix(_data_matrix, _minmax_mixin, IndexMixin): + """ + base array/matrix class for compressed row- and column-oriented arrays/matrices + """ + + def __init__(self, arg1, shape=None, dtype=None, copy=False): + _data_matrix.__init__(self) + + if issparse(arg1): + if arg1.format == self.format and copy: + arg1 = arg1.copy() + else: + arg1 = arg1.asformat(self.format) + self.indptr, self.indices, self.data, self._shape = ( + arg1.indptr, arg1.indices, arg1.data, arg1._shape + ) + + elif isinstance(arg1, tuple): + if isshape(arg1): + # It's a tuple of matrix dimensions (M, N) + # create empty matrix + self._shape = check_shape(arg1) + M, N = self.shape + # Select index dtype large enough to pass array and + # scalar parameters to sparsetools + idx_dtype = self._get_index_dtype(maxval=max(M, N)) + self.data = np.zeros(0, getdtype(dtype, default=float)) + self.indices = np.zeros(0, idx_dtype) + self.indptr = np.zeros(self._swap((M, N))[0] + 1, + dtype=idx_dtype) + else: + if len(arg1) == 2: + # (data, ij) format + coo = self._coo_container(arg1, shape=shape, dtype=dtype) + arrays = coo._coo_to_compressed(self._swap) + self.indptr, self.indices, self.data, self._shape = arrays + elif len(arg1) == 3: + # (data, indices, indptr) format + (data, indices, indptr) = arg1 + + # Select index dtype large enough to pass array and + # scalar parameters to sparsetools + maxval = None + if shape is not None: + maxval = max(shape) + idx_dtype = self._get_index_dtype((indices, indptr), + maxval=maxval, + check_contents=True) + + if not copy: + copy = copy_if_needed + 
self.indices = np.array(indices, copy=copy, dtype=idx_dtype) + self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype) + self.data = np.array(data, copy=copy, dtype=dtype) + else: + raise ValueError(f"unrecognized {self.format}_matrix " + "constructor usage") + + else: + # must be dense + try: + arg1 = np.asarray(arg1) + except Exception as e: + msg = f"unrecognized {self.format}_matrix constructor usage" + raise ValueError(msg) from e + coo = self._coo_container(arg1, dtype=dtype) + arrays = coo._coo_to_compressed(self._swap) + self.indptr, self.indices, self.data, self._shape = arrays + + # Read matrix dimensions given, if any + if shape is not None: + self._shape = check_shape(shape) + else: + if self.shape is None: + # shape not already set, try to infer dimensions + try: + major_dim = len(self.indptr) - 1 + minor_dim = self.indices.max() + 1 + except Exception as e: + raise ValueError('unable to infer matrix dimensions') from e + else: + self._shape = check_shape(self._swap((major_dim, minor_dim))) + + if dtype is not None: + self.data = self.data.astype(dtype, copy=False) + + self.check_format(full_check=False) + + def _getnnz(self, axis=None): + if axis is None: + return int(self.indptr[-1]) + else: + if axis < 0: + axis += 2 + axis, _ = self._swap((axis, 1 - axis)) + _, N = self._swap(self.shape) + if axis == 0: + return np.bincount(downcast_intp_index(self.indices), + minlength=N) + elif axis == 1: + return np.diff(self.indptr) + raise ValueError('axis out of bounds') + + _getnnz.__doc__ = _spbase._getnnz.__doc__ + + def check_format(self, full_check=True): + """Check whether the array/matrix respects the CSR or CSC format. + + Parameters + ---------- + full_check : bool, optional + If `True`, run rigorous check, scanning arrays for valid values. + Note that activating those check might copy arrays for casting, + modifying indices and index pointers' inplace. + If `False`, run basic checks on attributes. O(1) operations. + Default is `True`. 
+ """ + # use _swap to determine proper bounds + major_name, minor_name = self._swap(('row', 'column')) + major_dim, minor_dim = self._swap(self.shape) + + # index arrays should have integer data types + if self.indptr.dtype.kind != 'i': + warn(f"indptr array has non-integer dtype ({self.indptr.dtype.name})", + stacklevel=3) + if self.indices.dtype.kind != 'i': + warn(f"indices array has non-integer dtype ({self.indices.dtype.name})", + stacklevel=3) + + # check array shapes + for x in [self.data.ndim, self.indices.ndim, self.indptr.ndim]: + if x != 1: + raise ValueError('data, indices, and indptr should be 1-D') + + # check index pointer + if (len(self.indptr) != major_dim + 1): + raise ValueError("index pointer size ({}) should be ({})" + "".format(len(self.indptr), major_dim + 1)) + if (self.indptr[0] != 0): + raise ValueError("index pointer should start with 0") + + # check index and data arrays + if (len(self.indices) != len(self.data)): + raise ValueError("indices and data should have the same size") + if (self.indptr[-1] > len(self.indices)): + raise ValueError("Last value of index pointer should be less than " + "the size of index and data arrays") + + self.prune() + + if full_check: + # check format validity (more expensive) + if self.nnz > 0: + if self.indices.max() >= minor_dim: + raise ValueError(f"{minor_name} index values must be < {minor_dim}") + if self.indices.min() < 0: + raise ValueError(f"{minor_name} index values must be >= 0") + if np.diff(self.indptr).min() < 0: + raise ValueError("index pointer values must form a " + "non-decreasing sequence") + + idx_dtype = self._get_index_dtype((self.indptr, self.indices)) + self.indptr = np.asarray(self.indptr, dtype=idx_dtype) + self.indices = np.asarray(self.indices, dtype=idx_dtype) + self.data = to_native(self.data) + + # if not self.has_sorted_indices(): + # warn('Indices were not in sorted order. 
Sorting indices.') + # self.sort_indices() + # assert(self.has_sorted_indices()) + # TODO check for duplicates? + + ####################### + # Boolean comparisons # + ####################### + + def _scalar_binopt(self, other, op): + """Scalar version of self._binopt, for cases in which no new nonzeros + are added. Produces a new sparse array in canonical form. + """ + self.sum_duplicates() + res = self._with_data(op(self.data, other), copy=True) + res.eliminate_zeros() + return res + + def __eq__(self, other): + # Scalar other. + if isscalarlike(other): + if np.isnan(other): + return self.__class__(self.shape, dtype=np.bool_) + + if other == 0: + warn("Comparing a sparse matrix with 0 using == is inefficient" + ", try using != instead.", SparseEfficiencyWarning, + stacklevel=3) + all_true = self.__class__(np.ones(self.shape, dtype=np.bool_)) + inv = self._scalar_binopt(other, operator.ne) + return all_true - inv + else: + return self._scalar_binopt(other, operator.eq) + # Dense other. + elif isdense(other): + return self.todense() == other + # Pydata sparse other. + elif is_pydata_spmatrix(other): + return NotImplemented + # Sparse other. + elif issparse(other): + warn("Comparing sparse matrices using == is inefficient, try using" + " != instead.", SparseEfficiencyWarning, stacklevel=3) + # TODO sparse broadcasting + if self.shape != other.shape: + return False + elif self.format != other.format: + other = other.asformat(self.format) + res = self._binopt(other, '_ne_') + all_true = self.__class__(np.ones(self.shape, dtype=np.bool_)) + return all_true - res + else: + return NotImplemented + + def __ne__(self, other): + # Scalar other. 
+ if isscalarlike(other): + if np.isnan(other): + warn("Comparing a sparse matrix with nan using != is" + " inefficient", SparseEfficiencyWarning, stacklevel=3) + all_true = self.__class__(np.ones(self.shape, dtype=np.bool_)) + return all_true + elif other != 0: + warn("Comparing a sparse matrix with a nonzero scalar using !=" + " is inefficient, try using == instead.", + SparseEfficiencyWarning, stacklevel=3) + all_true = self.__class__(np.ones(self.shape), dtype=np.bool_) + inv = self._scalar_binopt(other, operator.eq) + return all_true - inv + else: + return self._scalar_binopt(other, operator.ne) + # Dense other. + elif isdense(other): + return self.todense() != other + # Pydata sparse other. + elif is_pydata_spmatrix(other): + return NotImplemented + # Sparse other. + elif issparse(other): + # TODO sparse broadcasting + if self.shape != other.shape: + return True + elif self.format != other.format: + other = other.asformat(self.format) + return self._binopt(other, '_ne_') + else: + return NotImplemented + + def _inequality(self, other, op, op_name, bad_scalar_msg): + # Scalar other. + if isscalarlike(other): + if 0 == other and op_name in ('_le_', '_ge_'): + raise NotImplementedError(" >= and <= don't work with 0.") + elif op(0, other): + warn(bad_scalar_msg, SparseEfficiencyWarning, stacklevel=3) + other_arr = np.empty(self.shape, dtype=np.result_type(other)) + other_arr.fill(other) + other_arr = self.__class__(other_arr) + return self._binopt(other_arr, op_name) + else: + return self._scalar_binopt(other, op) + # Dense other. + elif isdense(other): + return op(self.todense(), other) + # Sparse other. 
+ elif issparse(other): + # TODO sparse broadcasting + if self.shape != other.shape: + raise ValueError("inconsistent shapes") + elif self.format != other.format: + other = other.asformat(self.format) + if op_name not in ('_ge_', '_le_'): + return self._binopt(other, op_name) + + warn("Comparing sparse matrices using >= and <= is inefficient, " + "using <, >, or !=, instead.", + SparseEfficiencyWarning, stacklevel=3) + all_true = self.__class__(np.ones(self.shape, dtype=np.bool_)) + res = self._binopt(other, '_gt_' if op_name == '_le_' else '_lt_') + return all_true - res + else: + return NotImplemented + + def __lt__(self, other): + return self._inequality(other, operator.lt, '_lt_', + "Comparing a sparse matrix with a scalar " + "greater than zero using < is inefficient, " + "try using >= instead.") + + def __gt__(self, other): + return self._inequality(other, operator.gt, '_gt_', + "Comparing a sparse matrix with a scalar " + "less than zero using > is inefficient, " + "try using <= instead.") + + def __le__(self, other): + return self._inequality(other, operator.le, '_le_', + "Comparing a sparse matrix with a scalar " + "greater than zero using <= is inefficient, " + "try using > instead.") + + def __ge__(self, other): + return self._inequality(other, operator.ge, '_ge_', + "Comparing a sparse matrix with a scalar " + "less than zero using >= is inefficient, " + "try using < instead.") + + ################################# + # Arithmetic operator overrides # + ################################# + + def _add_dense(self, other): + if other.shape != self.shape: + raise ValueError(f'Incompatible shapes ({self.shape} and {other.shape})') + dtype = upcast_char(self.dtype.char, other.dtype.char) + order = self._swap('CF')[0] + result = np.array(other, dtype=dtype, order=order, copy=True) + M, N = self._swap(self.shape) + y = result if result.flags.c_contiguous else result.T + csr_todense(M, N, self.indptr, self.indices, self.data, y) + return self._container(result, 
copy=False) + + def _add_sparse(self, other): + return self._binopt(other, '_plus_') + + def _sub_sparse(self, other): + return self._binopt(other, '_minus_') + + def multiply(self, other): + """Point-wise multiplication by another array/matrix, vector, or + scalar. + """ + # Scalar multiplication. + if isscalarlike(other): + return self._mul_scalar(other) + # Sparse matrix or vector. + if issparse(other): + if self.shape == other.shape: + other = self.__class__(other) + return self._binopt(other, '_elmul_') + if other.ndim == 1: + raise TypeError("broadcast from a 1d array not yet supported") + # Single element. + elif other.shape == (1, 1): + return self._mul_scalar(other.toarray()[0, 0]) + elif self.shape == (1, 1): + return other._mul_scalar(self.toarray()[0, 0]) + # A row times a column. + elif self.shape[1] == 1 and other.shape[0] == 1: + return self._matmul_sparse(other.tocsc()) + elif self.shape[0] == 1 and other.shape[1] == 1: + return other._matmul_sparse(self.tocsc()) + # Row vector times matrix. other is a row. + elif other.shape[0] == 1 and self.shape[1] == other.shape[1]: + other = self._dia_container( + (other.toarray().ravel(), [0]), + shape=(other.shape[1], other.shape[1]) + ) + return self._matmul_sparse(other) + # self is a row. + elif self.shape[0] == 1 and self.shape[1] == other.shape[1]: + copy = self._dia_container( + (self.toarray().ravel(), [0]), + shape=(self.shape[1], self.shape[1]) + ) + return other._matmul_sparse(copy) + # Column vector times matrix. other is a column. + elif other.shape[1] == 1 and self.shape[0] == other.shape[0]: + other = self._dia_container( + (other.toarray().ravel(), [0]), + shape=(other.shape[0], other.shape[0]) + ) + return other._matmul_sparse(self) + # self is a column. 
+ elif self.shape[1] == 1 and self.shape[0] == other.shape[0]: + copy = self._dia_container( + (self.toarray().ravel(), [0]), + shape=(self.shape[0], self.shape[0]) + ) + return copy._matmul_sparse(other) + else: + raise ValueError("inconsistent shapes") + + # Assume other is a dense matrix/array, which produces a single-item + # object array if other isn't convertible to ndarray. + other = np.atleast_2d(other) + + if other.ndim != 2: + return np.multiply(self.toarray(), other) + # Single element / wrapped object. + if other.size == 1: + if other.dtype == np.object_: + # 'other' not convertible to ndarray. + return NotImplemented + return self._mul_scalar(other.flat[0]) + # Fast case for trivial sparse matrix. + elif self.shape == (1, 1): + return np.multiply(self.toarray()[0, 0], other) + + ret = self.tocoo() + # Matching shapes. + if self.shape == other.shape: + data = np.multiply(ret.data, other[ret.row, ret.col]) + # Sparse row vector times... + elif self.shape[0] == 1: + if other.shape[1] == 1: # Dense column vector. + data = np.multiply(ret.data, other) + elif other.shape[1] == self.shape[1]: # Dense matrix. + data = np.multiply(ret.data, other[:, ret.col]) + else: + raise ValueError("inconsistent shapes") + row = np.repeat(np.arange(other.shape[0]), len(ret.row)) + col = np.tile(ret.col, other.shape[0]) + return self._coo_container( + (data.view(np.ndarray).ravel(), (row, col)), + shape=(other.shape[0], self.shape[1]), + copy=False + ) + # Sparse column vector times... + elif self.shape[1] == 1: + if other.shape[0] == 1: # Dense row vector. + data = np.multiply(ret.data[:, None], other) + elif other.shape[0] == self.shape[0]: # Dense matrix. 
+ data = np.multiply(ret.data[:, None], other[ret.row]) + else: + raise ValueError("inconsistent shapes") + row = np.repeat(ret.row, other.shape[1]) + col = np.tile(np.arange(other.shape[1]), len(ret.col)) + return self._coo_container( + (data.view(np.ndarray).ravel(), (row, col)), + shape=(self.shape[0], other.shape[1]), + copy=False + ) + # Sparse matrix times dense row vector. + elif other.shape[0] == 1 and self.shape[1] == other.shape[1]: + data = np.multiply(ret.data, other[:, ret.col].ravel()) + # Sparse matrix times dense column vector. + elif other.shape[1] == 1 and self.shape[0] == other.shape[0]: + data = np.multiply(ret.data, other[ret.row].ravel()) + else: + raise ValueError("inconsistent shapes") + ret.data = data.view(np.ndarray).ravel() + return ret + + ########################### + # Multiplication handlers # + ########################### + + def _matmul_vector(self, other): + M, N = self.shape + + # output array + result = np.zeros(M, dtype=upcast_char(self.dtype.char, + other.dtype.char)) + + # csr_matvec or csc_matvec + fn = getattr(_sparsetools, self.format + '_matvec') + fn(M, N, self.indptr, self.indices, self.data, other, result) + + return result + + def _matmul_multivector(self, other): + M, N = self.shape + n_vecs = other.shape[1] # number of column vectors + + result = np.zeros((M, n_vecs), + dtype=upcast_char(self.dtype.char, other.dtype.char)) + + # csr_matvecs or csc_matvecs + fn = getattr(_sparsetools, self.format + '_matvecs') + fn(M, N, n_vecs, self.indptr, self.indices, self.data, + other.ravel(), result.ravel()) + + return result + + def _matmul_sparse(self, other): + M, K1 = self.shape + K2, N = other.shape + + major_axis = self._swap((M, N))[0] + other = self.__class__(other) # convert to this format + + idx_dtype = self._get_index_dtype((self.indptr, self.indices, + other.indptr, other.indices)) + + fn = getattr(_sparsetools, self.format + '_matmat_maxnnz') + nnz = fn(M, N, + np.asarray(self.indptr, dtype=idx_dtype), + 
np.asarray(self.indices, dtype=idx_dtype), + np.asarray(other.indptr, dtype=idx_dtype), + np.asarray(other.indices, dtype=idx_dtype)) + + idx_dtype = self._get_index_dtype((self.indptr, self.indices, + other.indptr, other.indices), + maxval=nnz) + + indptr = np.empty(major_axis + 1, dtype=idx_dtype) + indices = np.empty(nnz, dtype=idx_dtype) + data = np.empty(nnz, dtype=upcast(self.dtype, other.dtype)) + + fn = getattr(_sparsetools, self.format + '_matmat') + fn(M, N, np.asarray(self.indptr, dtype=idx_dtype), + np.asarray(self.indices, dtype=idx_dtype), + self.data, + np.asarray(other.indptr, dtype=idx_dtype), + np.asarray(other.indices, dtype=idx_dtype), + other.data, + indptr, indices, data) + + return self.__class__((data, indices, indptr), shape=(M, N)) + + def diagonal(self, k=0): + rows, cols = self.shape + if k <= -rows or k >= cols: + return np.empty(0, dtype=self.data.dtype) + fn = getattr(_sparsetools, self.format + "_diagonal") + y = np.empty(min(rows + min(k, 0), cols - max(k, 0)), + dtype=upcast(self.dtype)) + fn(k, self.shape[0], self.shape[1], self.indptr, self.indices, + self.data, y) + return y + + diagonal.__doc__ = _spbase.diagonal.__doc__ + + ##################### + # Other binary ops # + ##################### + + def _maximum_minimum(self, other, npop, op_name, dense_check): + if isscalarlike(other): + if dense_check(other): + warn("Taking maximum (minimum) with > 0 (< 0) number results" + " to a dense matrix.", SparseEfficiencyWarning, + stacklevel=3) + other_arr = np.empty(self.shape, dtype=np.asarray(other).dtype) + other_arr.fill(other) + other_arr = self.__class__(other_arr) + return self._binopt(other_arr, op_name) + else: + self.sum_duplicates() + new_data = npop(self.data, np.asarray(other)) + mat = self.__class__((new_data, self.indices, self.indptr), + dtype=new_data.dtype, shape=self.shape) + return mat + elif isdense(other): + return npop(self.todense(), other) + elif issparse(other): + return self._binopt(other, op_name) + else: + 
raise ValueError("Operands not compatible.") + + def maximum(self, other): + return self._maximum_minimum(other, np.maximum, + '_maximum_', lambda x: np.asarray(x) > 0) + + maximum.__doc__ = _spbase.maximum.__doc__ + + def minimum(self, other): + return self._maximum_minimum(other, np.minimum, + '_minimum_', lambda x: np.asarray(x) < 0) + + minimum.__doc__ = _spbase.minimum.__doc__ + + ##################### + # Reduce operations # + ##################### + + def sum(self, axis=None, dtype=None, out=None): + """Sum the array/matrix over the given axis. If the axis is None, sum + over both rows and columns, returning a scalar. + """ + # The _spbase base class already does axis=0 and axis=1 efficiently + # so we only do the case axis=None here + if (not hasattr(self, 'blocksize') and + axis in self._swap(((1, -1), (0, 2)))[0]): + # faster than multiplication for large minor axis in CSC/CSR + res_dtype = get_sum_dtype(self.dtype) + ret = np.zeros(len(self.indptr) - 1, dtype=res_dtype) + + major_index, value = self._minor_reduce(np.add) + ret[major_index] = value + ret = self._ascontainer(ret) + if axis % 2 == 1: + ret = ret.T + + if out is not None and out.shape != ret.shape: + raise ValueError('dimensions do not match') + + return ret.sum(axis=(), dtype=dtype, out=out) + # _spbase will handle the remaining situations when axis + # is in {None, -1, 0, 1} + else: + return _spbase.sum(self, axis=axis, dtype=dtype, out=out) + + sum.__doc__ = _spbase.sum.__doc__ + + def _minor_reduce(self, ufunc, data=None): + """Reduce nonzeros with a ufunc over the minor axis when non-empty + + Can be applied to a function of self.data by supplying data parameter. 
+ + Warning: this does not call sum_duplicates() + + Returns + ------- + major_index : array of ints + Major indices where nonzero + + value : array of self.dtype + Reduce result for nonzeros in each major_index + """ + if data is None: + data = self.data + major_index = np.flatnonzero(np.diff(self.indptr)) + value = ufunc.reduceat(data, + downcast_intp_index(self.indptr[major_index])) + return major_index, value + + ####################### + # Getting and Setting # + ####################### + + def _get_intXint(self, row, col): + M, N = self._swap(self.shape) + major, minor = self._swap((row, col)) + indptr, indices, data = get_csr_submatrix( + M, N, self.indptr, self.indices, self.data, + major, major + 1, minor, minor + 1) + return data.sum(dtype=self.dtype) + + def _get_sliceXslice(self, row, col): + major, minor = self._swap((row, col)) + if major.step in (1, None) and minor.step in (1, None): + return self._get_submatrix(major, minor, copy=True) + return self._major_slice(major)._minor_slice(minor) + + def _get_arrayXarray(self, row, col): + # inner indexing + idx_dtype = self.indices.dtype + M, N = self._swap(self.shape) + major, minor = self._swap((row, col)) + major = np.asarray(major, dtype=idx_dtype) + minor = np.asarray(minor, dtype=idx_dtype) + + val = np.empty(major.size, dtype=self.dtype) + csr_sample_values(M, N, self.indptr, self.indices, self.data, + major.size, major.ravel(), minor.ravel(), val) + if major.ndim == 1: + return self._ascontainer(val) + return self.__class__(val.reshape(major.shape)) + + def _get_columnXarray(self, row, col): + # outer indexing + major, minor = self._swap((row, col)) + return self._major_index_fancy(major)._minor_index_fancy(minor) + + def _major_index_fancy(self, idx): + """Index along the major axis where idx is an array of ints. 
+ """ + idx_dtype = self._get_index_dtype((self.indptr, self.indices)) + indices = np.asarray(idx, dtype=idx_dtype).ravel() + + _, N = self._swap(self.shape) + M = len(indices) + new_shape = self._swap((M, N)) + if M == 0: + return self.__class__(new_shape, dtype=self.dtype) + + row_nnz = (self.indptr[indices + 1] - self.indptr[indices]).astype(idx_dtype) + + res_indptr = np.zeros(M+1, dtype=idx_dtype) + np.cumsum(row_nnz, out=res_indptr[1:]) + + nnz = res_indptr[-1] + res_indices = np.empty(nnz, dtype=idx_dtype) + res_data = np.empty(nnz, dtype=self.dtype) + csr_row_index( + M, + indices, + self.indptr.astype(idx_dtype, copy=False), + self.indices.astype(idx_dtype, copy=False), + self.data, + res_indices, + res_data + ) + + return self.__class__((res_data, res_indices, res_indptr), + shape=new_shape, copy=False) + + def _major_slice(self, idx, copy=False): + """Index along the major axis where idx is a slice object. + """ + if idx == slice(None): + return self.copy() if copy else self + + M, N = self._swap(self.shape) + start, stop, step = idx.indices(M) + M = len(range(start, stop, step)) + new_shape = self._swap((M, N)) + if M == 0: + return self.__class__(new_shape, dtype=self.dtype) + + # Work out what slices are needed for `row_nnz` + # start,stop can be -1, only if step is negative + start0, stop0 = start, stop + if stop == -1 and start >= 0: + stop0 = None + start1, stop1 = start + 1, stop + 1 + + row_nnz = self.indptr[start1:stop1:step] - \ + self.indptr[start0:stop0:step] + idx_dtype = self.indices.dtype + res_indptr = np.zeros(M+1, dtype=idx_dtype) + np.cumsum(row_nnz, out=res_indptr[1:]) + + if step == 1: + all_idx = slice(self.indptr[start], self.indptr[stop]) + res_indices = np.array(self.indices[all_idx], copy=copy) + res_data = np.array(self.data[all_idx], copy=copy) + else: + nnz = res_indptr[-1] + res_indices = np.empty(nnz, dtype=idx_dtype) + res_data = np.empty(nnz, dtype=self.dtype) + csr_row_slice(start, stop, step, self.indptr, self.indices, 
+ self.data, res_indices, res_data) + + return self.__class__((res_data, res_indices, res_indptr), + shape=new_shape, copy=False) + + def _minor_index_fancy(self, idx): + """Index along the minor axis where idx is an array of ints. + """ + idx_dtype = self._get_index_dtype((self.indices, self.indptr)) + indices = self.indices.astype(idx_dtype, copy=False) + indptr = self.indptr.astype(idx_dtype, copy=False) + + idx = np.asarray(idx, dtype=idx_dtype).ravel() + + M, N = self._swap(self.shape) + k = len(idx) + new_shape = self._swap((M, k)) + if k == 0: + return self.__class__(new_shape, dtype=self.dtype) + + # pass 1: count idx entries and compute new indptr + col_offsets = np.zeros(N, dtype=idx_dtype) + res_indptr = np.empty_like(self.indptr, dtype=idx_dtype) + csr_column_index1( + k, + idx, + M, + N, + indptr, + indices, + col_offsets, + res_indptr, + ) + + # pass 2: copy indices/data for selected idxs + col_order = np.argsort(idx).astype(idx_dtype, copy=False) + nnz = res_indptr[-1] + res_indices = np.empty(nnz, dtype=idx_dtype) + res_data = np.empty(nnz, dtype=self.dtype) + csr_column_index2(col_order, col_offsets, len(self.indices), + indices, self.data, res_indices, res_data) + return self.__class__((res_data, res_indices, res_indptr), + shape=new_shape, copy=False) + + def _minor_slice(self, idx, copy=False): + """Index along the minor axis where idx is a slice object. + """ + if idx == slice(None): + return self.copy() if copy else self + + M, N = self._swap(self.shape) + start, stop, step = idx.indices(N) + N = len(range(start, stop, step)) + if N == 0: + return self.__class__(self._swap((M, N)), dtype=self.dtype) + if step == 1: + return self._get_submatrix(minor=idx, copy=copy) + # TODO: don't fall back to fancy indexing here + return self._minor_index_fancy(np.arange(start, stop, step)) + + def _get_submatrix(self, major=None, minor=None, copy=False): + """Return a submatrix of this matrix. 
+ + major, minor: None, int, or slice with step 1 + """ + M, N = self._swap(self.shape) + i0, i1 = _process_slice(major, M) + j0, j1 = _process_slice(minor, N) + + if i0 == 0 and j0 == 0 and i1 == M and j1 == N: + return self.copy() if copy else self + + indptr, indices, data = get_csr_submatrix( + M, N, self.indptr, self.indices, self.data, i0, i1, j0, j1) + + shape = self._swap((i1 - i0, j1 - j0)) + return self.__class__((data, indices, indptr), shape=shape, + dtype=self.dtype, copy=False) + + def _set_intXint(self, row, col, x): + i, j = self._swap((row, col)) + self._set_many(i, j, x) + + def _set_arrayXarray(self, row, col, x): + i, j = self._swap((row, col)) + self._set_many(i, j, x) + + def _set_arrayXarray_sparse(self, row, col, x): + # clear entries that will be overwritten + self._zero_many(*self._swap((row, col))) + + M, N = row.shape # matches col.shape + broadcast_row = M != 1 and x.shape[0] == 1 + broadcast_col = N != 1 and x.shape[1] == 1 + r, c = x.row, x.col + + x = np.asarray(x.data, dtype=self.dtype) + if x.size == 0: + return + + if broadcast_row: + r = np.repeat(np.arange(M), len(r)) + c = np.tile(c, M) + x = np.tile(x, M) + if broadcast_col: + r = np.repeat(r, N) + c = np.tile(np.arange(N), len(c)) + x = np.repeat(x, N) + # only assign entries in the new sparsity structure + i, j = self._swap((row[r, c], col[r, c])) + self._set_many(i, j, x) + + def _setdiag(self, values, k): + if 0 in self.shape: + return + + M, N = self.shape + broadcast = (values.ndim == 0) + + if k < 0: + if broadcast: + max_index = min(M + k, N) + else: + max_index = min(M + k, N, len(values)) + i = np.arange(-k, max_index - k, dtype=self.indices.dtype) + j = np.arange(max_index, dtype=self.indices.dtype) + + else: + if broadcast: + max_index = min(M, N - k) + else: + max_index = min(M, N - k, len(values)) + i = np.arange(max_index, dtype=self.indices.dtype) + j = np.arange(k, k + max_index, dtype=self.indices.dtype) + + if not broadcast: + values = values[:len(i)] + + x 
= np.atleast_1d(np.asarray(values, dtype=self.dtype)).ravel() + if x.squeeze().shape != i.squeeze().shape: + x = np.broadcast_to(x, i.shape) + if x.size == 0: + return + + M, N = self._swap((M, N)) + i, j = self._swap((i, j)) + n_samples = x.size + offsets = np.empty(n_samples, dtype=self.indices.dtype) + ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples, + i, j, offsets) + if ret == 1: + # rinse and repeat + self.sum_duplicates() + csr_sample_offsets(M, N, self.indptr, self.indices, n_samples, + i, j, offsets) + if -1 not in offsets: + # only affects existing non-zero cells + self.data[offsets] = x + return + + mask = (offsets <= -1) + # Boundary between csc and convert to coo + # The value 0.001 is justified in gh-19962#issuecomment-1920499678 + if mask.sum() < self.nnz * 0.001: + # create new entries + i = i[mask] + j = j[mask] + self._insert_many(i, j, x[mask]) + # replace existing entries + mask = ~mask + self.data[offsets[mask]] = x[mask] + else: + # convert to coo for _set_diag + coo = self.tocoo() + coo._setdiag(values, k) + arrays = coo._coo_to_compressed(self._swap) + self.indptr, self.indices, self.data, _ = arrays + + def _prepare_indices(self, i, j): + M, N = self._swap(self.shape) + + def check_bounds(indices, bound): + idx = indices.max() + if idx >= bound: + raise IndexError('index (%d) out of range (>= %d)' % + (idx, bound)) + idx = indices.min() + if idx < -bound: + raise IndexError('index (%d) out of range (< -%d)' % + (idx, bound)) + + i = np.atleast_1d(np.asarray(i, dtype=self.indices.dtype)).ravel() + j = np.atleast_1d(np.asarray(j, dtype=self.indices.dtype)).ravel() + check_bounds(i, M) + check_bounds(j, N) + return i, j, M, N + + def _set_many(self, i, j, x): + """Sets value at each (i, j) to x + + Here (i,j) index major and minor respectively, and must not contain + duplicate entries. 
+ """ + i, j, M, N = self._prepare_indices(i, j) + x = np.atleast_1d(np.asarray(x, dtype=self.dtype)).ravel() + + n_samples = x.size + offsets = np.empty(n_samples, dtype=self.indices.dtype) + ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples, + i, j, offsets) + if ret == 1: + # rinse and repeat + self.sum_duplicates() + csr_sample_offsets(M, N, self.indptr, self.indices, n_samples, + i, j, offsets) + + if -1 not in offsets: + # only affects existing non-zero cells + self.data[offsets] = x + return + + else: + warn("Changing the sparsity structure of a {}_matrix is expensive." + " lil_matrix is more efficient.".format(self.format), + SparseEfficiencyWarning, stacklevel=3) + # replace where possible + mask = offsets > -1 + self.data[offsets[mask]] = x[mask] + # only insertions remain + mask = ~mask + i = i[mask] + i[i < 0] += M + j = j[mask] + j[j < 0] += N + self._insert_many(i, j, x[mask]) + + def _zero_many(self, i, j): + """Sets value at each (i, j) to zero, preserving sparsity structure. + + Here (i,j) index major and minor respectively. + """ + i, j, M, N = self._prepare_indices(i, j) + + n_samples = len(i) + offsets = np.empty(n_samples, dtype=self.indices.dtype) + ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples, + i, j, offsets) + if ret == 1: + # rinse and repeat + self.sum_duplicates() + csr_sample_offsets(M, N, self.indptr, self.indices, n_samples, + i, j, offsets) + + # only assign zeros to the existing sparsity structure + self.data[offsets[offsets > -1]] = 0 + + def _insert_many(self, i, j, x): + """Inserts new nonzero at each (i, j) with value x + + Here (i,j) index major and minor respectively. + i, j and x must be non-empty, 1d arrays. + Inserts each major group (e.g. all entries per row) at a time. + Maintains has_sorted_indices property. + Modifies i, j, x in place. 
+ """ + order = np.argsort(i, kind='mergesort') # stable for duplicates + i = i.take(order, mode='clip') + j = j.take(order, mode='clip') + x = x.take(order, mode='clip') + + do_sort = self.has_sorted_indices + + # Update index data type + idx_dtype = self._get_index_dtype((self.indices, self.indptr), + maxval=(self.indptr[-1] + x.size)) + self.indptr = np.asarray(self.indptr, dtype=idx_dtype) + self.indices = np.asarray(self.indices, dtype=idx_dtype) + i = np.asarray(i, dtype=idx_dtype) + j = np.asarray(j, dtype=idx_dtype) + + # Collate old and new in chunks by major index + indices_parts = [] + data_parts = [] + ui, ui_indptr = np.unique(i, return_index=True) + ui_indptr = np.append(ui_indptr, len(j)) + new_nnzs = np.diff(ui_indptr) + prev = 0 + for c, (ii, js, je) in enumerate(zip(ui, ui_indptr, ui_indptr[1:])): + # old entries + start = self.indptr[prev] + stop = self.indptr[ii] + indices_parts.append(self.indices[start:stop]) + data_parts.append(self.data[start:stop]) + + # handle duplicate j: keep last setting + uj, uj_indptr = np.unique(j[js:je][::-1], return_index=True) + if len(uj) == je - js: + indices_parts.append(j[js:je]) + data_parts.append(x[js:je]) + else: + indices_parts.append(j[js:je][::-1][uj_indptr]) + data_parts.append(x[js:je][::-1][uj_indptr]) + new_nnzs[c] = len(uj) + + prev = ii + + # remaining old entries + start = self.indptr[ii] + indices_parts.append(self.indices[start:]) + data_parts.append(self.data[start:]) + + # update attributes + self.indices = np.concatenate(indices_parts) + self.data = np.concatenate(data_parts) + nnzs = np.empty(self.indptr.shape, dtype=idx_dtype) + nnzs[0] = idx_dtype(0) + indptr_diff = np.diff(self.indptr) + indptr_diff[ui] += new_nnzs + nnzs[1:] = indptr_diff + self.indptr = np.cumsum(nnzs, out=nnzs) + + if do_sort: + # TODO: only sort where necessary + self.has_sorted_indices = False + self.sort_indices() + + self.check_format(full_check=False) + + ###################### + # Conversion methods # + 
###################### + + def tocoo(self, copy=True): + major_dim, minor_dim = self._swap(self.shape) + minor_indices = self.indices + major_indices = np.empty(len(minor_indices), dtype=self.indices.dtype) + _sparsetools.expandptr(major_dim, self.indptr, major_indices) + coords = self._swap((major_indices, minor_indices)) + + return self._coo_container( + (self.data, coords), self.shape, copy=copy, dtype=self.dtype + ) + + tocoo.__doc__ = _spbase.tocoo.__doc__ + + def toarray(self, order=None, out=None): + if out is None and order is None: + order = self._swap('cf')[0] + out = self._process_toarray_args(order, out) + if not (out.flags.c_contiguous or out.flags.f_contiguous): + raise ValueError('Output array must be C or F contiguous') + # align ideal order with output array order + if out.flags.c_contiguous: + x = self.tocsr() + y = out + else: + x = self.tocsc() + y = out.T + M, N = x._swap(x.shape) + csr_todense(M, N, x.indptr, x.indices, x.data, y) + return out + + toarray.__doc__ = _spbase.toarray.__doc__ + + ############################################################## + # methods that examine or modify the internal data structure # + ############################################################## + + def eliminate_zeros(self): + """Remove zero entries from the array/matrix + + This is an *in place* operation. + """ + M, N = self._swap(self.shape) + _sparsetools.csr_eliminate_zeros(M, N, self.indptr, self.indices, + self.data) + self.prune() # nnz may have changed + + @property + def has_canonical_format(self) -> bool: + """Whether the array/matrix has sorted indices and no duplicates + + Returns + - True: if the above applies + - False: otherwise + + has_canonical_format implies has_sorted_indices, so if the latter flag + is False, so will the former be; if the former is found True, the + latter flag is also set. 
+ """ + # first check to see if result was cached + if not getattr(self, '_has_sorted_indices', True): + # not sorted => not canonical + self._has_canonical_format = False + elif not hasattr(self, '_has_canonical_format'): + self.has_canonical_format = bool( + _sparsetools.csr_has_canonical_format( + len(self.indptr) - 1, self.indptr, self.indices) + ) + return self._has_canonical_format + + @has_canonical_format.setter + def has_canonical_format(self, val: bool): + self._has_canonical_format = bool(val) + if val: + self.has_sorted_indices = True + + def sum_duplicates(self): + """Eliminate duplicate entries by adding them together + + This is an *in place* operation. + """ + if self.has_canonical_format: + return + self.sort_indices() + + M, N = self._swap(self.shape) + _sparsetools.csr_sum_duplicates(M, N, self.indptr, self.indices, + self.data) + + self.prune() # nnz may have changed + self.has_canonical_format = True + + @property + def has_sorted_indices(self) -> bool: + """Whether the indices are sorted + + Returns + - True: if the indices of the array/matrix are in sorted order + - False: otherwise + """ + # first check to see if result was cached + if not hasattr(self, '_has_sorted_indices'): + self._has_sorted_indices = bool( + _sparsetools.csr_has_sorted_indices( + len(self.indptr) - 1, self.indptr, self.indices) + ) + return self._has_sorted_indices + + @has_sorted_indices.setter + def has_sorted_indices(self, val: bool): + self._has_sorted_indices = bool(val) + + + def sorted_indices(self): + """Return a copy of this array/matrix with sorted indices + """ + A = self.copy() + A.sort_indices() + return A + + # an alternative that has linear complexity is the following + # although the previous option is typically faster + # return self.toother().toother() + + def sort_indices(self): + """Sort the indices of this array/matrix *in place* + """ + + if not self.has_sorted_indices: + _sparsetools.csr_sort_indices(len(self.indptr) - 1, self.indptr, + 
self.indices, self.data) + self.has_sorted_indices = True + + def prune(self): + """Remove empty space after all non-zero elements. + """ + major_dim = self._swap(self.shape)[0] + + if len(self.indptr) != major_dim + 1: + raise ValueError('index pointer has invalid length') + if len(self.indices) < self.nnz: + raise ValueError('indices array has fewer than nnz elements') + if len(self.data) < self.nnz: + raise ValueError('data array has fewer than nnz elements') + + self.indices = _prune_array(self.indices[:self.nnz]) + self.data = _prune_array(self.data[:self.nnz]) + + def resize(self, *shape): + shape = check_shape(shape) + if hasattr(self, 'blocksize'): + bm, bn = self.blocksize + new_M, rm = divmod(shape[0], bm) + new_N, rn = divmod(shape[1], bn) + if rm or rn: + raise ValueError("shape must be divisible into {} blocks. " + "Got {}".format(self.blocksize, shape)) + M, N = self.shape[0] // bm, self.shape[1] // bn + else: + new_M, new_N = self._swap(shape) + M, N = self._swap(self.shape) + + if new_M < M: + self.indices = self.indices[:self.indptr[new_M]] + self.data = self.data[:self.indptr[new_M]] + self.indptr = self.indptr[:new_M + 1] + elif new_M > M: + self.indptr = np.resize(self.indptr, new_M + 1) + self.indptr[M + 1:].fill(self.indptr[M]) + + if new_N < N: + mask = self.indices < new_N + if not np.all(mask): + self.indices = self.indices[mask] + self.data = self.data[mask] + major_index, val = self._minor_reduce(np.add, mask) + self.indptr.fill(0) + self.indptr[1:][major_index] = val + np.cumsum(self.indptr, out=self.indptr) + + self._shape = shape + + resize.__doc__ = _spbase.resize.__doc__ + + ################### + # utility methods # + ################### + + # needed by _data_matrix + def _with_data(self, data, copy=True): + """Returns a matrix with the same sparsity structure as self, + but with different data. By default the structure arrays + (i.e. .indptr and .indices) are copied. 
+ """ + if copy: + return self.__class__((data, self.indices.copy(), + self.indptr.copy()), + shape=self.shape, + dtype=data.dtype) + else: + return self.__class__((data, self.indices, self.indptr), + shape=self.shape, dtype=data.dtype) + + def _binopt(self, other, op): + """apply the binary operation fn to two sparse matrices.""" + other = self.__class__(other) + + # e.g. csr_plus_csr, csr_minus_csr, etc. + fn = getattr(_sparsetools, self.format + op + self.format) + + maxnnz = self.nnz + other.nnz + idx_dtype = self._get_index_dtype((self.indptr, self.indices, + other.indptr, other.indices), + maxval=maxnnz) + indptr = np.empty(self.indptr.shape, dtype=idx_dtype) + indices = np.empty(maxnnz, dtype=idx_dtype) + + bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_'] + if op in bool_ops: + data = np.empty(maxnnz, dtype=np.bool_) + else: + data = np.empty(maxnnz, dtype=upcast(self.dtype, other.dtype)) + + fn(self.shape[0], self.shape[1], + np.asarray(self.indptr, dtype=idx_dtype), + np.asarray(self.indices, dtype=idx_dtype), + self.data, + np.asarray(other.indptr, dtype=idx_dtype), + np.asarray(other.indices, dtype=idx_dtype), + other.data, + indptr, indices, data) + + A = self.__class__((data, indices, indptr), shape=self.shape) + A.prune() + + return A + + def _divide_sparse(self, other): + """ + Divide this matrix by a second sparse matrix. + """ + if other.shape != self.shape: + raise ValueError('inconsistent shapes') + + r = self._binopt(other, '_eldiv_') + + if np.issubdtype(r.dtype, np.inexact): + # Eldiv leaves entries outside the combined sparsity + # pattern empty, so they must be filled manually. + # Everything outside of other's sparsity is NaN, and everything + # inside it is either zero or defined by eldiv. 
+ out = np.empty(self.shape, dtype=self.dtype) + out.fill(np.nan) + row, col = other.nonzero() + out[row, col] = 0 + r = r.tocoo() + out[r.row, r.col] = r.data + out = self._container(out) + else: + # integers types go with nan <-> 0 + out = r + + return out + + +def _process_slice(sl, num): + if sl is None: + i0, i1 = 0, num + elif isinstance(sl, slice): + i0, i1, stride = sl.indices(num) + if stride != 1: + raise ValueError('slicing with step != 1 not supported') + i0 = min(i0, i1) # give an empty slice when i0 > i1 + elif isintlike(sl): + if sl < 0: + sl += num + i0, i1 = sl, sl + 1 + if i0 < 0 or i1 > num: + raise IndexError('index out of bounds: 0 <= %d < %d <= %d' % + (i0, i1, num)) + else: + raise TypeError('expected slice or scalar') + + return i0, i1 diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_construct.py b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_construct.py new file mode 100644 index 0000000000000000000000000000000000000000..6f5d3dd514d2684bfc6a53ae485c183e299f780f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_construct.py @@ -0,0 +1,1401 @@ +"""Functions to construct sparse matrices and arrays +""" + +__docformat__ = "restructuredtext en" + +__all__ = ['spdiags', 'eye', 'identity', 'kron', 'kronsum', + 'hstack', 'vstack', 'bmat', 'rand', 'random', 'diags', 'block_diag', + 'diags_array', 'block_array', 'eye_array', 'random_array'] + +import numbers +import math +import numpy as np + +from scipy._lib._util import check_random_state, rng_integers +from ._sputils import upcast, get_index_dtype, isscalarlike + +from ._sparsetools import csr_hstack +from ._bsr import bsr_matrix, bsr_array +from ._coo import coo_matrix, coo_array +from ._csc import csc_matrix, csc_array +from ._csr import csr_matrix, csr_array +from ._dia import dia_matrix, dia_array + +from ._base import issparse, sparray + + +def spdiags(data, diags, m=None, n=None, format=None): + """ + Return a sparse matrix from 
diagonals. + + Parameters + ---------- + data : array_like + Matrix diagonals stored row-wise + diags : sequence of int or an int + Diagonals to set: + + * k = 0 the main diagonal + * k > 0 the kth upper diagonal + * k < 0 the kth lower diagonal + m, n : int, tuple, optional + Shape of the result. If `n` is None and `m` is a given tuple, + the shape is this tuple. If omitted, the matrix is square and + its shape is len(data[0]). + format : str, optional + Format of the result. By default (format=None) an appropriate sparse + matrix format is returned. This choice is subject to change. + + .. warning:: + + This function returns a sparse matrix -- not a sparse array. + You are encouraged to use ``diags_array`` to take advantage + of the sparse array functionality. + + See Also + -------- + diags_array : more convenient form of this function + diags : matrix version of diags_array + dia_matrix : the sparse DIAgonal format. + + Examples + -------- + >>> import numpy as np + >>> from scipy.sparse import spdiags + >>> data = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) + >>> diags = np.array([0, -1, 2]) + >>> spdiags(data, diags, 4, 4).toarray() + array([[1, 0, 3, 0], + [1, 2, 0, 4], + [0, 2, 3, 0], + [0, 0, 3, 4]]) + + """ + if m is None and n is None: + m = n = len(data[0]) + elif n is None: + m, n = m + return dia_matrix((data, diags), shape=(m, n)).asformat(format) + + +def diags_array(diagonals, /, *, offsets=0, shape=None, format=None, dtype=None): + """ + Construct a sparse array from diagonals. + + Parameters + ---------- + diagonals : sequence of array_like + Sequence of arrays containing the array diagonals, + corresponding to `offsets`. + offsets : sequence of int or an int, optional + Diagonals to set: + - k = 0 the main diagonal (default) + - k > 0 the kth upper diagonal + - k < 0 the kth lower diagonal + shape : tuple of int, optional + Shape of the result. If omitted, a square array large enough + to contain the diagonals is returned. 
+ format : {"dia", "csr", "csc", "lil", ...}, optional + Matrix format of the result. By default (format=None) an + appropriate sparse array format is returned. This choice is + subject to change. + dtype : dtype, optional + Data type of the array. + + Notes + ----- + The result from `diags_array` is the sparse equivalent of:: + + np.diag(diagonals[0], offsets[0]) + + ... + + np.diag(diagonals[k], offsets[k]) + + Repeated diagonal offsets are disallowed. + + .. versionadded:: 1.11 + + Examples + -------- + >>> from scipy.sparse import diags_array + >>> diagonals = [[1, 2, 3, 4], [1, 2, 3], [1, 2]] + >>> diags_array(diagonals, offsets=[0, -1, 2]).toarray() + array([[1, 0, 1, 0], + [1, 2, 0, 2], + [0, 2, 3, 0], + [0, 0, 3, 4]]) + + Broadcasting of scalars is supported (but shape needs to be + specified): + + >>> diags_array([1, -2, 1], offsets=[-1, 0, 1], shape=(4, 4)).toarray() + array([[-2., 1., 0., 0.], + [ 1., -2., 1., 0.], + [ 0., 1., -2., 1.], + [ 0., 0., 1., -2.]]) + + + If only one diagonal is wanted (as in `numpy.diag`), the following + works as well: + + >>> diags_array([1, 2, 3], offsets=1).toarray() + array([[ 0., 1., 0., 0.], + [ 0., 0., 2., 0.], + [ 0., 0., 0., 3.], + [ 0., 0., 0., 0.]]) + """ + # if offsets is not a sequence, assume that there's only one diagonal + if isscalarlike(offsets): + # now check that there's actually only one diagonal + if len(diagonals) == 0 or isscalarlike(diagonals[0]): + diagonals = [np.atleast_1d(diagonals)] + else: + raise ValueError("Different number of diagonals and offsets.") + else: + diagonals = list(map(np.atleast_1d, diagonals)) + + offsets = np.atleast_1d(offsets) + + # Basic check + if len(diagonals) != len(offsets): + raise ValueError("Different number of diagonals and offsets.") + + # Determine shape, if omitted + if shape is None: + m = len(diagonals[0]) + abs(int(offsets[0])) + shape = (m, m) + + # Determine data type, if omitted + if dtype is None: + dtype = np.common_type(*diagonals) + + # Construct data 
array + m, n = shape + + M = max([min(m + offset, n - offset) + max(0, offset) + for offset in offsets]) + M = max(0, M) + data_arr = np.zeros((len(offsets), M), dtype=dtype) + + K = min(m, n) + + for j, diagonal in enumerate(diagonals): + offset = offsets[j] + k = max(0, offset) + length = min(m + offset, n - offset, K) + if length < 0: + raise ValueError("Offset %d (index %d) out of bounds" % (offset, j)) + try: + data_arr[j, k:k+length] = diagonal[...,:length] + except ValueError as e: + if len(diagonal) != length and len(diagonal) != 1: + raise ValueError( + "Diagonal length (index %d: %d at offset %d) does not " + "agree with array size (%d, %d)." % ( + j, len(diagonal), offset, m, n)) from e + raise + + return dia_array((data_arr, offsets), shape=(m, n)).asformat(format) + + +def diags(diagonals, offsets=0, shape=None, format=None, dtype=None): + """ + Construct a sparse matrix from diagonals. + + .. warning:: + + This function returns a sparse matrix -- not a sparse array. + You are encouraged to use ``diags_array`` to take advantage + of the sparse array functionality. + + Parameters + ---------- + diagonals : sequence of array_like + Sequence of arrays containing the matrix diagonals, + corresponding to `offsets`. + offsets : sequence of int or an int, optional + Diagonals to set: + - k = 0 the main diagonal (default) + - k > 0 the kth upper diagonal + - k < 0 the kth lower diagonal + shape : tuple of int, optional + Shape of the result. If omitted, a square matrix large enough + to contain the diagonals is returned. + format : {"dia", "csr", "csc", "lil", ...}, optional + Matrix format of the result. By default (format=None) an + appropriate sparse matrix format is returned. This choice is + subject to change. + dtype : dtype, optional + Data type of the matrix. 
+ + See Also + -------- + spdiags : construct matrix from diagonals + diags_array : construct sparse array instead of sparse matrix + + Notes + ----- + This function differs from `spdiags` in the way it handles + off-diagonals. + + The result from `diags` is the sparse equivalent of:: + + np.diag(diagonals[0], offsets[0]) + + ... + + np.diag(diagonals[k], offsets[k]) + + Repeated diagonal offsets are disallowed. + + .. versionadded:: 0.11 + + Examples + -------- + >>> from scipy.sparse import diags + >>> diagonals = [[1, 2, 3, 4], [1, 2, 3], [1, 2]] + >>> diags(diagonals, [0, -1, 2]).toarray() + array([[1, 0, 1, 0], + [1, 2, 0, 2], + [0, 2, 3, 0], + [0, 0, 3, 4]]) + + Broadcasting of scalars is supported (but shape needs to be + specified): + + >>> diags([1, -2, 1], [-1, 0, 1], shape=(4, 4)).toarray() + array([[-2., 1., 0., 0.], + [ 1., -2., 1., 0.], + [ 0., 1., -2., 1.], + [ 0., 0., 1., -2.]]) + + + If only one diagonal is wanted (as in `numpy.diag`), the following + works as well: + + >>> diags([1, 2, 3], 1).toarray() + array([[ 0., 1., 0., 0.], + [ 0., 0., 2., 0.], + [ 0., 0., 0., 3.], + [ 0., 0., 0., 0.]]) + """ + A = diags_array(diagonals, offsets=offsets, shape=shape, dtype=dtype) + return dia_matrix(A).asformat(format) + + +def identity(n, dtype='d', format=None): + """Identity matrix in sparse format + + Returns an identity matrix with shape (n,n) using a given + sparse format and dtype. This differs from `eye_array` in + that it has a square shape with ones only on the main diagonal. + It is thus the multiplicative identity. `eye_array` allows + rectangular shapes and the diagonal can be offset from the main one. + + .. warning:: + + This function returns a sparse matrix -- not a sparse array. + You are encouraged to use ``eye_array`` to take advantage + of the sparse array functionality. + + Parameters + ---------- + n : int + Shape of the identity matrix. 
+ dtype : dtype, optional + Data type of the matrix + format : str, optional + Sparse format of the result, e.g., format="csr", etc. + + Examples + -------- + >>> import scipy as sp + >>> sp.sparse.identity(3).toarray() + array([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + >>> sp.sparse.identity(3, dtype='int8', format='dia') + <3x3 sparse matrix of type '' + with 3 stored elements (1 diagonals) in DIAgonal format> + >>> sp.sparse.eye_array(3, dtype='int8', format='dia') + <3x3 sparse array of type '' + with 3 stored elements (1 diagonals) in DIAgonal format> + + """ + return eye(n, n, dtype=dtype, format=format) + + +def eye_array(m, n=None, *, k=0, dtype=float, format=None): + """Identity matrix in sparse array format + + Return a sparse array with ones on diagonal. + Specifically a sparse array (m x n) where the kth diagonal + is all ones and everything else is zeros. + + Parameters + ---------- + m : int or tuple of ints + Number of rows requested. + n : int, optional + Number of columns. Default: `m`. + k : int, optional + Diagonal to place ones on. Default: 0 (main diagonal). + dtype : dtype, optional + Data type of the array + format : str, optional (default: "dia") + Sparse format of the result, e.g., format="csr", etc. 
+ + Examples + -------- + >>> import numpy as np + >>> import scipy as sp + >>> sp.sparse.eye_array(3).toarray() + array([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + >>> sp.sparse.eye_array(3, dtype=np.int8) + <3x3 sparse array of type '' + with 3 stored elements (1 diagonals) in DIAgonal format> + + """ + # TODO: delete next 15 lines [combine with _eye()] once spmatrix removed + return _eye(m, n, k, dtype, format) + + +def _eye(m, n, k, dtype, format, as_sparray=True): + if as_sparray: + csr_sparse = csr_array + csc_sparse = csc_array + coo_sparse = coo_array + diags_sparse = diags_array + else: + csr_sparse = csr_matrix + csc_sparse = csc_matrix + coo_sparse = coo_matrix + diags_sparse = diags + + if n is None: + n = m + m, n = int(m), int(n) + + if m == n and k == 0: + # fast branch for special formats + if format in ['csr', 'csc']: + idx_dtype = get_index_dtype(maxval=n) + indptr = np.arange(n+1, dtype=idx_dtype) + indices = np.arange(n, dtype=idx_dtype) + data = np.ones(n, dtype=dtype) + cls = {'csr': csr_sparse, 'csc': csc_sparse}[format] + return cls((data, indices, indptr), (n, n)) + + elif format == 'coo': + idx_dtype = get_index_dtype(maxval=n) + row = np.arange(n, dtype=idx_dtype) + col = np.arange(n, dtype=idx_dtype) + data = np.ones(n, dtype=dtype) + return coo_sparse((data, (row, col)), (n, n)) + + data = np.ones((1, max(0, min(m + k, n))), dtype=dtype) + return diags_sparse(data, offsets=[k], shape=(m, n), dtype=dtype).asformat(format) + + +def eye(m, n=None, k=0, dtype=float, format=None): + """Sparse matrix with ones on diagonal + + Returns a sparse matrix (m x n) where the kth diagonal + is all ones and everything else is zeros. + + Parameters + ---------- + m : int + Number of rows in the matrix. + n : int, optional + Number of columns. Default: `m`. + k : int, optional + Diagonal to place ones on. Default: 0 (main diagonal). + dtype : dtype, optional + Data type of the matrix. 
+ format : str, optional + Sparse format of the result, e.g., format="csr", etc. + + .. warning:: + + This function returns a sparse matrix -- not a sparse array. + You are encouraged to use ``eye_array`` to take advantage + of the sparse array functionality. + + Examples + -------- + >>> import numpy as np + >>> import scipy as sp + >>> sp.sparse.eye(3).toarray() + array([[ 1., 0., 0.], + [ 0., 1., 0.], + [ 0., 0., 1.]]) + >>> sp.sparse.eye(3, dtype=np.int8) + <3x3 sparse matrix of type '' + with 3 stored elements (1 diagonals) in DIAgonal format> + + """ + return _eye(m, n, k, dtype, format, False) + + +def kron(A, B, format=None): + """kronecker product of sparse matrices A and B + + Parameters + ---------- + A : sparse or dense matrix + first matrix of the product + B : sparse or dense matrix + second matrix of the product + format : str, optional (default: 'bsr' or 'coo') + format of the result (e.g. "csr") + If None, choose 'bsr' for relatively dense array and 'coo' for others + + Returns + ------- + kronecker product in a sparse format. + Returns a sparse matrix unless either A or B is a + sparse array in which case returns a sparse array. 
+ + Examples + -------- + >>> import numpy as np + >>> import scipy as sp + >>> A = sp.sparse.csr_array(np.array([[0, 2], [5, 0]])) + >>> B = sp.sparse.csr_array(np.array([[1, 2], [3, 4]])) + >>> sp.sparse.kron(A, B).toarray() + array([[ 0, 0, 2, 4], + [ 0, 0, 6, 8], + [ 5, 10, 0, 0], + [15, 20, 0, 0]]) + + >>> sp.sparse.kron(A, [[1, 2], [3, 4]]).toarray() + array([[ 0, 0, 2, 4], + [ 0, 0, 6, 8], + [ 5, 10, 0, 0], + [15, 20, 0, 0]]) + + """ + # TODO: delete next 10 lines and replace _sparse with _array when spmatrix removed + if isinstance(A, sparray) or isinstance(B, sparray): + # convert to local variables + bsr_sparse = bsr_array + csr_sparse = csr_array + coo_sparse = coo_array + else: # use spmatrix + bsr_sparse = bsr_matrix + csr_sparse = csr_matrix + coo_sparse = coo_matrix + + B = coo_sparse(B) + + # B is fairly dense, use BSR + if (format is None or format == "bsr") and 2*B.nnz >= B.shape[0] * B.shape[1]: + A = csr_sparse(A,copy=True) + output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1]) + + if A.nnz == 0 or B.nnz == 0: + # kronecker product is the zero matrix + return coo_sparse(output_shape).asformat(format) + + B = B.toarray() + data = A.data.repeat(B.size).reshape(-1,B.shape[0],B.shape[1]) + data = data * B + + return bsr_sparse((data,A.indices,A.indptr), shape=output_shape) + else: + # use COO + A = coo_sparse(A) + output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1]) + + if A.nnz == 0 or B.nnz == 0: + # kronecker product is the zero matrix + return coo_sparse(output_shape).asformat(format) + + # expand entries of a into blocks + row = A.row.repeat(B.nnz) + col = A.col.repeat(B.nnz) + data = A.data.repeat(B.nnz) + + if max(A.shape[0]*B.shape[0], A.shape[1]*B.shape[1]) > np.iinfo('int32').max: + row = row.astype(np.int64) + col = col.astype(np.int64) + + row *= B.shape[0] + col *= B.shape[1] + + # increment block indices + row,col = row.reshape(-1,B.nnz),col.reshape(-1,B.nnz) + row += B.row + col += B.col + row,col = 
row.reshape(-1),col.reshape(-1) + + # compute block entries + data = data.reshape(-1,B.nnz) * B.data + data = data.reshape(-1) + + return coo_sparse((data,(row,col)), shape=output_shape).asformat(format) + + +def kronsum(A, B, format=None): + """kronecker sum of square sparse matrices A and B + + Kronecker sum of two sparse matrices is a sum of two Kronecker + products kron(I_n,A) + kron(B,I_m) where A has shape (m,m) + and B has shape (n,n) and I_m and I_n are identity matrices + of shape (m,m) and (n,n), respectively. + + Parameters + ---------- + A + square matrix + B + square matrix + format : str + format of the result (e.g. "csr") + + Returns + ------- + kronecker sum in a sparse matrix format + + """ + # TODO: delete next 8 lines and replace _sparse with _array when spmatrix removed + if isinstance(A, sparray) or isinstance(B, sparray): + # convert to local variables + coo_sparse = coo_array + identity_sparse = eye_array + else: + coo_sparse = coo_matrix + identity_sparse = identity + + A = coo_sparse(A) + B = coo_sparse(B) + + if A.shape[0] != A.shape[1]: + raise ValueError('A is not square') + + if B.shape[0] != B.shape[1]: + raise ValueError('B is not square') + + dtype = upcast(A.dtype, B.dtype) + + I_n = identity_sparse(A.shape[0], dtype=dtype) + I_m = identity_sparse(B.shape[0], dtype=dtype) + L = kron(I_m, A, format='coo') + R = kron(B, I_n, format='coo') + + return (L + R).asformat(format) + + +def _compressed_sparse_stack(blocks, axis, return_spmatrix): + """ + Stacking fast path for CSR/CSC matrices or arrays + (i) vstack for CSR, (ii) hstack for CSC. 
+ """ + other_axis = 1 if axis == 0 else 0 + data = np.concatenate([b.data for b in blocks]) + constant_dim = blocks[0].shape[other_axis] + idx_dtype = get_index_dtype(arrays=[b.indptr for b in blocks], + maxval=max(data.size, constant_dim)) + indices = np.empty(data.size, dtype=idx_dtype) + indptr = np.empty(sum(b.shape[axis] for b in blocks) + 1, dtype=idx_dtype) + last_indptr = idx_dtype(0) + sum_dim = 0 + sum_indices = 0 + for b in blocks: + if b.shape[other_axis] != constant_dim: + raise ValueError(f'incompatible dimensions for axis {other_axis}') + indices[sum_indices:sum_indices+b.indices.size] = b.indices + sum_indices += b.indices.size + idxs = slice(sum_dim, sum_dim + b.shape[axis]) + indptr[idxs] = b.indptr[:-1] + indptr[idxs] += last_indptr + sum_dim += b.shape[axis] + last_indptr += b.indptr[-1] + indptr[-1] = last_indptr + # TODO remove this if-structure when sparse matrices removed + if return_spmatrix: + if axis == 0: + return csr_matrix((data, indices, indptr), + shape=(sum_dim, constant_dim)) + else: + return csc_matrix((data, indices, indptr), + shape=(constant_dim, sum_dim)) + + if axis == 0: + return csr_array((data, indices, indptr), + shape=(sum_dim, constant_dim)) + else: + return csc_array((data, indices, indptr), + shape=(constant_dim, sum_dim)) + + +def _stack_along_minor_axis(blocks, axis): + """ + Stacking fast path for CSR/CSC matrices along the minor axis + (i) hstack for CSR, (ii) vstack for CSC. 
+ """ + n_blocks = len(blocks) + if n_blocks == 0: + raise ValueError('Missing block matrices') + + if n_blocks == 1: + return blocks[0] + + # check for incompatible dimensions + other_axis = 1 if axis == 0 else 0 + other_axis_dims = {b.shape[other_axis] for b in blocks} + if len(other_axis_dims) > 1: + raise ValueError(f'Mismatching dimensions along axis {other_axis}: ' + f'{other_axis_dims}') + constant_dim, = other_axis_dims + + # Do the stacking + indptr_list = [b.indptr for b in blocks] + data_cat = np.concatenate([b.data for b in blocks]) + + # Need to check if any indices/indptr, would be too large post- + # concatenation for np.int32: + # - The max value of indices is the output array's stacking-axis length - 1 + # - The max value in indptr is the number of non-zero entries. This is + # exceedingly unlikely to require int64, but is checked out of an + # abundance of caution. + sum_dim = sum(b.shape[axis] for b in blocks) + nnz = sum(len(b.indices) for b in blocks) + idx_dtype = get_index_dtype(maxval=max(sum_dim - 1, nnz)) + stack_dim_cat = np.array([b.shape[axis] for b in blocks], dtype=idx_dtype) + if data_cat.size > 0: + indptr_cat = np.concatenate(indptr_list).astype(idx_dtype) + indices_cat = (np.concatenate([b.indices for b in blocks]) + .astype(idx_dtype)) + indptr = np.empty(constant_dim + 1, dtype=idx_dtype) + indices = np.empty_like(indices_cat) + data = np.empty_like(data_cat) + csr_hstack(n_blocks, constant_dim, stack_dim_cat, + indptr_cat, indices_cat, data_cat, + indptr, indices, data) + else: + indptr = np.zeros(constant_dim + 1, dtype=idx_dtype) + indices = np.empty(0, dtype=idx_dtype) + data = np.empty(0, dtype=data_cat.dtype) + + if axis == 0: + return blocks[0]._csc_container((data, indices, indptr), + shape=(sum_dim, constant_dim)) + else: + return blocks[0]._csr_container((data, indices, indptr), + shape=(constant_dim, sum_dim)) + + +def hstack(blocks, format=None, dtype=None): + """ + Stack sparse matrices horizontally (column wise) + 
+ Parameters + ---------- + blocks + sequence of sparse matrices with compatible shapes + format : str + sparse format of the result (e.g., "csr") + by default an appropriate sparse matrix format is returned. + This choice is subject to change. + dtype : dtype, optional + The data-type of the output matrix. If not given, the dtype is + determined from that of `blocks`. + + Returns + ------- + new_array : sparse matrix or array + If any block in blocks is a sparse array, return a sparse array. + Otherwise return a sparse matrix. + + If you want a sparse array built from blocks that are not sparse + arrays, use `block(hstack(blocks))` or convert one block + e.g. `blocks[0] = csr_array(blocks[0])`. + + See Also + -------- + vstack : stack sparse matrices vertically (row wise) + + Examples + -------- + >>> from scipy.sparse import coo_matrix, hstack + >>> A = coo_matrix([[1, 2], [3, 4]]) + >>> B = coo_matrix([[5], [6]]) + >>> hstack([A,B]).toarray() + array([[1, 2, 5], + [3, 4, 6]]) + + """ + blocks = np.asarray(blocks, dtype='object') + if any(isinstance(b, sparray) for b in blocks.flat): + return _block([blocks], format, dtype) + else: + return _block([blocks], format, dtype, return_spmatrix=True) + + +def vstack(blocks, format=None, dtype=None): + """ + Stack sparse arrays vertically (row wise) + + Parameters + ---------- + blocks + sequence of sparse arrays with compatible shapes + format : str, optional + sparse format of the result (e.g., "csr") + by default an appropriate sparse array format is returned. + This choice is subject to change. + dtype : dtype, optional + The data-type of the output array. If not given, the dtype is + determined from that of `blocks`. + + Returns + ------- + new_array : sparse matrix or array + If any block in blocks is a sparse array, return a sparse array. + Otherwise return a sparse matrix. + + If you want a sparse array built from blocks that are not sparse + arrays, use `block(vstack(blocks))` or convert one block + e.g. 
`blocks[0] = csr_array(blocks[0])`. + + See Also + -------- + hstack : stack sparse matrices horizontally (column wise) + + Examples + -------- + >>> from scipy.sparse import coo_array, vstack + >>> A = coo_array([[1, 2], [3, 4]]) + >>> B = coo_array([[5, 6]]) + >>> vstack([A, B]).toarray() + array([[1, 2], + [3, 4], + [5, 6]]) + + """ + blocks = np.asarray(blocks, dtype='object') + if any(isinstance(b, sparray) for b in blocks.flat): + return _block([[b] for b in blocks], format, dtype) + else: + return _block([[b] for b in blocks], format, dtype, return_spmatrix=True) + + +def bmat(blocks, format=None, dtype=None): + """ + Build a sparse array or matrix from sparse sub-blocks + + Note: `block_array` is preferred over `bmat`. They are the same function + except that `bmat` can return a deprecated sparse matrix. + `bmat` returns a coo_matrix if none of the inputs are a sparse array. + + .. warning:: + + This function returns a sparse matrix -- not a sparse array. + You are encouraged to use ``block_array`` to take advantage + of the sparse array functionality. + + Parameters + ---------- + blocks : array_like + Grid of sparse matrices with compatible shapes. + An entry of None implies an all-zero matrix. + format : {'bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'}, optional + The sparse format of the result (e.g. "csr"). By default an + appropriate sparse matrix format is returned. + This choice is subject to change. + dtype : dtype, optional + The data-type of the output matrix. If not given, the dtype is + determined from that of `blocks`. + + Returns + ------- + bmat : sparse matrix or array + If any block in blocks is a sparse array, return a sparse array. + Otherwise return a sparse matrix. + + If you want a sparse array built from blocks that are not sparse + arrays, use `block_array()`. 
+ + See Also + -------- + block_array + + Examples + -------- + >>> from scipy.sparse import coo_array, bmat + >>> A = coo_array([[1, 2], [3, 4]]) + >>> B = coo_array([[5], [6]]) + >>> C = coo_array([[7]]) + >>> bmat([[A, B], [None, C]]).toarray() + array([[1, 2, 5], + [3, 4, 6], + [0, 0, 7]]) + + >>> bmat([[A, None], [None, C]]).toarray() + array([[1, 2, 0], + [3, 4, 0], + [0, 0, 7]]) + + """ + blocks = np.asarray(blocks, dtype='object') + if any(isinstance(b, sparray) for b in blocks.flat): + return _block(blocks, format, dtype) + else: + return _block(blocks, format, dtype, return_spmatrix=True) + + +def block_array(blocks, *, format=None, dtype=None): + """ + Build a sparse array from sparse sub-blocks + + Parameters + ---------- + blocks : array_like + Grid of sparse arrays with compatible shapes. + An entry of None implies an all-zero array. + format : {'bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'}, optional + The sparse format of the result (e.g. "csr"). By default an + appropriate sparse array format is returned. + This choice is subject to change. + dtype : dtype, optional + The data-type of the output array. If not given, the dtype is + determined from that of `blocks`. 
+ + Returns + ------- + block : sparse array + + See Also + -------- + block_diag : specify blocks along the main diagonals + diags : specify (possibly offset) diagonals + + Examples + -------- + >>> from scipy.sparse import coo_array, block_array + >>> A = coo_array([[1, 2], [3, 4]]) + >>> B = coo_array([[5], [6]]) + >>> C = coo_array([[7]]) + >>> block_array([[A, B], [None, C]]).toarray() + array([[1, 2, 5], + [3, 4, 6], + [0, 0, 7]]) + + >>> block_array([[A, None], [None, C]]).toarray() + array([[1, 2, 0], + [3, 4, 0], + [0, 0, 7]]) + + """ + return _block(blocks, format, dtype) + + +def _block(blocks, format, dtype, return_spmatrix=False): + blocks = np.asarray(blocks, dtype='object') + + if blocks.ndim != 2: + raise ValueError('blocks must be 2-D') + + M,N = blocks.shape + + # check for fast path cases + if (format in (None, 'csr') and + all(issparse(b) and b.format == 'csr' for b in blocks.flat) + ): + if N > 1: + # stack along columns (axis 1): must have shape (M, 1) + blocks = [[_stack_along_minor_axis(blocks[b, :], 1)] for b in range(M)] + blocks = np.asarray(blocks, dtype='object') + + # stack along rows (axis 0): + A = _compressed_sparse_stack(blocks[:, 0], 0, return_spmatrix) + if dtype is not None: + A = A.astype(dtype) + return A + elif (format in (None, 'csc') and + all(issparse(b) and b.format == 'csc' for b in blocks.flat) + ): + if M > 1: + # stack along rows (axis 0): must have shape (1, N) + blocks = [[_stack_along_minor_axis(blocks[:, b], 0) for b in range(N)]] + blocks = np.asarray(blocks, dtype='object') + + # stack along columns (axis 1): + A = _compressed_sparse_stack(blocks[0, :], 1, return_spmatrix) + if dtype is not None: + A = A.astype(dtype) + return A + + block_mask = np.zeros(blocks.shape, dtype=bool) + brow_lengths = np.zeros(M, dtype=np.int64) + bcol_lengths = np.zeros(N, dtype=np.int64) + + # convert everything to COO format + for i in range(M): + for j in range(N): + if blocks[i,j] is not None: + A = coo_array(blocks[i,j]) + 
blocks[i,j] = A + block_mask[i,j] = True + + if brow_lengths[i] == 0: + brow_lengths[i] = A.shape[0] + elif brow_lengths[i] != A.shape[0]: + msg = (f'blocks[{i},:] has incompatible row dimensions. ' + f'Got blocks[{i},{j}].shape[0] == {A.shape[0]}, ' + f'expected {brow_lengths[i]}.') + raise ValueError(msg) + + if bcol_lengths[j] == 0: + bcol_lengths[j] = A.shape[1] + elif bcol_lengths[j] != A.shape[1]: + msg = (f'blocks[:,{j}] has incompatible column ' + f'dimensions. ' + f'Got blocks[{i},{j}].shape[1] == {A.shape[1]}, ' + f'expected {bcol_lengths[j]}.') + raise ValueError(msg) + + nnz = sum(block.nnz for block in blocks[block_mask]) + if dtype is None: + all_dtypes = [blk.dtype for blk in blocks[block_mask]] + dtype = upcast(*all_dtypes) if all_dtypes else None + + row_offsets = np.append(0, np.cumsum(brow_lengths)) + col_offsets = np.append(0, np.cumsum(bcol_lengths)) + + shape = (row_offsets[-1], col_offsets[-1]) + + data = np.empty(nnz, dtype=dtype) + idx_dtype = get_index_dtype(maxval=max(shape)) + row = np.empty(nnz, dtype=idx_dtype) + col = np.empty(nnz, dtype=idx_dtype) + + nnz = 0 + ii, jj = np.nonzero(block_mask) + for i, j in zip(ii, jj): + B = blocks[i, j] + idx = slice(nnz, nnz + B.nnz) + data[idx] = B.data + np.add(B.row, row_offsets[i], out=row[idx], dtype=idx_dtype) + np.add(B.col, col_offsets[j], out=col[idx], dtype=idx_dtype) + nnz += B.nnz + + if return_spmatrix: + return coo_matrix((data, (row, col)), shape=shape).asformat(format) + return coo_array((data, (row, col)), shape=shape).asformat(format) + + +def block_diag(mats, format=None, dtype=None): + """ + Build a block diagonal sparse matrix or array from provided matrices. + + Parameters + ---------- + mats : sequence of matrices or arrays + Input matrices or arrays. + format : str, optional + The sparse format of the result (e.g., "csr"). If not given, the result + is returned in "coo" format. + dtype : dtype specifier, optional + The data-type of the output. 
If not given, the dtype is + determined from that of `blocks`. + + Returns + ------- + res : sparse matrix or array + If at least one input is a sparse array, the output is a sparse array. + Otherwise the output is a sparse matrix. + + Notes + ----- + + .. versionadded:: 0.11.0 + + See Also + -------- + block_array + diags_array + + Examples + -------- + >>> from scipy.sparse import coo_array, block_diag + >>> A = coo_array([[1, 2], [3, 4]]) + >>> B = coo_array([[5], [6]]) + >>> C = coo_array([[7]]) + >>> block_diag((A, B, C)).toarray() + array([[1, 2, 0, 0], + [3, 4, 0, 0], + [0, 0, 5, 0], + [0, 0, 6, 0], + [0, 0, 0, 7]]) + + """ + if any(isinstance(a, sparray) for a in mats): + container = coo_array + else: + container = coo_matrix + + row = [] + col = [] + data = [] + r_idx = 0 + c_idx = 0 + for a in mats: + if isinstance(a, (list, numbers.Number)): + a = coo_array(np.atleast_2d(a)) + if issparse(a): + a = a.tocoo() + nrows, ncols = a._shape_as_2d + row.append(a.row + r_idx) + col.append(a.col + c_idx) + data.append(a.data) + else: + nrows, ncols = a.shape + a_row, a_col = np.divmod(np.arange(nrows*ncols), ncols) + row.append(a_row + r_idx) + col.append(a_col + c_idx) + data.append(a.ravel()) + r_idx += nrows + c_idx += ncols + row = np.concatenate(row) + col = np.concatenate(col) + data = np.concatenate(data) + return container((data, (row, col)), + shape=(r_idx, c_idx), + dtype=dtype).asformat(format) + + +def random_array(shape, *, density=0.01, format='coo', dtype=None, + random_state=None, data_sampler=None): + """Return a sparse array of uniformly random numbers in [0, 1) + + Returns a sparse array with the given shape and density + where values are generated uniformly randomly in the range [0, 1). + + .. warning:: + + Since numpy 1.17, passing a ``np.random.Generator`` (e.g. + ``np.random.default_rng``) for ``random_state`` will lead to much + faster execution times. + + A much slower implementation is used by default for backwards + compatibility. 
+ + Parameters + ---------- + shape : int or tuple of ints + shape of the array + density : real, optional (default: 0.01) + density of the generated matrix: density equal to one means a full + matrix, density of 0 means a matrix with no non-zero items. + format : str, optional (default: 'coo') + sparse matrix format. + dtype : dtype, optional (default: np.float64) + type of the returned matrix values. + random_state : {None, int, `Generator`, `RandomState`}, optional + A random number generator to determine nonzero structure. We recommend using + a `numpy.random.Generator` manually provided for every call as it is much + faster than RandomState. + + - If `None` (or `np.random`), the `numpy.random.RandomState` + singleton is used. + - If an int, a new ``Generator`` instance is used, + seeded with the int. + - If a ``Generator`` or ``RandomState`` instance then + that instance is used. + + This random state will be used for sampling `indices` (the sparsity + structure), and by default for the data values too (see `data_sampler`). + + data_sampler : callable, optional (default depends on dtype) + Sampler of random data values with keyword arg `size`. + This function should take a single keyword argument `size` specifying + the length of its returned ndarray. It is used to generate the nonzero + values in the matrix after the locations of those values are chosen. + By default, uniform [0, 1) random values are used unless `dtype` is + an integer (default uniform integers from that dtype) or + complex (default uniform over the unit square in the complex plane). + For these, the `random_state` rng is used e.g. `rng.uniform(size=size)`. 
+ + Returns + ------- + res : sparse array + + Examples + -------- + + Passing a ``np.random.Generator`` instance for better performance: + + >>> import numpy as np + >>> import scipy as sp + >>> rng = np.random.default_rng() + + Default sampling uniformly from [0, 1): + + >>> S = sp.sparse.random_array((3, 4), density=0.25, random_state=rng) + + Providing a sampler for the values: + + >>> rvs = sp.stats.poisson(25, loc=10).rvs + >>> S = sp.sparse.random_array((3, 4), density=0.25, + ... random_state=rng, data_sampler=rvs) + >>> S.toarray() + array([[ 36., 0., 33., 0.], # random + [ 0., 0., 0., 0.], + [ 0., 0., 36., 0.]]) + + Building a custom distribution. + This example builds a squared normal from np.random: + + >>> def np_normal_squared(size=None, random_state=rng): + ... return random_state.standard_normal(size) ** 2 + >>> S = sp.sparse.random_array((3, 4), density=0.25, random_state=rng, + ... data_sampler=np_normal_squared) + + Or we can build it from sp.stats style rvs functions: + + >>> def sp_stats_normal_squared(size=None, random_state=rng): + ... std_normal = sp.stats.distributions.norm_gen().rvs + ... return std_normal(size=size, random_state=random_state) ** 2 + >>> S = sp.sparse.random_array((3, 4), density=0.25, random_state=rng, + ... data_sampler=sp_stats_normal_squared) + + Or we can subclass sp.stats rv_continous or rv_discrete: + + >>> class NormalSquared(sp.stats.rv_continuous): + ... def _rvs(self, size=None, random_state=rng): + ... return random_state.standard_normal(size) ** 2 + >>> X = NormalSquared() + >>> Y = X().rvs + >>> S = sp.sparse.random_array((3, 4), density=0.25, + ... random_state=rng, data_sampler=Y) + """ + # Use the more efficient RNG by default. 
+ if random_state is None: + random_state = np.random.default_rng() + data, ind = _random(shape, density, format, dtype, random_state, data_sampler) + return coo_array((data, ind), shape=shape).asformat(format) + + +def _random(shape, density=0.01, format=None, dtype=None, + random_state=None, data_sampler=None): + if density < 0 or density > 1: + raise ValueError("density expected to be 0 <= density <= 1") + + tot_prod = math.prod(shape) # use `math` for when prod is >= 2**64 + + # Number of non zero values + size = int(round(density * tot_prod)) + + rng = check_random_state(random_state) + + if data_sampler is None: + if np.issubdtype(dtype, np.integer): + def data_sampler(size): + return rng_integers(rng, + np.iinfo(dtype).min, + np.iinfo(dtype).max, + size, + dtype=dtype) + elif np.issubdtype(dtype, np.complexfloating): + def data_sampler(size): + return (rng.uniform(size=size) + + rng.uniform(size=size) * 1j) + else: + data_sampler = rng.uniform + + # rng.choice uses int64 if first arg is an int + if tot_prod < np.iinfo(np.int64).max: + raveled_ind = rng.choice(tot_prod, size=size, replace=False) + ind = np.unravel_index(raveled_ind, shape=shape, order='F') + else: + # for ravel indices bigger than dtype max, use sets to remove duplicates + ndim = len(shape) + seen = set() + while len(seen) < size: + dsize = size - len(seen) + seen.update(map(tuple, rng_integers(rng, shape, size=(dsize, ndim)))) + ind = tuple(np.array(list(seen)).T) + + # size kwarg allows eg data_sampler=partial(np.random.poisson, lam=5) + vals = data_sampler(size=size).astype(dtype, copy=False) + return vals, ind + + +def random(m, n, density=0.01, format='coo', dtype=None, + random_state=None, data_rvs=None): + """Generate a sparse matrix of the given shape and density with randomly + distributed values. + + .. warning:: + + Since numpy 1.17, passing a ``np.random.Generator`` (e.g. + ``np.random.default_rng``) for ``random_state`` will lead to much + faster execution times. 
+ + A much slower implementation is used by default for backwards + compatibility. + + .. warning:: + + This function returns a sparse matrix -- not a sparse array. + You are encouraged to use ``random_array`` to take advantage of the + sparse array functionality. + + Parameters + ---------- + m, n : int + shape of the matrix + density : real, optional + density of the generated matrix: density equal to one means a full + matrix, density of 0 means a matrix with no non-zero items. + format : str, optional + sparse matrix format. + dtype : dtype, optional + type of the returned matrix values. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + - If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + - If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + - If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + + This random state will be used for sampling the sparsity structure, but + not necessarily for sampling the values of the structurally nonzero + entries of the matrix. + data_rvs : callable, optional + Samples a requested number of random values. + This function should take a single argument specifying the length + of the ndarray that it will return. The structurally nonzero entries + of the sparse random matrix will be taken from the array sampled + by this function. By default, uniform [0, 1) random values will be + sampled using the same random state as is used for sampling + the sparsity structure. 
+ + Returns + ------- + res : sparse matrix + + See Also + -------- + random_array : constructs sparse arrays instead of sparse matrices + + Examples + -------- + + Passing a ``np.random.Generator`` instance for better performance: + + >>> import scipy as sp + >>> import numpy as np + >>> rng = np.random.default_rng() + >>> S = sp.sparse.random(3, 4, density=0.25, random_state=rng) + + Providing a sampler for the values: + + >>> rvs = sp.stats.poisson(25, loc=10).rvs + >>> S = sp.sparse.random(3, 4, density=0.25, random_state=rng, data_rvs=rvs) + >>> S.toarray() + array([[ 36., 0., 33., 0.], # random + [ 0., 0., 0., 0.], + [ 0., 0., 36., 0.]]) + + Building a custom distribution. + This example builds a squared normal from np.random: + + >>> def np_normal_squared(size=None, random_state=rng): + ... return random_state.standard_normal(size) ** 2 + >>> S = sp.sparse.random(3, 4, density=0.25, random_state=rng, + ... data_rvs=np_normal_squared) + + Or we can build it from sp.stats style rvs functions: + + >>> def sp_stats_normal_squared(size=None, random_state=rng): + ... std_normal = sp.stats.distributions.norm_gen().rvs + ... return std_normal(size=size, random_state=random_state) ** 2 + >>> S = sp.sparse.random(3, 4, density=0.25, random_state=rng, + ... data_rvs=sp_stats_normal_squared) + + Or we can subclass sp.stats rv_continous or rv_discrete: + + >>> class NormalSquared(sp.stats.rv_continuous): + ... def _rvs(self, size=None, random_state=rng): + ... return random_state.standard_normal(size) ** 2 + >>> X = NormalSquared() + >>> Y = X() # get a frozen version of the distribution + >>> S = sp.sparse.random(3, 4, density=0.25, random_state=rng, data_rvs=Y.rvs) + """ + if n is None: + n = m + m, n = int(m), int(n) + # make keyword syntax work for data_rvs e.g. 
data_rvs(size=7) + if data_rvs is not None: + def data_rvs_kw(size): + return data_rvs(size) + else: + data_rvs_kw = None + vals, ind = _random((m, n), density, format, dtype, random_state, data_rvs_kw) + return coo_matrix((vals, ind), shape=(m, n)).asformat(format) + + +def rand(m, n, density=0.01, format="coo", dtype=None, random_state=None): + """Generate a sparse matrix of the given shape and density with uniformly + distributed values. + + .. warning:: + + This function returns a sparse matrix -- not a sparse array. + You are encouraged to use ``random_array`` to take advantage + of the sparse array functionality. + + Parameters + ---------- + m, n : int + shape of the matrix + density : real, optional + density of the generated matrix: density equal to one means a full + matrix, density of 0 means a matrix with no non-zero items. + format : str, optional + sparse matrix format. + dtype : dtype, optional + type of the returned matrix values. + random_state : {None, int, `numpy.random.Generator`, + `numpy.random.RandomState`}, optional + + If `seed` is None (or `np.random`), the `numpy.random.RandomState` + singleton is used. + If `seed` is an int, a new ``RandomState`` instance is used, + seeded with `seed`. + If `seed` is already a ``Generator`` or ``RandomState`` instance then + that instance is used. + + Returns + ------- + res : sparse matrix + + Notes + ----- + Only float types are supported for now. + + See Also + -------- + random : Similar function allowing a custom random data sampler + random_array : Similar to random() but returns a sparse array + + Examples + -------- + >>> from scipy.sparse import rand + >>> matrix = rand(3, 4, density=0.25, format="csr", random_state=42) + >>> matrix + <3x4 sparse matrix of type '' + with 3 stored elements in Compressed Sparse Row format> + >>> matrix.toarray() + array([[0.05641158, 0. , 0. , 0.65088847], # random + [0. , 0. , 0. , 0.14286682], + [0. , 0. , 0. , 0. 
]]) + + """ + return random(m, n, density, format, dtype, random_state) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_coo.py b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_coo.py new file mode 100644 index 0000000000000000000000000000000000000000..f8c5039f94ed939cd149dbe1bc7e258153d0b32d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_coo.py @@ -0,0 +1,858 @@ +""" A sparse matrix in COOrdinate or 'triplet' format""" + +__docformat__ = "restructuredtext en" + +__all__ = ['coo_array', 'coo_matrix', 'isspmatrix_coo'] + +import math +from warnings import warn + +import numpy as np + +from .._lib._util import copy_if_needed +from ._matrix import spmatrix +from ._sparsetools import coo_tocsr, coo_todense, coo_matvec +from ._base import issparse, SparseEfficiencyWarning, _spbase, sparray +from ._data import _data_matrix, _minmax_mixin +from ._sputils import (upcast_char, to_native, isshape, getdtype, + getdata, downcast_intp_index, get_index_dtype, + check_shape, check_reshape_kwargs) + +import operator + + +class _coo_base(_data_matrix, _minmax_mixin): + _format = 'coo' + + def __init__(self, arg1, shape=None, dtype=None, copy=False): + _data_matrix.__init__(self) + is_array = isinstance(self, sparray) + if not copy: + copy = copy_if_needed + + if isinstance(arg1, tuple): + if isshape(arg1, allow_1d=is_array): + self._shape = check_shape(arg1, allow_1d=is_array) + idx_dtype = self._get_index_dtype(maxval=max(self._shape)) + data_dtype = getdtype(dtype, default=float) + self.coords = tuple(np.array([], dtype=idx_dtype) + for _ in range(len(self._shape))) + self.data = np.array([], dtype=data_dtype) + self.has_canonical_format = True + else: + try: + obj, coords = arg1 + except (TypeError, ValueError) as e: + raise TypeError('invalid input format') from e + + if shape is None: + if any(len(idx) == 0 for idx in coords): + raise ValueError('cannot infer dimensions from zero ' + 'sized index arrays') + shape = 
tuple(operator.index(np.max(idx)) + 1 + for idx in coords) + self._shape = check_shape(shape, allow_1d=is_array) + + idx_dtype = self._get_index_dtype(coords, + maxval=max(self.shape), + check_contents=True) + self.coords = tuple(np.array(idx, copy=copy, dtype=idx_dtype) + for idx in coords) + self.data = getdata(obj, copy=copy, dtype=dtype) + self.has_canonical_format = False + else: + if issparse(arg1): + if arg1.format == self.format and copy: + self.coords = tuple(idx.copy() for idx in arg1.coords) + self.data = arg1.data.copy() + self._shape = check_shape(arg1.shape, allow_1d=is_array) + self.has_canonical_format = arg1.has_canonical_format + else: + coo = arg1.tocoo() + self.coords = tuple(coo.coords) + self.data = coo.data + self._shape = check_shape(coo.shape, allow_1d=is_array) + self.has_canonical_format = False + else: + # dense argument + M = np.asarray(arg1) + if not is_array: + M = np.atleast_2d(M) + if M.ndim != 2: + raise TypeError('expected dimension <= 2 array or matrix') + + self._shape = check_shape(M.shape, allow_1d=is_array) + if shape is not None: + if check_shape(shape, allow_1d=is_array) != self._shape: + message = f'inconsistent shapes: {shape} != {self._shape}' + raise ValueError(message) + index_dtype = self._get_index_dtype(maxval=max(self._shape)) + coords = M.nonzero() + self.coords = tuple(idx.astype(index_dtype, copy=False) + for idx in coords) + self.data = M[coords] + self.has_canonical_format = True + + if dtype is not None: + self.data = self.data.astype(dtype, copy=False) + + self._check() + + @property + def row(self): + if self.ndim > 1: + return self.coords[-2] + result = np.zeros_like(self.col) + result.setflags(write=False) + return result + + + @row.setter + def row(self, new_row): + if self.ndim < 2: + raise ValueError('cannot set row attribute of a 1-dimensional sparse array') + new_row = np.asarray(new_row, dtype=self.coords[-2].dtype) + self.coords = self.coords[:-2] + (new_row,) + self.coords[-1:] + + @property + def 
col(self): + return self.coords[-1] + + @col.setter + def col(self, new_col): + new_col = np.asarray(new_col, dtype=self.coords[-1].dtype) + self.coords = self.coords[:-1] + (new_col,) + + def reshape(self, *args, **kwargs): + is_array = isinstance(self, sparray) + shape = check_shape(args, self.shape, allow_1d=is_array) + order, copy = check_reshape_kwargs(kwargs) + + # Return early if reshape is not required + if shape == self.shape: + if copy: + return self.copy() + else: + return self + + # When reducing the number of dimensions, we need to be careful about + # index overflow. This is why we can't simply call + # `np.ravel_multi_index()` followed by `np.unravel_index()` here. + flat_coords = _ravel_coords(self.coords, self.shape, order=order) + if len(shape) == 2: + if order == 'C': + new_coords = divmod(flat_coords, shape[1]) + else: + new_coords = divmod(flat_coords, shape[0])[::-1] + else: + new_coords = np.unravel_index(flat_coords, shape, order=order) + + # Handle copy here rather than passing on to the constructor so that no + # copy will be made of `new_coords` regardless. 
+ if copy: + new_data = self.data.copy() + else: + new_data = self.data + + return self.__class__((new_data, new_coords), shape=shape, copy=False) + + reshape.__doc__ = _spbase.reshape.__doc__ + + def _getnnz(self, axis=None): + if axis is None or (axis == 0 and self.ndim == 1): + nnz = len(self.data) + if any(len(idx) != nnz for idx in self.coords): + raise ValueError('all index and data arrays must have the ' + 'same length') + + if self.data.ndim != 1 or any(idx.ndim != 1 for idx in self.coords): + raise ValueError('row, column, and data arrays must be 1-D') + + return int(nnz) + + if axis < 0: + axis += self.ndim + if axis >= self.ndim: + raise ValueError('axis out of bounds') + if self.ndim > 2: + raise NotImplementedError('per-axis nnz for COO arrays with >2 ' + 'dimensions is not supported') + return np.bincount(downcast_intp_index(self.coords[1 - axis]), + minlength=self.shape[1 - axis]) + + _getnnz.__doc__ = _spbase._getnnz.__doc__ + + def _check(self): + """ Checks data structure for consistency """ + if self.ndim != len(self.coords): + raise ValueError('mismatching number of index arrays for shape; ' + f'got {len(self.coords)}, expected {self.ndim}') + + # index arrays should have integer data types + for i, idx in enumerate(self.coords): + if idx.dtype.kind != 'i': + warn(f'index array {i} has non-integer dtype ({idx.dtype.name})', + stacklevel=3) + + idx_dtype = self._get_index_dtype(self.coords, maxval=max(self.shape)) + self.coords = tuple(np.asarray(idx, dtype=idx_dtype) + for idx in self.coords) + self.data = to_native(self.data) + + if self.nnz > 0: + for i, idx in enumerate(self.coords): + if idx.max() >= self.shape[i]: + raise ValueError(f'axis {i} index {idx.max()} exceeds ' + f'matrix dimension {self.shape[i]}') + if idx.min() < 0: + raise ValueError(f'negative axis {i} index: {idx.min()}') + + def transpose(self, axes=None, copy=False): + if axes is None: + axes = range(self.ndim)[::-1] + elif isinstance(self, sparray): + if len(axes) != 
self.ndim: + raise ValueError("axes don't match matrix dimensions") + if len(set(axes)) != self.ndim: + raise ValueError("repeated axis in transpose") + elif axes != (1, 0): + raise ValueError("Sparse matrices do not support an 'axes' " + "parameter because swapping dimensions is the " + "only logical permutation.") + + permuted_shape = tuple(self._shape[i] for i in axes) + permuted_coords = tuple(self.coords[i] for i in axes) + return self.__class__((self.data, permuted_coords), + shape=permuted_shape, copy=copy) + + transpose.__doc__ = _spbase.transpose.__doc__ + + def resize(self, *shape) -> None: + is_array = isinstance(self, sparray) + shape = check_shape(shape, allow_1d=is_array) + + # Check for added dimensions. + if len(shape) > self.ndim: + flat_coords = _ravel_coords(self.coords, self.shape) + max_size = math.prod(shape) + self.coords = np.unravel_index(flat_coords[:max_size], shape) + self.data = self.data[:max_size] + self._shape = shape + return + + # Check for removed dimensions. + if len(shape) < self.ndim: + tmp_shape = ( + self._shape[:len(shape) - 1] # Original shape without last axis + + (-1,) # Last axis is used to flatten the array + + (1,) * (self.ndim - len(shape)) # Pad with ones + ) + tmp = self.reshape(tmp_shape) + self.coords = tmp.coords[:len(shape)] + self._shape = tmp.shape[:len(shape)] + + # Handle truncation of existing dimensions. 
+ is_truncating = any(old > new for old, new in zip(self.shape, shape)) + if is_truncating: + mask = np.logical_and.reduce([ + idx < size for idx, size in zip(self.coords, shape) + ]) + if not mask.all(): + self.coords = tuple(idx[mask] for idx in self.coords) + self.data = self.data[mask] + + self._shape = shape + + resize.__doc__ = _spbase.resize.__doc__ + + def toarray(self, order=None, out=None): + B = self._process_toarray_args(order, out) + fortran = int(B.flags.f_contiguous) + if not fortran and not B.flags.c_contiguous: + raise ValueError("Output array must be C or F contiguous") + if self.ndim > 2: + raise ValueError("Cannot densify higher-rank sparse array") + # This handles both 0D and 1D cases correctly regardless of the + # original shape. + M, N = self._shape_as_2d + coo_todense(M, N, self.nnz, self.row, self.col, self.data, + B.ravel('A'), fortran) + # Note: reshape() doesn't copy here, but does return a new array (view). + return B.reshape(self.shape) + + toarray.__doc__ = _spbase.toarray.__doc__ + + def tocsc(self, copy=False): + """Convert this array/matrix to Compressed Sparse Column format + + Duplicate entries will be summed together. 
+ + Examples + -------- + >>> from numpy import array + >>> from scipy.sparse import coo_array + >>> row = array([0, 0, 1, 3, 1, 0, 0]) + >>> col = array([0, 2, 1, 3, 1, 0, 0]) + >>> data = array([1, 1, 1, 1, 1, 1, 1]) + >>> A = coo_array((data, (row, col)), shape=(4, 4)).tocsc() + >>> A.toarray() + array([[3, 0, 1, 0], + [0, 2, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 1]]) + + """ + if self.ndim != 2: + raise ValueError("Cannot convert a 1d sparse array to csc format") + if self.nnz == 0: + return self._csc_container(self.shape, dtype=self.dtype) + else: + from ._csc import csc_array + indptr, indices, data, shape = self._coo_to_compressed(csc_array._swap) + + x = self._csc_container((data, indices, indptr), shape=shape) + if not self.has_canonical_format: + x.sum_duplicates() + return x + + def tocsr(self, copy=False): + """Convert this array/matrix to Compressed Sparse Row format + + Duplicate entries will be summed together. + + Examples + -------- + >>> from numpy import array + >>> from scipy.sparse import coo_array + >>> row = array([0, 0, 1, 3, 1, 0, 0]) + >>> col = array([0, 2, 1, 3, 1, 0, 0]) + >>> data = array([1, 1, 1, 1, 1, 1, 1]) + >>> A = coo_array((data, (row, col)), shape=(4, 4)).tocsr() + >>> A.toarray() + array([[3, 0, 1, 0], + [0, 2, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 1]]) + + """ + if self.ndim != 2: + raise ValueError("Cannot convert a 1d sparse array to csr format") + if self.nnz == 0: + return self._csr_container(self.shape, dtype=self.dtype) + else: + from ._csr import csr_array + indptr, indices, data, shape = self._coo_to_compressed(csr_array._swap) + + x = self._csr_container((data, indices, indptr), shape=self.shape) + if not self.has_canonical_format: + x.sum_duplicates() + return x + + def _coo_to_compressed(self, swap): + """convert (shape, coords, data) to (indptr, indices, data, shape)""" + M, N = swap(self.shape) + major, minor = swap(self.coords) + nnz = len(major) + # convert idx_dtype intc to int32 for pythran. 
+ # tested in scipy/optimize/tests/test__numdiff.py::test_group_columns + idx_dtype = self._get_index_dtype(self.coords, maxval=max(self.nnz, N)) + major = major.astype(idx_dtype, copy=False) + minor = minor.astype(idx_dtype, copy=False) + + indptr = np.empty(M + 1, dtype=idx_dtype) + indices = np.empty_like(minor, dtype=idx_dtype) + data = np.empty_like(self.data, dtype=self.dtype) + + coo_tocsr(M, N, nnz, major, minor, self.data, indptr, indices, data) + return indptr, indices, data, self.shape + + def tocoo(self, copy=False): + if copy: + return self.copy() + else: + return self + + tocoo.__doc__ = _spbase.tocoo.__doc__ + + def todia(self, copy=False): + if self.ndim != 2: + raise ValueError("Cannot convert a 1d sparse array to dia format") + self.sum_duplicates() + ks = self.col - self.row # the diagonal for each nonzero + diags, diag_idx = np.unique(ks, return_inverse=True) + + if len(diags) > 100: + # probably undesired, should todia() have a maxdiags parameter? + warn("Constructing a DIA matrix with %d diagonals " + "is inefficient" % len(diags), + SparseEfficiencyWarning, stacklevel=2) + + #initialize and fill in data array + if self.data.size == 0: + data = np.zeros((0, 0), dtype=self.dtype) + else: + data = np.zeros((len(diags), self.col.max()+1), dtype=self.dtype) + data[diag_idx, self.col] = self.data + + return self._dia_container((data, diags), shape=self.shape) + + todia.__doc__ = _spbase.todia.__doc__ + + def todok(self, copy=False): + self.sum_duplicates() + dok = self._dok_container(self.shape, dtype=self.dtype) + # ensure that 1d coordinates are not tuples + if self.ndim == 1: + coords = self.coords[0] + else: + coords = zip(*self.coords) + + dok._dict = dict(zip(coords, self.data)) + return dok + + todok.__doc__ = _spbase.todok.__doc__ + + def diagonal(self, k=0): + if self.ndim != 2: + raise ValueError("diagonal requires two dimensions") + rows, cols = self.shape + if k <= -rows or k >= cols: + return np.empty(0, dtype=self.data.dtype) + diag = 
np.zeros(min(rows + min(k, 0), cols - max(k, 0)), + dtype=self.dtype) + diag_mask = (self.row + k) == self.col + + if self.has_canonical_format: + row = self.row[diag_mask] + data = self.data[diag_mask] + else: + inds = tuple(idx[diag_mask] for idx in self.coords) + (row, _), data = self._sum_duplicates(inds, self.data[diag_mask]) + diag[row + min(k, 0)] = data + + return diag + + diagonal.__doc__ = _data_matrix.diagonal.__doc__ + + def _setdiag(self, values, k): + if self.ndim != 2: + raise ValueError("setting a diagonal requires two dimensions") + M, N = self.shape + if values.ndim and not len(values): + return + idx_dtype = self.row.dtype + + # Determine which triples to keep and where to put the new ones. + full_keep = self.col - self.row != k + if k < 0: + max_index = min(M+k, N) + if values.ndim: + max_index = min(max_index, len(values)) + keep = np.logical_or(full_keep, self.col >= max_index) + new_row = np.arange(-k, -k + max_index, dtype=idx_dtype) + new_col = np.arange(max_index, dtype=idx_dtype) + else: + max_index = min(M, N-k) + if values.ndim: + max_index = min(max_index, len(values)) + keep = np.logical_or(full_keep, self.row >= max_index) + new_row = np.arange(max_index, dtype=idx_dtype) + new_col = np.arange(k, k + max_index, dtype=idx_dtype) + + # Define the array of data consisting of the entries to be added. + if values.ndim: + new_data = values[:max_index] + else: + new_data = np.empty(max_index, dtype=self.dtype) + new_data[:] = values + + # Update the internal structure. + self.coords = (np.concatenate((self.row[keep], new_row)), + np.concatenate((self.col[keep], new_col))) + self.data = np.concatenate((self.data[keep], new_data)) + self.has_canonical_format = False + + # needed by _data_matrix + def _with_data(self, data, copy=True): + """Returns a matrix with the same sparsity structure as self, + but with different data. By default the index arrays are copied. 
+ """ + if copy: + coords = tuple(idx.copy() for idx in self.coords) + else: + coords = self.coords + return self.__class__((data, coords), shape=self.shape, dtype=data.dtype) + + def sum_duplicates(self) -> None: + """Eliminate duplicate entries by adding them together + + This is an *in place* operation + """ + if self.has_canonical_format: + return + summed = self._sum_duplicates(self.coords, self.data) + self.coords, self.data = summed + self.has_canonical_format = True + + def _sum_duplicates(self, coords, data): + # Assumes coords not in canonical format. + if len(data) == 0: + return coords, data + # Sort coords w.r.t. rows, then cols. This corresponds to C-order, + # which we rely on for argmin/argmax to return the first index in the + # same way that numpy does (in the case of ties). + order = np.lexsort(coords[::-1]) + coords = tuple(idx[order] for idx in coords) + data = data[order] + unique_mask = np.logical_or.reduce([ + idx[1:] != idx[:-1] for idx in coords + ]) + unique_mask = np.append(True, unique_mask) + coords = tuple(idx[unique_mask] for idx in coords) + unique_inds, = np.nonzero(unique_mask) + data = np.add.reduceat(data, unique_inds, dtype=self.dtype) + return coords, data + + def eliminate_zeros(self): + """Remove zero entries from the array/matrix + + This is an *in place* operation + """ + mask = self.data != 0 + self.data = self.data[mask] + self.coords = tuple(idx[mask] for idx in self.coords) + + ####################### + # Arithmetic handlers # + ####################### + + def _add_dense(self, other): + if other.shape != self.shape: + raise ValueError(f'Incompatible shapes ({self.shape} and {other.shape})') + dtype = upcast_char(self.dtype.char, other.dtype.char) + result = np.array(other, dtype=dtype, copy=True) + fortran = int(result.flags.f_contiguous) + M, N = self._shape_as_2d + coo_todense(M, N, self.nnz, self.row, self.col, self.data, + result.ravel('A'), fortran) + return self._container(result, copy=False) + + def 
_matmul_vector(self, other): + result_shape = self.shape[0] if self.ndim > 1 else 1 + result = np.zeros(result_shape, + dtype=upcast_char(self.dtype.char, other.dtype.char)) + + if self.ndim == 2: + col = self.col + row = self.row + elif self.ndim == 1: + col = self.coords[0] + row = np.zeros_like(col) + else: + raise NotImplementedError( + f"coo_matvec not implemented for ndim={self.ndim}") + + coo_matvec(self.nnz, row, col, self.data, other, result) + # Array semantics return a scalar here, not a single-element array. + if isinstance(self, sparray) and result_shape == 1: + return result[0] + return result + + def _matmul_multivector(self, other): + result_dtype = upcast_char(self.dtype.char, other.dtype.char) + if self.ndim == 2: + result_shape = (other.shape[1], self.shape[0]) + col = self.col + row = self.row + elif self.ndim == 1: + result_shape = (other.shape[1],) + col = self.coords[0] + row = np.zeros_like(col) + else: + raise NotImplementedError( + f"coo_matvec not implemented for ndim={self.ndim}") + + result = np.zeros(result_shape, dtype=result_dtype) + for i, other_col in enumerate(other.T): + coo_matvec(self.nnz, row, col, self.data, other_col, result[i:i + 1]) + return result.T.view(type=type(other)) + + +def _ravel_coords(coords, shape, order='C'): + """Like np.ravel_multi_index, but avoids some overflow issues.""" + if len(coords) == 1: + return coords[0] + # Handle overflow as in https://github.com/scipy/scipy/pull/9132 + if len(coords) == 2: + nrows, ncols = shape + row, col = coords + if order == 'C': + maxval = (ncols * max(0, nrows - 1) + max(0, ncols - 1)) + idx_dtype = get_index_dtype(maxval=maxval) + return np.multiply(ncols, row, dtype=idx_dtype) + col + elif order == 'F': + maxval = (nrows * max(0, ncols - 1) + max(0, nrows - 1)) + idx_dtype = get_index_dtype(maxval=maxval) + return np.multiply(nrows, col, dtype=idx_dtype) + row + else: + raise ValueError("'order' must be 'C' or 'F'") + return np.ravel_multi_index(coords, shape, 
order=order) + + +def isspmatrix_coo(x): + """Is `x` of coo_matrix type? + + Parameters + ---------- + x + object to check for being a coo matrix + + Returns + ------- + bool + True if `x` is a coo matrix, False otherwise + + Examples + -------- + >>> from scipy.sparse import coo_array, coo_matrix, csr_matrix, isspmatrix_coo + >>> isspmatrix_coo(coo_matrix([[5]])) + True + >>> isspmatrix_coo(coo_array([[5]])) + False + >>> isspmatrix_coo(csr_matrix([[5]])) + False + """ + return isinstance(x, coo_matrix) + + +# This namespace class separates array from matrix with isinstance +class coo_array(_coo_base, sparray): + """ + A sparse array in COOrdinate format. + + Also known as the 'ijv' or 'triplet' format. + + This can be instantiated in several ways: + coo_array(D) + where D is an ndarray + + coo_array(S) + with another sparse array or matrix S (equivalent to S.tocoo()) + + coo_array(shape, [dtype]) + to construct an empty sparse array with shape `shape` + dtype is optional, defaulting to dtype='d'. + + coo_array((data, coords), [shape]) + to construct from existing data and index arrays: + 1. data[:] the entries of the sparse array, in any order + 2. coords[i][:] the axis-i coordinates of the data entries + + Where ``A[coords] = data``, and coords is a tuple of index arrays. + When shape is not specified, it is inferred from the index arrays. + + Attributes + ---------- + dtype : dtype + Data type of the sparse array + shape : tuple of integers + Shape of the sparse array + ndim : int + Number of dimensions of the sparse array + nnz + size + data + COO format data array of the sparse array + coords + COO format tuple of index arrays + has_canonical_format : bool + Whether the matrix has sorted coordinates and no duplicates + format + T + + Notes + ----- + + Sparse arrays can be used in arithmetic operations: they support + addition, subtraction, multiplication, division, and matrix power. 
+ + Advantages of the COO format + - facilitates fast conversion among sparse formats + - permits duplicate entries (see example) + - very fast conversion to and from CSR/CSC formats + + Disadvantages of the COO format + - does not directly support: + + arithmetic operations + + slicing + + Intended Usage + - COO is a fast format for constructing sparse arrays + - Once a COO array has been constructed, convert to CSR or + CSC format for fast arithmetic and matrix vector operations + - By default when converting to CSR or CSC format, duplicate (i,j) + entries will be summed together. This facilitates efficient + construction of finite element matrices and the like. (see example) + + Canonical format + - Entries and coordinates sorted by row, then column. + - There are no duplicate entries (i.e. duplicate (i,j) locations) + - Data arrays MAY have explicit zeros. + + Examples + -------- + + >>> # Constructing an empty sparse array + >>> import numpy as np + >>> from scipy.sparse import coo_array + >>> coo_array((3, 4), dtype=np.int8).toarray() + array([[0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]], dtype=int8) + + >>> # Constructing a sparse array using ijv format + >>> row = np.array([0, 3, 1, 0]) + >>> col = np.array([0, 3, 1, 2]) + >>> data = np.array([4, 5, 7, 9]) + >>> coo_array((data, (row, col)), shape=(4, 4)).toarray() + array([[4, 0, 9, 0], + [0, 7, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 5]]) + + >>> # Constructing a sparse array with duplicate coordinates + >>> row = np.array([0, 0, 1, 3, 1, 0, 0]) + >>> col = np.array([0, 2, 1, 3, 1, 0, 0]) + >>> data = np.array([1, 1, 1, 1, 1, 1, 1]) + >>> coo = coo_array((data, (row, col)), shape=(4, 4)) + >>> # Duplicate coordinates are maintained until implicitly or explicitly summed + >>> np.max(coo.data) + 1 + >>> coo.toarray() + array([[3, 0, 1, 0], + [0, 2, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 1]]) + + """ + + +class coo_matrix(spmatrix, _coo_base): + """ + A sparse matrix in COOrdinate format. 
+ + Also known as the 'ijv' or 'triplet' format. + + This can be instantiated in several ways: + coo_matrix(D) + where D is a 2-D ndarray + + coo_matrix(S) + with another sparse array or matrix S (equivalent to S.tocoo()) + + coo_matrix((M, N), [dtype]) + to construct an empty matrix with shape (M, N) + dtype is optional, defaulting to dtype='d'. + + coo_matrix((data, (i, j)), [shape=(M, N)]) + to construct from three arrays: + 1. data[:] the entries of the matrix, in any order + 2. i[:] the row indices of the matrix entries + 3. j[:] the column indices of the matrix entries + + Where ``A[i[k], j[k]] = data[k]``. When shape is not + specified, it is inferred from the index arrays + + Attributes + ---------- + dtype : dtype + Data type of the matrix + shape : 2-tuple + Shape of the matrix + ndim : int + Number of dimensions (this is always 2) + nnz + size + data + COO format data array of the matrix + row + COO format row index array of the matrix + col + COO format column index array of the matrix + has_canonical_format : bool + Whether the matrix has sorted indices and no duplicates + format + T + + Notes + ----- + + Sparse matrices can be used in arithmetic operations: they support + addition, subtraction, multiplication, division, and matrix power. + + Advantages of the COO format + - facilitates fast conversion among sparse formats + - permits duplicate entries (see example) + - very fast conversion to and from CSR/CSC formats + + Disadvantages of the COO format + - does not directly support: + + arithmetic operations + + slicing + + Intended Usage + - COO is a fast format for constructing sparse matrices + - Once a COO matrix has been constructed, convert to CSR or + CSC format for fast arithmetic and matrix vector operations + - By default when converting to CSR or CSC format, duplicate (i,j) + entries will be summed together. This facilitates efficient + construction of finite element matrices and the like. 
(see example) + + Canonical format + - Entries and coordinates sorted by row, then column. + - There are no duplicate entries (i.e. duplicate (i,j) locations) + - Data arrays MAY have explicit zeros. + + Examples + -------- + + >>> # Constructing an empty matrix + >>> import numpy as np + >>> from scipy.sparse import coo_matrix + >>> coo_matrix((3, 4), dtype=np.int8).toarray() + array([[0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]], dtype=int8) + + >>> # Constructing a matrix using ijv format + >>> row = np.array([0, 3, 1, 0]) + >>> col = np.array([0, 3, 1, 2]) + >>> data = np.array([4, 5, 7, 9]) + >>> coo_matrix((data, (row, col)), shape=(4, 4)).toarray() + array([[4, 0, 9, 0], + [0, 7, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 5]]) + + >>> # Constructing a matrix with duplicate coordinates + >>> row = np.array([0, 0, 1, 3, 1, 0, 0]) + >>> col = np.array([0, 2, 1, 3, 1, 0, 0]) + >>> data = np.array([1, 1, 1, 1, 1, 1, 1]) + >>> coo = coo_matrix((data, (row, col)), shape=(4, 4)) + >>> # Duplicate coordinates are maintained until implicitly or explicitly summed + >>> np.max(coo.data) + 1 + >>> coo.toarray() + array([[3, 0, 1, 0], + [0, 2, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 1]]) + + """ + + def __setstate__(self, state): + if 'coords' not in state: + # For retro-compatibility with the previous attributes + # storing nnz coordinates for 2D COO matrix. 
+ state['coords'] = (state.pop('row'), state.pop('col')) + self.__dict__.update(state) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_csparsetools.cpython-310-x86_64-linux-gnu.so b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_csparsetools.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..fa6683236a9a5038d7ccb466199811a2e8e0c92c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_csparsetools.cpython-310-x86_64-linux-gnu.so differ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_csr.py b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_csr.py new file mode 100644 index 0000000000000000000000000000000000000000..37c6ffacd8145a42ee74b5d71c1c736b7bb508e0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_csr.py @@ -0,0 +1,491 @@ +"""Compressed Sparse Row matrix format""" + +__docformat__ = "restructuredtext en" + +__all__ = ['csr_array', 'csr_matrix', 'isspmatrix_csr'] + +import numpy as np + +from ._matrix import spmatrix +from ._base import _spbase, sparray +from ._sparsetools import (csr_tocsc, csr_tobsr, csr_count_blocks, + get_csr_submatrix) +from ._sputils import upcast + +from ._compressed import _cs_matrix + + +class _csr_base(_cs_matrix): + _format = 'csr' + + def transpose(self, axes=None, copy=False): + if axes is not None and axes != (1, 0): + raise ValueError("Sparse arrays/matrices do not support " + "an 'axes' parameter because swapping " + "dimensions is the only logical permutation.") + + M, N = self.shape + return self._csc_container((self.data, self.indices, + self.indptr), shape=(N, M), copy=copy) + + transpose.__doc__ = _spbase.transpose.__doc__ + + def tolil(self, copy=False): + lil = self._lil_container(self.shape, dtype=self.dtype) + + self.sum_duplicates() + ptr,ind,dat = self.indptr,self.indices,self.data + rows, data = lil.rows, lil.data + + for n in range(self.shape[0]): + start = 
ptr[n] + end = ptr[n+1] + rows[n] = ind[start:end].tolist() + data[n] = dat[start:end].tolist() + + return lil + + tolil.__doc__ = _spbase.tolil.__doc__ + + def tocsr(self, copy=False): + if copy: + return self.copy() + else: + return self + + tocsr.__doc__ = _spbase.tocsr.__doc__ + + def tocsc(self, copy=False): + idx_dtype = self._get_index_dtype((self.indptr, self.indices), + maxval=max(self.nnz, self.shape[0])) + indptr = np.empty(self.shape[1] + 1, dtype=idx_dtype) + indices = np.empty(self.nnz, dtype=idx_dtype) + data = np.empty(self.nnz, dtype=upcast(self.dtype)) + + csr_tocsc(self.shape[0], self.shape[1], + self.indptr.astype(idx_dtype), + self.indices.astype(idx_dtype), + self.data, + indptr, + indices, + data) + + A = self._csc_container((data, indices, indptr), shape=self.shape) + A.has_sorted_indices = True + return A + + tocsc.__doc__ = _spbase.tocsc.__doc__ + + def tobsr(self, blocksize=None, copy=True): + if blocksize is None: + from ._spfuncs import estimate_blocksize + return self.tobsr(blocksize=estimate_blocksize(self)) + + elif blocksize == (1,1): + arg1 = (self.data.reshape(-1,1,1),self.indices,self.indptr) + return self._bsr_container(arg1, shape=self.shape, copy=copy) + + else: + R,C = blocksize + M,N = self.shape + + if R < 1 or C < 1 or M % R != 0 or N % C != 0: + raise ValueError('invalid blocksize %s' % blocksize) + + blks = csr_count_blocks(M,N,R,C,self.indptr,self.indices) + + idx_dtype = self._get_index_dtype((self.indptr, self.indices), + maxval=max(N//C, blks)) + indptr = np.empty(M//R+1, dtype=idx_dtype) + indices = np.empty(blks, dtype=idx_dtype) + data = np.zeros((blks,R,C), dtype=self.dtype) + + csr_tobsr(M, N, R, C, + self.indptr.astype(idx_dtype), + self.indices.astype(idx_dtype), + self.data, + indptr, indices, data.ravel()) + + return self._bsr_container( + (data, indices, indptr), shape=self.shape + ) + + tobsr.__doc__ = _spbase.tobsr.__doc__ + + # these functions are used by the parent class (_cs_matrix) + # to remove 
redundancy between csc_matrix and csr_array + @staticmethod + def _swap(x): + """swap the members of x if this is a column-oriented matrix + """ + return x + + def __iter__(self): + indptr = np.zeros(2, dtype=self.indptr.dtype) + shape = (1, self.shape[1]) + i0 = 0 + for i1 in self.indptr[1:]: + indptr[1] = i1 - i0 + indices = self.indices[i0:i1] + data = self.data[i0:i1] + yield self.__class__( + (data, indices, indptr), shape=shape, copy=True + ) + i0 = i1 + + def _getrow(self, i): + """Returns a copy of row i of the matrix, as a (1 x n) + CSR matrix (row vector). + """ + M, N = self.shape + i = int(i) + if i < 0: + i += M + if i < 0 or i >= M: + raise IndexError('index (%d) out of range' % i) + indptr, indices, data = get_csr_submatrix( + M, N, self.indptr, self.indices, self.data, i, i + 1, 0, N) + return self.__class__((data, indices, indptr), shape=(1, N), + dtype=self.dtype, copy=False) + + def _getcol(self, i): + """Returns a copy of column i of the matrix, as a (m x 1) + CSR matrix (column vector). 
+ """ + M, N = self.shape + i = int(i) + if i < 0: + i += N + if i < 0 or i >= N: + raise IndexError('index (%d) out of range' % i) + indptr, indices, data = get_csr_submatrix( + M, N, self.indptr, self.indices, self.data, 0, M, i, i + 1) + return self.__class__((data, indices, indptr), shape=(M, 1), + dtype=self.dtype, copy=False) + + def _get_intXarray(self, row, col): + return self._getrow(row)._minor_index_fancy(col) + + def _get_intXslice(self, row, col): + if col.step in (1, None): + return self._get_submatrix(row, col, copy=True) + # TODO: uncomment this once it's faster: + # return self._getrow(row)._minor_slice(col) + + M, N = self.shape + start, stop, stride = col.indices(N) + + ii, jj = self.indptr[row:row+2] + row_indices = self.indices[ii:jj] + row_data = self.data[ii:jj] + + if stride > 0: + ind = (row_indices >= start) & (row_indices < stop) + else: + ind = (row_indices <= start) & (row_indices > stop) + + if abs(stride) > 1: + ind &= (row_indices - start) % stride == 0 + + row_indices = (row_indices[ind] - start) // stride + row_data = row_data[ind] + row_indptr = np.array([0, len(row_indices)]) + + if stride < 0: + row_data = row_data[::-1] + row_indices = abs(row_indices[::-1]) + + shape = (1, max(0, int(np.ceil(float(stop - start) / stride)))) + return self.__class__((row_data, row_indices, row_indptr), shape=shape, + dtype=self.dtype, copy=False) + + def _get_sliceXint(self, row, col): + if row.step in (1, None): + return self._get_submatrix(row, col, copy=True) + return self._major_slice(row)._get_submatrix(minor=col) + + def _get_sliceXarray(self, row, col): + return self._major_slice(row)._minor_index_fancy(col) + + def _get_arrayXint(self, row, col): + return self._major_index_fancy(row)._get_submatrix(minor=col) + + def _get_arrayXslice(self, row, col): + if col.step not in (1, None): + col = np.arange(*col.indices(self.shape[1])) + return self._get_arrayXarray(row, col) + return self._major_index_fancy(row)._get_submatrix(minor=col) + + 
+def isspmatrix_csr(x): + """Is `x` of csr_matrix type? + + Parameters + ---------- + x + object to check for being a csr matrix + + Returns + ------- + bool + True if `x` is a csr matrix, False otherwise + + Examples + -------- + >>> from scipy.sparse import csr_array, csr_matrix, coo_matrix, isspmatrix_csr + >>> isspmatrix_csr(csr_matrix([[5]])) + True + >>> isspmatrix_csr(csr_array([[5]])) + False + >>> isspmatrix_csr(coo_matrix([[5]])) + False + """ + return isinstance(x, csr_matrix) + + +# This namespace class separates array from matrix with isinstance +class csr_array(_csr_base, sparray): + """ + Compressed Sparse Row array. + + This can be instantiated in several ways: + csr_array(D) + where D is a 2-D ndarray + + csr_array(S) + with another sparse array or matrix S (equivalent to S.tocsr()) + + csr_array((M, N), [dtype]) + to construct an empty array with shape (M, N) + dtype is optional, defaulting to dtype='d'. + + csr_array((data, (row_ind, col_ind)), [shape=(M, N)]) + where ``data``, ``row_ind`` and ``col_ind`` satisfy the + relationship ``a[row_ind[k], col_ind[k]] = data[k]``. + + csr_array((data, indices, indptr), [shape=(M, N)]) + is the standard CSR representation where the column indices for + row i are stored in ``indices[indptr[i]:indptr[i+1]]`` and their + corresponding values are stored in ``data[indptr[i]:indptr[i+1]]``. + If the shape parameter is not supplied, the array dimensions + are inferred from the index arrays. + + Attributes + ---------- + dtype : dtype + Data type of the array + shape : 2-tuple + Shape of the array + ndim : int + Number of dimensions (this is always 2) + nnz + size + data + CSR format data array of the array + indices + CSR format index array of the array + indptr + CSR format index pointer array of the array + has_sorted_indices + has_canonical_format + T + + Notes + ----- + + Sparse arrays can be used in arithmetic operations: they support + addition, subtraction, multiplication, division, and matrix power. 
+ + Advantages of the CSR format + - efficient arithmetic operations CSR + CSR, CSR * CSR, etc. + - efficient row slicing + - fast matrix vector products + + Disadvantages of the CSR format + - slow column slicing operations (consider CSC) + - changes to the sparsity structure are expensive (consider LIL or DOK) + + Canonical Format + - Within each row, indices are sorted by column. + - There are no duplicate entries. + + Examples + -------- + + >>> import numpy as np + >>> from scipy.sparse import csr_array + >>> csr_array((3, 4), dtype=np.int8).toarray() + array([[0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]], dtype=int8) + + >>> row = np.array([0, 0, 1, 2, 2, 2]) + >>> col = np.array([0, 2, 2, 0, 1, 2]) + >>> data = np.array([1, 2, 3, 4, 5, 6]) + >>> csr_array((data, (row, col)), shape=(3, 3)).toarray() + array([[1, 0, 2], + [0, 0, 3], + [4, 5, 6]]) + + >>> indptr = np.array([0, 2, 3, 6]) + >>> indices = np.array([0, 2, 2, 0, 1, 2]) + >>> data = np.array([1, 2, 3, 4, 5, 6]) + >>> csr_array((data, indices, indptr), shape=(3, 3)).toarray() + array([[1, 0, 2], + [0, 0, 3], + [4, 5, 6]]) + + Duplicate entries are summed together: + + >>> row = np.array([0, 1, 2, 0]) + >>> col = np.array([0, 1, 1, 0]) + >>> data = np.array([1, 2, 4, 8]) + >>> csr_array((data, (row, col)), shape=(3, 3)).toarray() + array([[9, 0, 0], + [0, 2, 0], + [0, 4, 0]]) + + As an example of how to construct a CSR array incrementally, + the following snippet builds a term-document array from texts: + + >>> docs = [["hello", "world", "hello"], ["goodbye", "cruel", "world"]] + >>> indptr = [0] + >>> indices = [] + >>> data = [] + >>> vocabulary = {} + >>> for d in docs: + ... for term in d: + ... index = vocabulary.setdefault(term, len(vocabulary)) + ... indices.append(index) + ... data.append(1) + ... indptr.append(len(indices)) + ... 
+ >>> csr_array((data, indices, indptr), dtype=int).toarray() + array([[2, 1, 0, 0], + [0, 1, 1, 1]]) + + """ + + +class csr_matrix(spmatrix, _csr_base): + """ + Compressed Sparse Row matrix. + + This can be instantiated in several ways: + csr_matrix(D) + where D is a 2-D ndarray + + csr_matrix(S) + with another sparse array or matrix S (equivalent to S.tocsr()) + + csr_matrix((M, N), [dtype]) + to construct an empty matrix with shape (M, N) + dtype is optional, defaulting to dtype='d'. + + csr_matrix((data, (row_ind, col_ind)), [shape=(M, N)]) + where ``data``, ``row_ind`` and ``col_ind`` satisfy the + relationship ``a[row_ind[k], col_ind[k]] = data[k]``. + + csr_matrix((data, indices, indptr), [shape=(M, N)]) + is the standard CSR representation where the column indices for + row i are stored in ``indices[indptr[i]:indptr[i+1]]`` and their + corresponding values are stored in ``data[indptr[i]:indptr[i+1]]``. + If the shape parameter is not supplied, the matrix dimensions + are inferred from the index arrays. + + Attributes + ---------- + dtype : dtype + Data type of the matrix + shape : 2-tuple + Shape of the matrix + ndim : int + Number of dimensions (this is always 2) + nnz + size + data + CSR format data array of the matrix + indices + CSR format index array of the matrix + indptr + CSR format index pointer array of the matrix + has_sorted_indices + has_canonical_format + T + + Notes + ----- + + Sparse matrices can be used in arithmetic operations: they support + addition, subtraction, multiplication, division, and matrix power. + + Advantages of the CSR format + - efficient arithmetic operations CSR + CSR, CSR * CSR, etc. + - efficient row slicing + - fast matrix vector products + + Disadvantages of the CSR format + - slow column slicing operations (consider CSC) + - changes to the sparsity structure are expensive (consider LIL or DOK) + + Canonical Format + - Within each row, indices are sorted by column. + - There are no duplicate entries. 
+ + Examples + -------- + + >>> import numpy as np + >>> from scipy.sparse import csr_matrix + >>> csr_matrix((3, 4), dtype=np.int8).toarray() + array([[0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]], dtype=int8) + + >>> row = np.array([0, 0, 1, 2, 2, 2]) + >>> col = np.array([0, 2, 2, 0, 1, 2]) + >>> data = np.array([1, 2, 3, 4, 5, 6]) + >>> csr_matrix((data, (row, col)), shape=(3, 3)).toarray() + array([[1, 0, 2], + [0, 0, 3], + [4, 5, 6]]) + + >>> indptr = np.array([0, 2, 3, 6]) + >>> indices = np.array([0, 2, 2, 0, 1, 2]) + >>> data = np.array([1, 2, 3, 4, 5, 6]) + >>> csr_matrix((data, indices, indptr), shape=(3, 3)).toarray() + array([[1, 0, 2], + [0, 0, 3], + [4, 5, 6]]) + + Duplicate entries are summed together: + + >>> row = np.array([0, 1, 2, 0]) + >>> col = np.array([0, 1, 1, 0]) + >>> data = np.array([1, 2, 4, 8]) + >>> csr_matrix((data, (row, col)), shape=(3, 3)).toarray() + array([[9, 0, 0], + [0, 2, 0], + [0, 4, 0]]) + + As an example of how to construct a CSR matrix incrementally, + the following snippet builds a term-document matrix from texts: + + >>> docs = [["hello", "world", "hello"], ["goodbye", "cruel", "world"]] + >>> indptr = [0] + >>> indices = [] + >>> data = [] + >>> vocabulary = {} + >>> for d in docs: + ... for term in d: + ... index = vocabulary.setdefault(term, len(vocabulary)) + ... indices.append(index) + ... data.append(1) + ... indptr.append(len(indices)) + ... 
+ >>> csr_matrix((data, indices, indptr), dtype=int).toarray() + array([[2, 1, 0, 0], + [0, 1, 1, 1]]) + + """ + diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_dia.py b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_dia.py new file mode 100644 index 0000000000000000000000000000000000000000..26512832b81d525b9fa1e7cee08c99195517098e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_dia.py @@ -0,0 +1,563 @@ +"""Sparse DIAgonal format""" + +__docformat__ = "restructuredtext en" + +__all__ = ['dia_array', 'dia_matrix', 'isspmatrix_dia'] + +import numpy as np + +from .._lib._util import copy_if_needed +from ._matrix import spmatrix +from ._base import issparse, _formats, _spbase, sparray +from ._data import _data_matrix +from ._sputils import ( + isshape, upcast_char, getdtype, get_sum_dtype, validateaxis, check_shape +) +from ._sparsetools import dia_matvec + + +class _dia_base(_data_matrix): + _format = 'dia' + + def __init__(self, arg1, shape=None, dtype=None, copy=False): + _data_matrix.__init__(self) + + if issparse(arg1): + if arg1.format == "dia": + if copy: + arg1 = arg1.copy() + self.data = arg1.data + self.offsets = arg1.offsets + self._shape = check_shape(arg1.shape) + else: + if arg1.format == self.format and copy: + A = arg1.copy() + else: + A = arg1.todia() + self.data = A.data + self.offsets = A.offsets + self._shape = check_shape(A.shape) + elif isinstance(arg1, tuple): + if isshape(arg1): + # It's a tuple of matrix dimensions (M, N) + # create empty matrix + self._shape = check_shape(arg1) + self.data = np.zeros((0,0), getdtype(dtype, default=float)) + idx_dtype = self._get_index_dtype(maxval=max(self.shape)) + self.offsets = np.zeros((0), dtype=idx_dtype) + else: + try: + # Try interpreting it as (data, offsets) + data, offsets = arg1 + except Exception as e: + message = 'unrecognized form for dia_array constructor' + raise ValueError(message) from e + else: + if shape is None: + raise 
ValueError('expected a shape argument') + if not copy: + copy = copy_if_needed + self.data = np.atleast_2d(np.array(arg1[0], dtype=dtype, copy=copy)) + offsets = np.array(arg1[1], + dtype=self._get_index_dtype(maxval=max(shape)), + copy=copy) + self.offsets = np.atleast_1d(offsets) + self._shape = check_shape(shape) + else: + #must be dense, convert to COO first, then to DIA + try: + arg1 = np.asarray(arg1) + except Exception as e: + raise ValueError("unrecognized form for" + " %s_matrix constructor" % self.format) from e + A = self._coo_container(arg1, dtype=dtype, shape=shape).todia() + self.data = A.data + self.offsets = A.offsets + self._shape = check_shape(A.shape) + + if dtype is not None: + self.data = self.data.astype(dtype) + + #check format + if self.offsets.ndim != 1: + raise ValueError('offsets array must have rank 1') + + if self.data.ndim != 2: + raise ValueError('data array must have rank 2') + + if self.data.shape[0] != len(self.offsets): + raise ValueError('number of diagonals (%d) ' + 'does not match the number of offsets (%d)' + % (self.data.shape[0], len(self.offsets))) + + if len(np.unique(self.offsets)) != len(self.offsets): + raise ValueError('offset array contains duplicate values') + + def __repr__(self): + _, fmt = _formats[self.format] + sparse_cls = 'array' if isinstance(self, sparray) else 'matrix' + shape_str = 'x'.join(str(x) for x in self.shape) + ndiag = self.data.shape[0] + return ( + f"<{shape_str} sparse {sparse_cls} of type '{self.dtype.type}'\n" + f"\twith {self.nnz} stored elements ({ndiag} diagonals) in {fmt} format>" + ) + + def _data_mask(self): + """Returns a mask of the same shape as self.data, where + mask[i,j] is True when data[i,j] corresponds to a stored element.""" + num_rows, num_cols = self.shape + offset_inds = np.arange(self.data.shape[1]) + row = offset_inds - self.offsets[:,None] + mask = (row >= 0) + mask &= (row < num_rows) + mask &= (offset_inds < num_cols) + return mask + + def count_nonzero(self): + mask = 
self._data_mask() + return np.count_nonzero(self.data[mask]) + + def _getnnz(self, axis=None): + if axis is not None: + raise NotImplementedError("_getnnz over an axis is not implemented " + "for DIA format") + M,N = self.shape + nnz = 0 + for k in self.offsets: + if k > 0: + nnz += min(M,N-k) + else: + nnz += min(M+k,N) + return int(nnz) + + _getnnz.__doc__ = _spbase._getnnz.__doc__ + count_nonzero.__doc__ = _spbase.count_nonzero.__doc__ + + def sum(self, axis=None, dtype=None, out=None): + validateaxis(axis) + + if axis is not None and axis < 0: + axis += 2 + + res_dtype = get_sum_dtype(self.dtype) + num_rows, num_cols = self.shape + ret = None + + if axis == 0: + mask = self._data_mask() + x = (self.data * mask).sum(axis=0) + if x.shape[0] == num_cols: + res = x + else: + res = np.zeros(num_cols, dtype=x.dtype) + res[:x.shape[0]] = x + ret = self._ascontainer(res, dtype=res_dtype) + + else: + row_sums = np.zeros((num_rows, 1), dtype=res_dtype) + one = np.ones(num_cols, dtype=res_dtype) + dia_matvec(num_rows, num_cols, len(self.offsets), + self.data.shape[1], self.offsets, self.data, one, row_sums) + + row_sums = self._ascontainer(row_sums) + + if axis is None: + return row_sums.sum(dtype=dtype, out=out) + + ret = self._ascontainer(row_sums.sum(axis=axis)) + + if out is not None and out.shape != ret.shape: + raise ValueError("dimensions do not match") + + return ret.sum(axis=(), dtype=dtype, out=out) + + sum.__doc__ = _spbase.sum.__doc__ + + def _add_sparse(self, other): + + # Check if other is also of type dia_array + if not isinstance(other, type(self)): + # If other is not of type dia_array, default to + # converting to csr_matrix, as is done in the _add_sparse + # method of parent class _spbase + return self.tocsr()._add_sparse(other) + + # The task is to compute m = self + other + # Start by making a copy of self, of the datatype + # that should result from adding self and other + dtype = np.promote_types(self.dtype, other.dtype) + m = self.astype(dtype, 
    def _matmul_vector(self, other):
        # Dense matrix-vector product computed by the C routine dia_matvec,
        # which accumulates each stored diagonal into the output vector.
        x = other

        y = np.zeros(self.shape[0], dtype=upcast_char(self.dtype.char,
                                                      x.dtype.char))

        L = self.data.shape[1]

        M, N = self.shape

        dia_matvec(M, N, len(self.offsets), L, self.offsets, self.data,
                   x.ravel(), y.ravel())

        return y

    def _setdiag(self, values, k=0):
        # Write `values` onto diagonal k, growing self.data / self.offsets
        # in place when the diagonal is new or longer than currently stored.
        M, N = self.shape

        if values.ndim == 0:
            # broadcast: a scalar fills the whole diagonal, so the length
            # limit below must come only from the matrix dimensions
            values_n = np.inf
        else:
            values_n = len(values)

        if k < 0:
            n = min(M + k, N, values_n)
            min_index = 0
            max_index = n
        else:
            n = min(M, N - k, values_n)
            min_index = k
            max_index = k + n

        if values.ndim != 0:
            # allow also longer sequences: extra trailing entries are ignored
            values = values[:n]

        data_rows, data_cols = self.data.shape
        if k in self.offsets:
            # Diagonal already stored: widen the data array if needed,
            # then overwrite the stored row for this offset.
            if max_index > data_cols:
                data = np.zeros((data_rows, max_index), dtype=self.data.dtype)
                data[:, :data_cols] = self.data
                self.data = data
            self.data[self.offsets == k, min_index:max_index] = values
        else:
            # New diagonal: append its offset and a new data row.
            self.offsets = np.append(self.offsets, self.offsets.dtype.type(k))
            m = max(max_index, data_cols)
            data = np.zeros((data_rows + 1, m), dtype=self.data.dtype)
            data[:-1, :data_cols] = self.data
            data[-1, min_index:max_index] = values
            self.data = data

    def todia(self, copy=False):
        # Already in DIA format; copy only when requested.
        if copy:
            return self.copy()
        else:
            return self

    todia.__doc__ = _spbase.todia.__doc__

    def transpose(self, axes=None, copy=False):
        if axes is not None and axes != (1, 0):
            raise ValueError("Sparse arrays/matrices do not support "
                             "an 'axes' parameter because swapping "
                             "dimensions is the only logical permutation.")

        num_rows, num_cols = self.shape
        max_dim = max(self.shape)

        # flip diagonal offsets: diagonal k of A is diagonal -k of A.T
        offsets = -self.offsets

        # re-align the data matrix: each stored diagonal is cyclically
        # shifted so its entries land at the column positions expected
        # for the transposed offsets (data is padded to max_dim first)
        r = np.arange(len(offsets), dtype=np.intc)[:, None]
        c = np.arange(num_rows, dtype=np.intc) - (offsets % max_dim)[:, None]
        pad_amount = max(0, max_dim-self.data.shape[1])
        data = np.hstack((self.data, np.zeros((self.data.shape[0], pad_amount),
                                              dtype=self.data.dtype)))
        data = data[r, c]
        return self._dia_container((data, offsets), shape=(
            num_cols, num_rows), copy=copy)

    transpose.__doc__ = _spbase.transpose.__doc__

    def diagonal(self, k=0):
        rows, cols = self.shape
        if k <= -rows or k >= cols:
            # diagonal entirely outside the matrix
            return np.empty(0, dtype=self.data.dtype)
        idx, = np.nonzero(self.offsets == k)
        first_col = max(0, k)
        last_col = min(rows + k, cols)
        result_size = last_col - first_col
        if idx.size == 0:
            # diagonal k is not stored -> all zeros
            return np.zeros(result_size, dtype=self.data.dtype)
        result = self.data[idx[0], first_col:last_col]
        padding = result_size - len(result)
        if padding > 0:
            # stored row may be shorter than the in-bounds diagonal length
            result = np.pad(result, (0, padding), mode='constant')
        return result

    diagonal.__doc__ = _spbase.diagonal.__doc__

    def tocsc(self, copy=False):
        if self.nnz == 0:
            return self._csc_container(self.shape, dtype=self.dtype)

        num_rows, num_cols = self.shape
        num_offsets, offset_len = self.data.shape
        offset_inds = np.arange(offset_len)

        # row[d, j] is the row index of the entry of diagonal d stored at
        # data column j; mask keeps only in-bounds, explicitly nonzero entries
        row = offset_inds - self.offsets[:, None]
        mask = (row >= 0)
        mask &= (row < num_rows)
        mask &= (offset_inds < num_cols)
        mask &= (self.data != 0)

        idx_dtype = self._get_index_dtype(maxval=max(self.shape))
        indptr = np.zeros(num_cols + 1, dtype=idx_dtype)
        indptr[1:offset_len+1] = np.cumsum(mask.sum(axis=0)[:num_cols])
        if offset_len < num_cols:
            # trailing all-zero columns share the final indptr value
            indptr[offset_len+1:] = indptr[offset_len]
        # transposing before masking yields column-major (CSC) ordering
        indices = row.T[mask.T].astype(idx_dtype, copy=False)
        data = self.data.T[mask.T]
        return self._csc_container((data, indices, indptr), shape=self.shape,
                                   dtype=self.dtype)

    tocsc.__doc__ = _spbase.tocsc.__doc__

    def tocoo(self, copy=False):
        num_rows, num_cols = self.shape
        num_offsets, offset_len = self.data.shape
        offset_inds = np.arange(offset_len)

        # same in-bounds / nonzero masking as tocsc, but kept row-flattened
        row = offset_inds - self.offsets[:, None]
        mask = (row >= 0)
        mask &= (row < num_rows)
        mask &= (offset_inds < num_cols)
        mask &= (self.data != 0)
        row = row[mask]
        col = np.tile(offset_inds, num_offsets)[mask.ravel()]
        idx_dtype = self._get_index_dtype(
            arrays=(self.offsets,), maxval=max(self.shape)
        )
        row = row.astype(idx_dtype, copy=False)
        col = col.astype(idx_dtype, copy=False)
        data = self.data[mask]
        # Note: this cannot set has_canonical_format=True, because despite the
        # lack of duplicates, we do not generate sorted indices.
        return self._coo_container(
            (data, (row, col)), shape=self.shape, dtype=self.dtype, copy=False
        )

    tocoo.__doc__ = _spbase.tocoo.__doc__

    # needed by _data_matrix
    def _with_data(self, data, copy=True):
        """Returns a matrix with the same sparsity structure as self,
        but with different data. By default the structure arrays are copied.
        """
        if copy:
            return self._dia_container(
                (data, self.offsets.copy()), shape=self.shape
            )
        else:
            return self._dia_container(
                (data, self.offsets), shape=self.shape
            )

    def resize(self, *shape):
        shape = check_shape(shape)
        M, N = shape
        # we do not need to handle the case of expanding N:
        # truncating data columns to N is sufficient
        self.data = self.data[:, :N]

        if (M > self.shape[0] and
                np.any(self.offsets + self.shape[0] < self.data.shape[1])):
            # explicitly clear values that were previously hidden
            # (past the old row count) and would now become visible
            mask = (self.offsets[:, None] + self.shape[0] <=
                    np.arange(self.data.shape[1]))
            self.data[mask] = 0

        self._shape = shape

    resize.__doc__ = _spbase.resize.__doc__
def isspmatrix_dia(x):
    """Is `x` of dia_matrix type?

    Parameters
    ----------
    x
        object to check for being a dia matrix

    Returns
    -------
    bool
        True if `x` is a dia matrix, False otherwise

    Examples
    --------
    >>> from scipy.sparse import dia_array, dia_matrix, coo_matrix, isspmatrix_dia
    >>> isspmatrix_dia(dia_matrix([[5]]))
    True
    >>> isspmatrix_dia(dia_array([[5]]))
    False
    >>> isspmatrix_dia(coo_matrix([[5]]))
    False
    """
    # Deliberately matches only the legacy matrix class, not dia_array.
    return isinstance(x, dia_matrix)


# This namespace class separates array from matrix with isinstance
class dia_array(_dia_base, sparray):
    """
    Sparse array with DIAgonal storage.

    This can be instantiated in several ways:
        dia_array(D)
            where D is a 2-D ndarray

        dia_array(S)
            with another sparse array or matrix S (equivalent to S.todia())

        dia_array((M, N), [dtype])
            to construct an empty array with shape (M, N),
            dtype is optional, defaulting to dtype='d'.

        dia_array((data, offsets), shape=(M, N))
            where the ``data[k,:]`` stores the diagonal entries for
            diagonal ``offsets[k]`` (See example below)

    Attributes
    ----------
    dtype : dtype
        Data type of the array
    shape : 2-tuple
        Shape of the array
    ndim : int
        Number of dimensions (this is always 2)
    nnz
    size
    data
        DIA format data array of the array
    offsets
        DIA format offset array of the array
    T

    Notes
    -----

    Sparse arrays can be used in arithmetic operations: they support
    addition, subtraction, multiplication, division, and matrix power.

    Examples
    --------

    >>> import numpy as np
    >>> from scipy.sparse import dia_array
    >>> dia_array((3, 4), dtype=np.int8).toarray()
    array([[0, 0, 0, 0],
           [0, 0, 0, 0],
           [0, 0, 0, 0]], dtype=int8)

    >>> data = np.array([[1, 2, 3, 4]]).repeat(3, axis=0)
    >>> offsets = np.array([0, -1, 2])
    >>> dia_array((data, offsets), shape=(4, 4)).toarray()
    array([[1, 0, 3, 0],
           [1, 2, 0, 4],
           [0, 2, 3, 0],
           [0, 0, 3, 4]])

    >>> from scipy.sparse import dia_array
    >>> n = 10
    >>> ex = np.ones(n)
    >>> data = np.array([ex, 2 * ex, ex])
    >>> offsets = np.array([-1, 0, 1])
    >>> dia_array((data, offsets), shape=(n, n)).toarray()
    array([[2., 1., 0., ..., 0., 0., 0.],
           [1., 2., 1., ..., 0., 0., 0.],
           [0., 1., 2., ..., 0., 0., 0.],
           ...,
           [0., 0., 0., ..., 2., 1., 0.],
           [0., 0., 0., ..., 1., 2., 1.],
           [0., 0., 0., ..., 0., 1., 2.]])
    """


class dia_matrix(spmatrix, _dia_base):
    """
    Sparse matrix with DIAgonal storage.

    This can be instantiated in several ways:
        dia_matrix(D)
            where D is a 2-D ndarray

        dia_matrix(S)
            with another sparse array or matrix S (equivalent to S.todia())

        dia_matrix((M, N), [dtype])
            to construct an empty matrix with shape (M, N),
            dtype is optional, defaulting to dtype='d'.

        dia_matrix((data, offsets), shape=(M, N))
            where the ``data[k,:]`` stores the diagonal entries for
            diagonal ``offsets[k]`` (See example below)

    Attributes
    ----------
    dtype : dtype
        Data type of the matrix
    shape : 2-tuple
        Shape of the matrix
    ndim : int
        Number of dimensions (this is always 2)
    nnz
    size
    data
        DIA format data array of the matrix
    offsets
        DIA format offset array of the matrix
    T

    Notes
    -----

    Sparse matrices can be used in arithmetic operations: they support
    addition, subtraction, multiplication, division, and matrix power.

    Examples
    --------

    >>> import numpy as np
    >>> from scipy.sparse import dia_matrix
    >>> dia_matrix((3, 4), dtype=np.int8).toarray()
    array([[0, 0, 0, 0],
           [0, 0, 0, 0],
           [0, 0, 0, 0]], dtype=int8)

    >>> data = np.array([[1, 2, 3, 4]]).repeat(3, axis=0)
    >>> offsets = np.array([0, -1, 2])
    >>> dia_matrix((data, offsets), shape=(4, 4)).toarray()
    array([[1, 0, 3, 0],
           [1, 2, 0, 4],
           [0, 2, 3, 0],
           [0, 0, 3, 4]])

    >>> from scipy.sparse import dia_matrix
    >>> n = 10
    >>> ex = np.ones(n)
    >>> data = np.array([ex, 2 * ex, ex])
    >>> offsets = np.array([-1, 0, 1])
    >>> dia_matrix((data, offsets), shape=(n, n)).toarray()
    array([[2., 1., 0., ..., 0., 0., 0.],
           [1., 2., 1., ..., 0., 0., 0.],
           [0., 1., 2., ..., 0., 0., 0.],
           ...,
           [0., 0., 0., ..., 2., 1., 0.],
           [0., 0., 0., ..., 1., 2., 1.],
           [0., 0., 0., ..., 0., 1., 2.]])
    """
    def update(self, val):
        # Prevent direct usage of update: bulk dict updates would bypass
        # the zero-dropping and index validation done by __setitem__.
        raise NotImplementedError("Direct update to DOK sparse format is not allowed.")

    def _getnnz(self, axis=None):
        if axis is not None:
            raise NotImplementedError(
                "_getnnz over an axis is not implemented for DOK format."
            )
        # every stored entry counts, including explicitly stored zeros
        return len(self._dict)

    def count_nonzero(self):
        return sum(x != 0 for x in self.values())

    _getnnz.__doc__ = _spbase._getnnz.__doc__
    count_nonzero.__doc__ = _spbase.count_nonzero.__doc__

    # --- dict protocol, forwarded to the backing self._dict ---

    def __len__(self):
        return len(self._dict)

    def __contains__(self, key):
        return key in self._dict

    def setdefault(self, key, default=None, /):
        return self._dict.setdefault(key, default)

    def __delitem__(self, key, /):
        del self._dict[key]

    def clear(self):
        return self._dict.clear()

    def pop(self, key, default=None, /):
        return self._dict.pop(key, default)

    def __reversed__(self):
        raise TypeError("reversed is not defined for dok_array type")

    # The | operators are disabled here; dok_matrix re-enables them for
    # backward compatibility with its dict heritage.
    def __or__(self, other):
        type_names = f"{type(self).__name__} and {type(other).__name__}"
        raise TypeError(f"unsupported operand type for |: {type_names}")

    def __ror__(self, other):
        type_names = f"{type(self).__name__} and {type(other).__name__}"
        raise TypeError(f"unsupported operand type for |: {type_names}")

    def __ior__(self, other):
        type_names = f"{type(self).__name__} and {type(other).__name__}"
        raise TypeError(f"unsupported operand type for |: {type_names}")

    def popitem(self):
        return self._dict.popitem()

    def items(self):
        return self._dict.items()

    def keys(self):
        return self._dict.keys()

    def values(self):
        return self._dict.values()

    def get(self, key, default=0.0):
        """This provides dict.get method functionality with type checking"""
        if key in self._dict:
            return self._dict[key]
        # normalize a bare int index for the 1-D case into a tuple
        if isintlike(key) and self.ndim == 1:
            key = (key,)
        if self.ndim != len(key):
            raise IndexError(f'Index {key} length needs to match self.shape')
        try:
            for i in key:
                assert isintlike(i)
        except (AssertionError, TypeError, ValueError) as e:
            raise IndexError('Index must be or consist of integers.') from e
        # support negative indices, then bounds-check
        key = tuple(i + M if i < 0 else i for i, M in zip(key, self.shape))
        if any(i < 0 or i >= M for i, M in zip(key, self.shape)):
            raise IndexError('Index out of bounds.')
        if self.ndim == 1:
            key = key[0]
        return self._dict.get(key, default)

    # override IndexMixin.__getitem__ for 1d case until fully implemented
    def __getitem__(self, key):
        if self.ndim == 2:
            return super().__getitem__(key)

        if isinstance(key, tuple) and len(key) == 1:
            key = key[0]
        INT_TYPES = (int, np.integer)
        if isinstance(key, INT_TYPES):
            if key < 0:
                key += self.shape[-1]
            if key < 0 or key >= self.shape[-1]:
                raise IndexError('index value out of bounds')
            return self._get_int(key)
        else:
            raise IndexError('array/slice index for 1d dok_array not yet supported')

    # 1D get methods
    def _get_int(self, idx):
        # missing entries read as a dtype-typed zero
        return self._dict.get(idx, self.dtype.type(0))

    # 2D get methods
    def _get_intXint(self, row, col):
        return self._dict.get((row, col), self.dtype.type(0))

    def _get_intXslice(self, row, col):
        return self._get_sliceXslice(slice(row, row + 1), col)

    def _get_sliceXint(self, row, col):
        return self._get_sliceXslice(row, slice(col, col + 1))

    def _get_sliceXslice(self, row, col):
        row_start, row_stop, row_step = row.indices(self.shape[0])
        col_start, col_stop, col_step = col.indices(self.shape[1])
        row_range = range(row_start, row_stop, row_step)
        col_range = range(col_start, col_stop, col_step)
        shape = (len(row_range), len(col_range))
        # Switch paths only when advantageous
        # (count the iterations in the loops, adjust for complexity)
        if len(self) >= 2 * shape[0] * shape[1]:
            # O(nr*nc) path: loop over the requested rectangle
            return self._get_columnXarray(row_range, col_range)
        # O(nnz) path: loop over entries of self
        newdok = self._dok_container(shape, dtype=self.dtype)
        for key in self.keys():
            # divmod checks both step alignment (remainder) and range
            i, ri = divmod(int(key[0]) - row_start, row_step)
            if ri != 0 or i < 0 or i >= shape[0]:
                continue
            j, rj = divmod(int(key[1]) - col_start, col_step)
            if rj != 0 or j < 0 or j >= shape[1]:
                continue
            newdok._dict[i, j] = self._dict[key]
        return newdok
    def _get_intXarray(self, row, col):
        col = col.squeeze()
        return self._get_columnXarray([row], col)

    def _get_arrayXint(self, row, col):
        row = row.squeeze()
        return self._get_columnXarray(row, [col])

    def _get_sliceXarray(self, row, col):
        row = list(range(*row.indices(self.shape[0])))
        return self._get_columnXarray(row, col)

    def _get_arrayXslice(self, row, col):
        col = list(range(*col.indices(self.shape[1])))
        return self._get_columnXarray(row, col)

    def _get_columnXarray(self, row, col):
        # outer indexing: result has shape (len(row), len(col))
        newdok = self._dok_container((len(row), len(col)), dtype=self.dtype)

        for i, r in enumerate(row):
            for j, c in enumerate(col):
                v = self._dict.get((r, c), 0)
                if v:
                    newdok._dict[i, j] = v
        return newdok

    def _get_arrayXarray(self, row, col):
        # inner indexing: row/col broadcast against each other elementwise
        i, j = map(np.atleast_2d, np.broadcast_arrays(row, col))
        newdok = self._dok_container(i.shape, dtype=self.dtype)

        for key in itertools.product(range(i.shape[0]), range(i.shape[1])):
            v = self._dict.get((i[key], j[key]), 0)
            if v:
                newdok._dict[key] = v
        return newdok

    # override IndexMixin.__setitem__ for 1d case until fully implemented
    def __setitem__(self, key, value):
        if self.ndim == 2:
            return super().__setitem__(key, value)

        if isinstance(key, tuple) and len(key) == 1:
            key = key[0]
        INT_TYPES = (int, np.integer)
        if isinstance(key, INT_TYPES):
            if key < 0:
                key += self.shape[-1]
            if key < 0 or key >= self.shape[-1]:
                raise IndexError('index value out of bounds')
            return self._set_int(key, value)
        else:
            raise IndexError('array index for 1d dok_array not yet provided')

    # 1D set methods
    def _set_int(self, idx, x):
        # storing a falsy value removes the entry to keep the dict sparse
        if x:
            self._dict[idx] = x
        elif idx in self._dict:
            del self._dict[idx]

    # 2D set methods
    def _set_intXint(self, row, col, x):
        key = (row, col)
        if x:
            self._dict[key] = x
        elif key in self._dict:
            del self._dict[key]

    def _set_arrayXarray(self, row, col, x):
        row = list(map(int, row.ravel()))
        col = list(map(int, col.ravel()))
        x = x.ravel()
        self._dict.update(zip(zip(row, col), x))

        # drop entries explicitly assigned zero
        for i in np.nonzero(x == 0)[0]:
            key = (row[i], col[i])
            if self._dict[key] == 0:
                # may have been superseded by later update
                del self._dict[key]

    def __add__(self, other):
        if isscalarlike(other):
            res_dtype = upcast_scalar(self.dtype, other)
            new = self._dok_container(self.shape, dtype=res_dtype)
            # Add this scalar to each element (densifies: every index is
            # visited, so the result is generally no longer sparse).
            for key in itertools.product(*[range(d) for d in self.shape]):
                aij = self._dict.get(key, 0) + other
                if aij:
                    new[key] = aij
        elif issparse(other):
            if other.shape != self.shape:
                raise ValueError("Matrix dimensions are not equal.")
            res_dtype = upcast(self.dtype, other.dtype)
            new = self._dok_container(self.shape, dtype=res_dtype)
            new._dict = self._dict.copy()
            if other.format == "dok":
                o_items = other.items()
            else:
                other = other.tocoo()
                if self.ndim == 1:
                    o_items = zip(other.coords[0], other.data)
                else:
                    o_items = zip(zip(*other.coords), other.data)
            with np.errstate(over='ignore'):
                new._dict.update((k, new[k] + v) for k, v in o_items)
        elif isdense(other):
            new = self.todense() + other
        else:
            return NotImplemented
        return new

    def __radd__(self, other):
        return self + other  # addition is commutative

    def __neg__(self):
        if self.dtype.kind == 'b':
            raise NotImplementedError(
                'Negating a sparse boolean matrix is not supported.'
            )
        new = self._dok_container(self.shape, dtype=self.dtype)
        new._dict.update((k, -v) for k, v in self.items())
        return new

    def _mul_scalar(self, other):
        res_dtype = upcast_scalar(self.dtype, other)
        # Multiply this scalar by every element.
        new = self._dok_container(self.shape, dtype=res_dtype)
        new._dict.update(((k, v * other) for k, v in self.items()))
        return new

    def _matmul_vector(self, other):
        res_dtype = upcast(self.dtype, other.dtype)

        # vector @ vector
        if self.ndim == 1:
            if issparse(other):
                if other.format == "dok":
                    keys = self.keys() & other.keys()
                else:
                    keys = self.keys() & other.tocoo().coords[0]
                # NOTE(review): `other._dict` is only defined when other is
                # DOK; the non-dok sparse branch above would hit an
                # AttributeError here — confirm against callers.
                return res_dtype(sum(self._dict[k] * other._dict[k] for k in keys))
            elif isdense(other):
                return res_dtype(sum(other[k] * v for k, v in self.items()))
            else:
                return NotImplemented

        # matrix @ vector
        result = np.zeros(self.shape[0], dtype=res_dtype)
        for (i, j), v in self.items():
            result[i] += v * other[j]
        return result

    def _matmul_multivector(self, other):
        result_dtype = upcast(self.dtype, other.dtype)
        # vector @ multivector
        if self.ndim == 1:
            # works for other 1d or 2d
            return sum(v * other[j] for j, v in self._dict.items())

        # matrix @ multivector
        M = self.shape[0]
        new_shape = (M,) if other.ndim == 1 else (M, other.shape[1])
        result = np.zeros(new_shape, dtype=result_dtype)
        for (i, j), v in self.items():
            result[i] += v * other[j]
        return result

    def __imul__(self, other):
        if isscalarlike(other):
            self._dict.update((k, v * other) for k, v in self.items())
            return self
        return NotImplemented

    def __truediv__(self, other):
        if isscalarlike(other):
            res_dtype = upcast_scalar(self.dtype, other)
            new = self._dok_container(self.shape, dtype=res_dtype)
            new._dict.update(((k, v / other) for k, v in self.items()))
            return new
        # non-scalar divisor: fall back to CSR division
        return self.tocsr() / other

    def __itruediv__(self, other):
        if isscalarlike(other):
            self._dict.update((k, v / other) for k, v in self.items())
            return self
        return NotImplemented

    def __reduce__(self):
        # this approach is necessary because __setstate__ is called after
        # __setitem__ upon unpickling and since __init__ is not called there
        # is no shape attribute hence it is not possible to unpickle it.
        return dict.__reduce__(self)
possible to unpickle it. + return dict.__reduce__(self) + + def diagonal(self, k=0): + if self.ndim == 2: + return super().diagonal(k) + raise ValueError("diagonal requires two dimensions") + + def transpose(self, axes=None, copy=False): + if self.ndim == 1: + return self.copy() + + if axes is not None and axes != (1, 0): + raise ValueError( + "Sparse arrays/matrices do not support " + "an 'axes' parameter because swapping " + "dimensions is the only logical permutation." + ) + + M, N = self.shape + new = self._dok_container((N, M), dtype=self.dtype, copy=copy) + new._dict.update((((right, left), val) for (left, right), val in self.items())) + return new + + transpose.__doc__ = _spbase.transpose.__doc__ + + def conjtransp(self): + """Return the conjugate transpose.""" + if self.ndim == 1: + new = self.tocoo() + new.data = new.data.conjugate() + return new + M, N = self.shape + new = self._dok_container((N, M), dtype=self.dtype) + new._dict = {(right, left): np.conj(val) for (left, right), val in self.items()} + return new + + def copy(self): + new = self._dok_container(self.shape, dtype=self.dtype) + new._dict.update(self._dict) + return new + + copy.__doc__ = _spbase.copy.__doc__ + + @classmethod + def fromkeys(cls, iterable, value=1, /): + tmp = dict.fromkeys(iterable, value) + if isinstance(next(iter(tmp)), tuple): + shape = tuple(max(idx) + 1 for idx in zip(*tmp)) + else: + shape = (max(tmp) + 1,) + result = cls(shape, dtype=type(value)) + result._dict = tmp + return result + + def tocoo(self, copy=False): + nnz = self.nnz + if nnz == 0: + return self._coo_container(self.shape, dtype=self.dtype) + + idx_dtype = self._get_index_dtype(maxval=max(self.shape)) + data = np.fromiter(self.values(), dtype=self.dtype, count=nnz) + # handle 1d keys specially b/c not a tuple + inds = zip(*self.keys()) if self.ndim > 1 else (self.keys(),) + coords = tuple(np.fromiter(ix, dtype=idx_dtype, count=nnz) for ix in inds) + A = self._coo_container((data, coords), shape=self.shape, 
dtype=self.dtype) + A.has_canonical_format = True + return A + + tocoo.__doc__ = _spbase.tocoo.__doc__ + + def todok(self, copy=False): + if copy: + return self.copy() + return self + + todok.__doc__ = _spbase.todok.__doc__ + + def tocsc(self, copy=False): + if self.ndim == 1: + raise NotImplementedError("tocsr() not valid for 1d sparse array") + return self.tocoo(copy=False).tocsc(copy=copy) + + tocsc.__doc__ = _spbase.tocsc.__doc__ + + def resize(self, *shape): + is_array = isinstance(self, sparray) + shape = check_shape(shape, allow_1d=is_array) + if len(shape) != len(self.shape): + # TODO implement resize across dimensions + raise NotImplementedError + + if self.ndim == 1: + newN = shape[-1] + for i in list(self._dict): + if i >= newN: + del self._dict[i] + self._shape = shape + return + + newM, newN = shape + M, N = self.shape + if newM < M or newN < N: + # Remove all elements outside new dimensions + for i, j in list(self.keys()): + if i >= newM or j >= newN: + del self._dict[i, j] + self._shape = shape + + resize.__doc__ = _spbase.resize.__doc__ + + # Added for 1d to avoid `tocsr` from _base.py + def astype(self, dtype, casting='unsafe', copy=True): + dtype = np.dtype(dtype) + if self.dtype != dtype: + result = self._dok_container(self.shape, dtype=dtype) + data = np.array(list(self._dict.values()), dtype=dtype) + result._dict = dict(zip(self._dict, data)) + return result + elif copy: + return self.copy() + return self + + +def isspmatrix_dok(x): + """Is `x` of dok_array type? 
def isspmatrix_dok(x):
    """Is `x` of dok_array type?

    Parameters
    ----------
    x
        object to check for being a dok matrix

    Returns
    -------
    bool
        True if `x` is a dok matrix, False otherwise

    Examples
    --------
    >>> from scipy.sparse import dok_array, dok_matrix, coo_matrix, isspmatrix_dok
    >>> isspmatrix_dok(dok_matrix([[5]]))
    True
    >>> isspmatrix_dok(dok_array([[5]]))
    False
    >>> isspmatrix_dok(coo_matrix([[5]]))
    False
    """
    # Deliberately matches only the legacy matrix class, not dok_array.
    return isinstance(x, dok_matrix)


# This namespace class separates array from matrix with isinstance
class dok_array(_dok_base, sparray):
    """
    Dictionary Of Keys based sparse array.

    This is an efficient structure for constructing sparse
    arrays incrementally.

    This can be instantiated in several ways:
        dok_array(D)
            where D is a 2-D ndarray

        dok_array(S)
            with another sparse array or matrix S (equivalent to S.todok())

        dok_array((M,N), [dtype])
            create the array with initial shape (M,N)
            dtype is optional, defaulting to dtype='d'

    Attributes
    ----------
    dtype : dtype
        Data type of the array
    shape : 2-tuple
        Shape of the array
    ndim : int
        Number of dimensions (this is always 2)
    nnz
        Number of nonzero elements
    size
    T

    Notes
    -----

    Sparse arrays can be used in arithmetic operations: they support
    addition, subtraction, multiplication, division, and matrix power.

    - Allows for efficient O(1) access of individual elements.
    - Duplicates are not allowed.
    - Can be efficiently converted to a coo_array once constructed.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import dok_array
    >>> S = dok_array((5, 5), dtype=np.float32)
    >>> for i in range(5):
    ...     for j in range(5):
    ...         S[i, j] = i + j  # Update element

    """


class dok_matrix(spmatrix, _dok_base):
    """
    Dictionary Of Keys based sparse matrix.

    This is an efficient structure for constructing sparse
    matrices incrementally.

    This can be instantiated in several ways:
        dok_matrix(D)
            where D is a 2-D ndarray

        dok_matrix(S)
            with another sparse array or matrix S (equivalent to S.todok())

        dok_matrix((M,N), [dtype])
            create the matrix with initial shape (M,N)
            dtype is optional, defaulting to dtype='d'

    Attributes
    ----------
    dtype : dtype
        Data type of the matrix
    shape : 2-tuple
        Shape of the matrix
    ndim : int
        Number of dimensions (this is always 2)
    nnz
        Number of nonzero elements
    size
    T

    Notes
    -----

    Sparse matrices can be used in arithmetic operations: they support
    addition, subtraction, multiplication, division, and matrix power.

    - Allows for efficient O(1) access of individual elements.
    - Duplicates are not allowed.
    - Can be efficiently converted to a coo_matrix once constructed.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import dok_matrix
    >>> S = dok_matrix((5, 5), dtype=np.float32)
    >>> for i in range(5):
    ...     for j in range(5):
    ...         S[i, j] = i + j  # Update element

    """

    def set_shape(self, shape):
        # Legacy mutable-shape support: reshape and adopt the result's state.
        new_matrix = self.reshape(shape, copy=False).asformat(self.format)
        self.__dict__ = new_matrix.__dict__

    def get_shape(self):
        """Get shape of a sparse matrix."""
        return self._shape

    shape = property(fget=get_shape, fset=set_shape)

    # dok_matrix historically behaved like a dict; these re-enable the
    # dict-style | operators that _dok_base disables.
    def __reversed__(self):
        return self._dict.__reversed__()

    def __or__(self, other):
        if isinstance(other, _dok_base):
            return self._dict | other._dict
        return self._dict | other

    def __ror__(self, other):
        # NOTE(review): for dict merge, the right operand's values win;
        # __ror__ means `other | self` yet this computes self | other —
        # confirm whether the operand order here is intentional.
        if isinstance(other, _dok_base):
            return self._dict | other._dict
        return self._dict | other

    def __ior__(self, other):
        if isinstance(other, _dok_base):
            self._dict |= other._dict
        else:
            self._dict |= other
        return self
+ + + Examples + -------- + >>> from scipy.sparse import csr_array, find + >>> A = csr_array([[7.0, 8.0, 0],[0, 0, 9.0]]) + >>> find(A) + (array([0, 0, 1], dtype=int32), + array([0, 1, 2], dtype=int32), + array([ 7., 8., 9.])) + + """ + + A = coo_array(A, copy=True) + A.sum_duplicates() + # remove explicit zeros + nz_mask = A.data != 0 + return A.row[nz_mask], A.col[nz_mask], A.data[nz_mask] + + +def tril(A, k=0, format=None): + """Return the lower triangular portion of a sparse array or matrix + + Returns the elements on or below the k-th diagonal of A. + - k = 0 corresponds to the main diagonal + - k > 0 is above the main diagonal + - k < 0 is below the main diagonal + + Parameters + ---------- + A : dense or sparse array or matrix + Matrix whose lower trianglar portion is desired. + k : integer : optional + The top-most diagonal of the lower triangle. + format : string + Sparse format of the result, e.g. format="csr", etc. + + Returns + ------- + L : sparse matrix + Lower triangular portion of A in sparse format. + + See Also + -------- + triu : upper triangle in sparse format + + Examples + -------- + >>> from scipy.sparse import csr_array, tril + >>> A = csr_array([[1, 2, 0, 0, 3], [4, 5, 0, 6, 7], [0, 0, 8, 9, 0]], + ... 
dtype='int32') + >>> A.toarray() + array([[1, 2, 0, 0, 3], + [4, 5, 0, 6, 7], + [0, 0, 8, 9, 0]]) + >>> tril(A).toarray() + array([[1, 0, 0, 0, 0], + [4, 5, 0, 0, 0], + [0, 0, 8, 0, 0]]) + >>> tril(A).nnz + 4 + >>> tril(A, k=1).toarray() + array([[1, 2, 0, 0, 0], + [4, 5, 0, 0, 0], + [0, 0, 8, 9, 0]]) + >>> tril(A, k=-1).toarray() + array([[0, 0, 0, 0, 0], + [4, 0, 0, 0, 0], + [0, 0, 0, 0, 0]]) + >>> tril(A, format='csc') + <3x5 sparse array of type '' + with 4 stored elements in Compressed Sparse Column format> + + """ + coo_sparse = coo_array if isinstance(A, sparray) else coo_matrix + + # convert to COOrdinate format where things are easy + A = coo_sparse(A, copy=False) + mask = A.row + k >= A.col + + row = A.row[mask] + col = A.col[mask] + data = A.data[mask] + new_coo = coo_sparse((data, (row, col)), shape=A.shape, dtype=A.dtype) + return new_coo.asformat(format) + + +def triu(A, k=0, format=None): + """Return the upper triangular portion of a sparse array or matrix + + Returns the elements on or above the k-th diagonal of A. + - k = 0 corresponds to the main diagonal + - k > 0 is above the main diagonal + - k < 0 is below the main diagonal + + Parameters + ---------- + A : dense or sparse array or matrix + Matrix whose upper trianglar portion is desired. + k : integer : optional + The bottom-most diagonal of the upper triangle. + format : string + Sparse format of the result, e.g. format="csr", etc. + + Returns + ------- + L : sparse array or matrix + Upper triangular portion of A in sparse format. + Sparse array if A is a sparse array, otherwise matrix. + + See Also + -------- + tril : lower triangle in sparse format + + Examples + -------- + >>> from scipy.sparse import csr_array, triu + >>> A = csr_array([[1, 2, 0, 0, 3], [4, 5, 0, 6, 7], [0, 0, 8, 9, 0]], + ... 
dtype='int32') + >>> A.toarray() + array([[1, 2, 0, 0, 3], + [4, 5, 0, 6, 7], + [0, 0, 8, 9, 0]]) + >>> triu(A).toarray() + array([[1, 2, 0, 0, 3], + [0, 5, 0, 6, 7], + [0, 0, 8, 9, 0]]) + >>> triu(A).nnz + 8 + >>> triu(A, k=1).toarray() + array([[0, 2, 0, 0, 3], + [0, 0, 0, 6, 7], + [0, 0, 0, 9, 0]]) + >>> triu(A, k=-1).toarray() + array([[1, 2, 0, 0, 3], + [4, 5, 0, 6, 7], + [0, 0, 8, 9, 0]]) + >>> triu(A, format='csc') + <3x5 sparse array of type '' + with 8 stored elements in Compressed Sparse Column format> + + """ + coo_sparse = coo_array if isinstance(A, sparray) else coo_matrix + + # convert to COOrdinate format where things are easy + A = coo_sparse(A, copy=False) + mask = A.row + k <= A.col + + row = A.row[mask] + col = A.col[mask] + data = A.data[mask] + new_coo = coo_sparse((data, (row, col)), shape=A.shape, dtype=A.dtype) + return new_coo.asformat(format) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_index.py b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_index.py new file mode 100644 index 0000000000000000000000000000000000000000..c0fc3d01b0ebd153703a76af431626d958b7de64 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_index.py @@ -0,0 +1,392 @@ +"""Indexing mixin for sparse array/matrix classes. +""" +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np +from ._sputils import isintlike + +if TYPE_CHECKING: + import numpy.typing as npt + +INT_TYPES = (int, np.integer) + + +def _broadcast_arrays(a, b): + """ + Same as np.broadcast_arrays(a, b) but old writeability rules. + + NumPy >= 1.17.0 transitions broadcast_arrays to return + read-only arrays. Set writeability explicitly to avoid warnings. + Retain the old writeability rules, as our Cython code assumes + the old behavior. 
+ """ + x, y = np.broadcast_arrays(a, b) + x.flags.writeable = a.flags.writeable + y.flags.writeable = b.flags.writeable + return x, y + + +class IndexMixin: + """ + This class provides common dispatching and validation logic for indexing. + """ + def _raise_on_1d_array_slice(self): + """We do not currently support 1D sparse arrays. + + This function is called each time that a 1D array would + result, raising an error instead. + + Once 1D sparse arrays are implemented, it should be removed. + """ + from scipy.sparse import sparray + + if isinstance(self, sparray): + raise NotImplementedError( + 'We have not yet implemented 1D sparse slices; ' + 'please index using explicit indices, e.g. `x[:, [0]]`' + ) + + def __getitem__(self, key): + row, col = self._validate_indices(key) + + # Dispatch to specialized methods. + if isinstance(row, INT_TYPES): + if isinstance(col, INT_TYPES): + return self._get_intXint(row, col) + elif isinstance(col, slice): + self._raise_on_1d_array_slice() + return self._get_intXslice(row, col) + elif col.ndim == 1: + self._raise_on_1d_array_slice() + return self._get_intXarray(row, col) + elif col.ndim == 2: + return self._get_intXarray(row, col) + raise IndexError('index results in >2 dimensions') + elif isinstance(row, slice): + if isinstance(col, INT_TYPES): + self._raise_on_1d_array_slice() + return self._get_sliceXint(row, col) + elif isinstance(col, slice): + if row == slice(None) and row == col: + return self.copy() + return self._get_sliceXslice(row, col) + elif col.ndim == 1: + return self._get_sliceXarray(row, col) + raise IndexError('index results in >2 dimensions') + elif row.ndim == 1: + if isinstance(col, INT_TYPES): + self._raise_on_1d_array_slice() + return self._get_arrayXint(row, col) + elif isinstance(col, slice): + return self._get_arrayXslice(row, col) + else: # row.ndim == 2 + if isinstance(col, INT_TYPES): + return self._get_arrayXint(row, col) + elif isinstance(col, slice): + raise IndexError('index results in >2 
dimensions') + elif row.shape[1] == 1 and (col.ndim == 1 or col.shape[0] == 1): + # special case for outer indexing + return self._get_columnXarray(row[:,0], col.ravel()) + + # The only remaining case is inner (fancy) indexing + row, col = _broadcast_arrays(row, col) + if row.shape != col.shape: + raise IndexError('number of row and column indices differ') + if row.size == 0: + return self.__class__(np.atleast_2d(row).shape, dtype=self.dtype) + return self._get_arrayXarray(row, col) + + def __setitem__(self, key, x): + row, col = self._validate_indices(key) + + if isinstance(row, INT_TYPES) and isinstance(col, INT_TYPES): + x = np.asarray(x, dtype=self.dtype) + if x.size != 1: + raise ValueError('Trying to assign a sequence to an item') + self._set_intXint(row, col, x.flat[0]) + return + + if isinstance(row, slice): + row = np.arange(*row.indices(self.shape[0]))[:, None] + else: + row = np.atleast_1d(row) + + if isinstance(col, slice): + col = np.arange(*col.indices(self.shape[1]))[None, :] + if row.ndim == 1: + row = row[:, None] + else: + col = np.atleast_1d(col) + + i, j = _broadcast_arrays(row, col) + if i.shape != j.shape: + raise IndexError('number of row and column indices differ') + + from ._base import issparse + if issparse(x): + if i.ndim == 1: + # Inner indexing, so treat them like row vectors. 
+ i = i[None] + j = j[None] + broadcast_row = x.shape[0] == 1 and i.shape[0] != 1 + broadcast_col = x.shape[1] == 1 and i.shape[1] != 1 + if not ((broadcast_row or x.shape[0] == i.shape[0]) and + (broadcast_col or x.shape[1] == i.shape[1])): + raise ValueError('shape mismatch in assignment') + if x.shape[0] == 0 or x.shape[1] == 0: + return + x = x.tocoo(copy=True) + x.sum_duplicates() + self._set_arrayXarray_sparse(i, j, x) + else: + # Make x and i into the same shape + x = np.asarray(x, dtype=self.dtype) + if x.squeeze().shape != i.squeeze().shape: + x = np.broadcast_to(x, i.shape) + if x.size == 0: + return + x = x.reshape(i.shape) + self._set_arrayXarray(i, j, x) + + def _validate_indices(self, key): + # First, check if indexing with single boolean matrix. + from ._base import _spbase + if (isinstance(key, (_spbase, np.ndarray)) and + key.ndim == 2 and key.dtype.kind == 'b'): + if key.shape != self.shape: + raise IndexError('boolean index shape does not match array shape') + row, col = key.nonzero() + else: + row, col = _unpack_index(key) + M, N = self.shape + + def _validate_bool_idx( + idx: npt.NDArray[np.bool_], + axis_size: int, + axis_name: str + ) -> npt.NDArray[np.int_]: + if len(idx) != axis_size: + raise IndexError( + f"boolean {axis_name} index has incorrect length: {len(idx)} " + f"instead of {axis_size}" + ) + return _boolean_index_to_array(idx) + + if isintlike(row): + row = int(row) + if row < -M or row >= M: + raise IndexError('row index (%d) out of range' % row) + if row < 0: + row += M + elif (bool_row := _compatible_boolean_index(row)) is not None: + row = _validate_bool_idx(bool_row, M, "row") + elif not isinstance(row, slice): + row = self._asindices(row, M) + + if isintlike(col): + col = int(col) + if col < -N or col >= N: + raise IndexError('column index (%d) out of range' % col) + if col < 0: + col += N + elif (bool_col := _compatible_boolean_index(col)) is not None: + col = _validate_bool_idx(bool_col, N, "column") + elif not 
isinstance(col, slice): + col = self._asindices(col, N) + + return row, col + + def _asindices(self, idx, length): + """Convert `idx` to a valid index for an axis with a given length. + + Subclasses that need special validation can override this method. + """ + try: + x = np.asarray(idx) + except (ValueError, TypeError, MemoryError) as e: + raise IndexError('invalid index') from e + + if x.ndim not in (1, 2): + raise IndexError('Index dimension must be 1 or 2') + + if x.size == 0: + return x + + # Check bounds + max_indx = x.max() + if max_indx >= length: + raise IndexError('index (%d) out of range' % max_indx) + + min_indx = x.min() + if min_indx < 0: + if min_indx < -length: + raise IndexError('index (%d) out of range' % min_indx) + if x is idx or not x.flags.owndata: + x = x.copy() + x[x < 0] += length + return x + + def _getrow(self, i): + """Return a copy of row i of the matrix, as a (1 x n) row vector. + """ + M, N = self.shape + i = int(i) + if i < -M or i >= M: + raise IndexError('index (%d) out of range' % i) + if i < 0: + i += M + return self._get_intXslice(i, slice(None)) + + def _getcol(self, i): + """Return a copy of column i of the matrix, as a (m x 1) column vector. 
+ """ + M, N = self.shape + i = int(i) + if i < -N or i >= N: + raise IndexError('index (%d) out of range' % i) + if i < 0: + i += N + return self._get_sliceXint(slice(None), i) + + def _get_intXint(self, row, col): + raise NotImplementedError() + + def _get_intXarray(self, row, col): + raise NotImplementedError() + + def _get_intXslice(self, row, col): + raise NotImplementedError() + + def _get_sliceXint(self, row, col): + raise NotImplementedError() + + def _get_sliceXslice(self, row, col): + raise NotImplementedError() + + def _get_sliceXarray(self, row, col): + raise NotImplementedError() + + def _get_arrayXint(self, row, col): + raise NotImplementedError() + + def _get_arrayXslice(self, row, col): + raise NotImplementedError() + + def _get_columnXarray(self, row, col): + raise NotImplementedError() + + def _get_arrayXarray(self, row, col): + raise NotImplementedError() + + def _set_intXint(self, row, col, x): + raise NotImplementedError() + + def _set_arrayXarray(self, row, col, x): + raise NotImplementedError() + + def _set_arrayXarray_sparse(self, row, col, x): + # Fall back to densifying x + x = np.asarray(x.toarray(), dtype=self.dtype) + x, _ = _broadcast_arrays(x, row) + self._set_arrayXarray(row, col, x) + + +def _unpack_index(index) -> tuple[ + int | slice | npt.NDArray[np.bool_ | np.int_], + int | slice | npt.NDArray[np.bool_ | np.int_] +]: + """ Parse index. Always return a tuple of the form (row, col). + Valid type for row/col is integer, slice, array of bool, or array of integers. + """ + # Parse any ellipses. 
+ index = _check_ellipsis(index) + + # Next, parse the tuple or object + if isinstance(index, tuple): + if len(index) == 2: + row, col = index + elif len(index) == 1: + row, col = index[0], slice(None) + else: + raise IndexError('invalid number of indices') + else: + idx = _compatible_boolean_index(index) + if idx is None: + row, col = index, slice(None) + elif idx.ndim < 2: + return idx, slice(None) + elif idx.ndim == 2: + return idx.nonzero() + # Next, check for validity and transform the index as needed. + from ._base import issparse + if issparse(row) or issparse(col): + # Supporting sparse boolean indexing with both row and col does + # not work because spmatrix.ndim is always 2. + raise IndexError( + 'Indexing with sparse matrices is not supported ' + 'except boolean indexing where matrix and index ' + 'are equal shapes.') + return row, col + + +def _check_ellipsis(index): + """Process indices with Ellipsis. Returns modified index.""" + if index is Ellipsis: + return (slice(None), slice(None)) + + if not isinstance(index, tuple): + return index + + # Find any Ellipsis objects. + ellipsis_indices = [i for i, v in enumerate(index) if v is Ellipsis] + if not ellipsis_indices: + return index + if len(ellipsis_indices) > 1: + raise IndexError("an index can only have a single ellipsis ('...')") + + # Replace the Ellipsis object with 0, 1, or 2 null-slices as needed. + i, = ellipsis_indices + num_slices = max(0, 3 - len(index)) + return index[:i] + (slice(None),) * num_slices + index[i + 1:] + + +def _maybe_bool_ndarray(idx): + """Returns a compatible array if elements are boolean. + """ + idx = np.asanyarray(idx) + if idx.dtype.kind == 'b': + return idx + return None + + +def _first_element_bool(idx, max_dim=2): + """Returns True if first element of the incompatible + array type is boolean. 
+ """ + if max_dim < 1: + return None + try: + first = next(iter(idx), None) + except TypeError: + return None + if isinstance(first, bool): + return True + return _first_element_bool(first, max_dim-1) + + +def _compatible_boolean_index(idx): + """Returns a boolean index array that can be converted to + integer array. Returns None if no such array exists. + """ + # Presence of attribute `ndim` indicates a compatible array type. + if hasattr(idx, 'ndim') or _first_element_bool(idx): + return _maybe_bool_ndarray(idx) + return None + + +def _boolean_index_to_array(idx): + if idx.ndim > 1: + raise IndexError('invalid index shape') + return np.where(idx)[0] diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_lil.py b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_lil.py new file mode 100644 index 0000000000000000000000000000000000000000..b5590010386190fa5fadcdb3e9fae3cc236a3b0e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_lil.py @@ -0,0 +1,618 @@ +"""List of Lists sparse matrix class +""" + +__docformat__ = "restructuredtext en" + +__all__ = ['lil_array', 'lil_matrix', 'isspmatrix_lil'] + +from bisect import bisect_left + +import numpy as np + +from ._matrix import spmatrix +from ._base import _spbase, sparray, issparse +from ._index import IndexMixin, INT_TYPES, _broadcast_arrays +from ._sputils import (getdtype, isshape, isscalarlike, upcast_scalar, + check_shape, check_reshape_kwargs) +from . 
import _csparsetools + + +class _lil_base(_spbase, IndexMixin): + _format = 'lil' + + def __init__(self, arg1, shape=None, dtype=None, copy=False): + _spbase.__init__(self) + self.dtype = getdtype(dtype, arg1, default=float) + + # First get the shape + if issparse(arg1): + if arg1.format == "lil" and copy: + A = arg1.copy() + else: + A = arg1.tolil() + + if dtype is not None: + A = A.astype(dtype, copy=False) + + self._shape = check_shape(A.shape) + self.dtype = A.dtype + self.rows = A.rows + self.data = A.data + elif isinstance(arg1,tuple): + if isshape(arg1): + if shape is not None: + raise ValueError('invalid use of shape parameter') + M, N = arg1 + self._shape = check_shape((M, N)) + self.rows = np.empty((M,), dtype=object) + self.data = np.empty((M,), dtype=object) + for i in range(M): + self.rows[i] = [] + self.data[i] = [] + else: + raise TypeError('unrecognized lil_array constructor usage') + else: + # assume A is dense + try: + A = self._ascontainer(arg1) + except TypeError as e: + raise TypeError('unsupported matrix type') from e + else: + A = self._csr_container(A, dtype=dtype).tolil() + + self._shape = check_shape(A.shape) + self.dtype = A.dtype + self.rows = A.rows + self.data = A.data + + def __iadd__(self,other): + self[:,:] = self + other + return self + + def __isub__(self,other): + self[:,:] = self - other + return self + + def __imul__(self,other): + if isscalarlike(other): + self[:,:] = self * other + return self + else: + return NotImplemented + + def __itruediv__(self,other): + if isscalarlike(other): + self[:,:] = self / other + return self + else: + return NotImplemented + + # Whenever the dimensions change, empty lists should be created for each + # row + + def _getnnz(self, axis=None): + if axis is None: + return sum([len(rowvals) for rowvals in self.data]) + if axis < 0: + axis += 2 + if axis == 0: + out = np.zeros(self.shape[1], dtype=np.intp) + for row in self.rows: + out[row] += 1 + return out + elif axis == 1: + return 
np.array([len(rowvals) for rowvals in self.data], dtype=np.intp) + else: + raise ValueError('axis out of bounds') + + def count_nonzero(self): + return sum(np.count_nonzero(rowvals) for rowvals in self.data) + + _getnnz.__doc__ = _spbase._getnnz.__doc__ + count_nonzero.__doc__ = _spbase.count_nonzero.__doc__ + + def __str__(self): + val = '' + for i, row in enumerate(self.rows): + for pos, j in enumerate(row): + val += f" {str((i, j))}\t{str(self.data[i][pos])}\n" + return val[:-1] + + def getrowview(self, i): + """Returns a view of the 'i'th row (without copying). + """ + new = self._lil_container((1, self.shape[1]), dtype=self.dtype) + new.rows[0] = self.rows[i] + new.data[0] = self.data[i] + return new + + def getrow(self, i): + """Returns a copy of the 'i'th row. + """ + M, N = self.shape + if i < 0: + i += M + if i < 0 or i >= M: + raise IndexError('row index out of bounds') + new = self._lil_container((1, N), dtype=self.dtype) + new.rows[0] = self.rows[i][:] + new.data[0] = self.data[i][:] + return new + + def __getitem__(self, key): + # Fast path for simple (int, int) indexing. + if (isinstance(key, tuple) and len(key) == 2 and + isinstance(key[0], INT_TYPES) and + isinstance(key[1], INT_TYPES)): + # lil_get1 handles validation for us. + return self._get_intXint(*key) + # Everything else takes the normal path. + return IndexMixin.__getitem__(self, key) + + def _asindices(self, idx, N): + # LIL routines handle bounds-checking for us, so don't do it here. 
+ try: + x = np.asarray(idx) + except (ValueError, TypeError, MemoryError) as e: + raise IndexError('invalid index') from e + if x.ndim not in (1, 2): + raise IndexError('Index dimension must be <= 2') + return x + + def _get_intXint(self, row, col): + v = _csparsetools.lil_get1(self.shape[0], self.shape[1], self.rows, + self.data, row, col) + return self.dtype.type(v) + + def _get_sliceXint(self, row, col): + row = range(*row.indices(self.shape[0])) + return self._get_row_ranges(row, slice(col, col+1)) + + def _get_arrayXint(self, row, col): + row = row.squeeze() + return self._get_row_ranges(row, slice(col, col+1)) + + def _get_intXslice(self, row, col): + return self._get_row_ranges((row,), col) + + def _get_sliceXslice(self, row, col): + row = range(*row.indices(self.shape[0])) + return self._get_row_ranges(row, col) + + def _get_arrayXslice(self, row, col): + return self._get_row_ranges(row, col) + + def _get_intXarray(self, row, col): + row = np.array(row, dtype=col.dtype, ndmin=1) + return self._get_columnXarray(row, col) + + def _get_sliceXarray(self, row, col): + row = np.arange(*row.indices(self.shape[0])) + return self._get_columnXarray(row, col) + + def _get_columnXarray(self, row, col): + # outer indexing + row, col = _broadcast_arrays(row[:,None], col) + return self._get_arrayXarray(row, col) + + def _get_arrayXarray(self, row, col): + # inner indexing + i, j = map(np.atleast_2d, _prepare_index_for_memoryview(row, col)) + new = self._lil_container(i.shape, dtype=self.dtype) + _csparsetools.lil_fancy_get(self.shape[0], self.shape[1], + self.rows, self.data, + new.rows, new.data, + i, j) + return new + + def _get_row_ranges(self, rows, col_slice): + """ + Fast path for indexing in the case where column index is slice. + + This gains performance improvement over brute force by more + efficient skipping of zeros, by accessing the elements + column-wise in order. + + Parameters + ---------- + rows : sequence or range + Rows indexed. 
If range, must be within valid bounds. + col_slice : slice + Columns indexed + + """ + j_start, j_stop, j_stride = col_slice.indices(self.shape[1]) + col_range = range(j_start, j_stop, j_stride) + nj = len(col_range) + new = self._lil_container((len(rows), nj), dtype=self.dtype) + + _csparsetools.lil_get_row_ranges(self.shape[0], self.shape[1], + self.rows, self.data, + new.rows, new.data, + rows, + j_start, j_stop, j_stride, nj) + + return new + + def _set_intXint(self, row, col, x): + _csparsetools.lil_insert(self.shape[0], self.shape[1], self.rows, + self.data, row, col, x) + + def _set_arrayXarray(self, row, col, x): + i, j, x = map(np.atleast_2d, _prepare_index_for_memoryview(row, col, x)) + _csparsetools.lil_fancy_set(self.shape[0], self.shape[1], + self.rows, self.data, + i, j, x) + + def _set_arrayXarray_sparse(self, row, col, x): + # Fall back to densifying x + x = np.asarray(x.toarray(), dtype=self.dtype) + x, _ = _broadcast_arrays(x, row) + self._set_arrayXarray(row, col, x) + + def __setitem__(self, key, x): + if isinstance(key, tuple) and len(key) == 2: + row, col = key + # Fast path for simple (int, int) indexing. + if isinstance(row, INT_TYPES) and isinstance(col, INT_TYPES): + x = self.dtype.type(x) + if x.size > 1: + raise ValueError("Trying to assign a sequence to an item") + return self._set_intXint(row, col, x) + # Fast path for full-matrix sparse assignment. + if (isinstance(row, slice) and isinstance(col, slice) and + row == slice(None) and col == slice(None) and + issparse(x) and x.shape == self.shape): + x = self._lil_container(x, dtype=self.dtype) + self.rows = x.rows + self.data = x.data + return + # Everything else takes the normal path. 
+ IndexMixin.__setitem__(self, key, x) + + def _mul_scalar(self, other): + if other == 0: + # Multiply by zero: return the zero matrix + new = self._lil_container(self.shape, dtype=self.dtype) + else: + res_dtype = upcast_scalar(self.dtype, other) + + new = self.copy() + new = new.astype(res_dtype) + # Multiply this scalar by every element. + for j, rowvals in enumerate(new.data): + new.data[j] = [val*other for val in rowvals] + return new + + def __truediv__(self, other): # self / other + if isscalarlike(other): + new = self.copy() + new.dtype = np.result_type(self, other) + # Divide every element by this scalar + for j, rowvals in enumerate(new.data): + new.data[j] = [val/other for val in rowvals] + return new + else: + return self.tocsr() / other + + def copy(self): + M, N = self.shape + new = self._lil_container(self.shape, dtype=self.dtype) + # This is ~14x faster than calling deepcopy() on rows and data. + _csparsetools.lil_get_row_ranges(M, N, self.rows, self.data, + new.rows, new.data, range(M), + 0, N, 1, N) + return new + + copy.__doc__ = _spbase.copy.__doc__ + + def reshape(self, *args, **kwargs): + shape = check_shape(args, self.shape) + order, copy = check_reshape_kwargs(kwargs) + + # Return early if reshape is not required + if shape == self.shape: + if copy: + return self.copy() + else: + return self + + new = self._lil_container(shape, dtype=self.dtype) + + if order == 'C': + ncols = self.shape[1] + for i, row in enumerate(self.rows): + for col, j in enumerate(row): + new_r, new_c = np.unravel_index(i * ncols + j, shape) + new[new_r, new_c] = self[i, j] + elif order == 'F': + nrows = self.shape[0] + for i, row in enumerate(self.rows): + for col, j in enumerate(row): + new_r, new_c = np.unravel_index(i + j * nrows, shape, order) + new[new_r, new_c] = self[i, j] + else: + raise ValueError("'order' must be 'C' or 'F'") + + return new + + reshape.__doc__ = _spbase.reshape.__doc__ + + def resize(self, *shape): + shape = check_shape(shape) + new_M, new_N 
= shape + M, N = self.shape + + if new_M < M: + self.rows = self.rows[:new_M] + self.data = self.data[:new_M] + elif new_M > M: + self.rows = np.resize(self.rows, new_M) + self.data = np.resize(self.data, new_M) + for i in range(M, new_M): + self.rows[i] = [] + self.data[i] = [] + + if new_N < N: + for row, data in zip(self.rows, self.data): + trunc = bisect_left(row, new_N) + del row[trunc:] + del data[trunc:] + + self._shape = shape + + resize.__doc__ = _spbase.resize.__doc__ + + def toarray(self, order=None, out=None): + d = self._process_toarray_args(order, out) + for i, row in enumerate(self.rows): + for pos, j in enumerate(row): + d[i, j] = self.data[i][pos] + return d + + toarray.__doc__ = _spbase.toarray.__doc__ + + def transpose(self, axes=None, copy=False): + return self.tocsr(copy=copy).transpose(axes=axes, copy=False).tolil(copy=False) + + transpose.__doc__ = _spbase.transpose.__doc__ + + def tolil(self, copy=False): + if copy: + return self.copy() + else: + return self + + tolil.__doc__ = _spbase.tolil.__doc__ + + def tocsr(self, copy=False): + M, N = self.shape + if M == 0 or N == 0: + return self._csr_container((M, N), dtype=self.dtype) + + # construct indptr array + if M*N <= np.iinfo(np.int32).max: + # fast path: it is known that 64-bit indexing will not be needed. 
+ idx_dtype = np.int32 + indptr = np.empty(M + 1, dtype=idx_dtype) + indptr[0] = 0 + _csparsetools.lil_get_lengths(self.rows, indptr[1:]) + np.cumsum(indptr, out=indptr) + nnz = indptr[-1] + else: + idx_dtype = self._get_index_dtype(maxval=N) + lengths = np.empty(M, dtype=idx_dtype) + _csparsetools.lil_get_lengths(self.rows, lengths) + nnz = lengths.sum(dtype=np.int64) + idx_dtype = self._get_index_dtype(maxval=max(N, nnz)) + indptr = np.empty(M + 1, dtype=idx_dtype) + indptr[0] = 0 + np.cumsum(lengths, dtype=idx_dtype, out=indptr[1:]) + + indices = np.empty(nnz, dtype=idx_dtype) + data = np.empty(nnz, dtype=self.dtype) + _csparsetools.lil_flatten_to_array(self.rows, indices) + _csparsetools.lil_flatten_to_array(self.data, data) + + # init csr matrix + return self._csr_container((data, indices, indptr), shape=self.shape) + + tocsr.__doc__ = _spbase.tocsr.__doc__ + + +def _prepare_index_for_memoryview(i, j, x=None): + """ + Convert index and data arrays to form suitable for passing to the + Cython fancy getset routines. + + The conversions are necessary since to (i) ensure the integer + index arrays are in one of the accepted types, and (ii) to ensure + the arrays are writable so that Cython memoryview support doesn't + choke on them. + + Parameters + ---------- + i, j + Index arrays + x : optional + Data arrays + + Returns + ------- + i, j, x + Re-formatted arrays (x is omitted, if input was None) + + """ + if i.dtype > j.dtype: + j = j.astype(i.dtype) + elif i.dtype < j.dtype: + i = i.astype(j.dtype) + + if not i.flags.writeable or i.dtype not in (np.int32, np.int64): + i = i.astype(np.intp) + if not j.flags.writeable or j.dtype not in (np.int32, np.int64): + j = j.astype(np.intp) + + if x is not None: + if not x.flags.writeable: + x = x.copy() + return i, j, x + else: + return i, j + + +def isspmatrix_lil(x): + """Is `x` of lil_matrix type? 
+ + Parameters + ---------- + x + object to check for being a lil matrix + + Returns + ------- + bool + True if `x` is a lil matrix, False otherwise + + Examples + -------- + >>> from scipy.sparse import lil_array, lil_matrix, coo_matrix, isspmatrix_lil + >>> isspmatrix_lil(lil_matrix([[5]])) + True + >>> isspmatrix_lil(lil_array([[5]])) + False + >>> isspmatrix_lil(coo_matrix([[5]])) + False + """ + return isinstance(x, lil_matrix) + + +# This namespace class separates array from matrix with isinstance +class lil_array(_lil_base, sparray): + """ + Row-based LIst of Lists sparse array. + + This is a structure for constructing sparse arrays incrementally. + Note that inserting a single item can take linear time in the worst case; + to construct the array efficiently, make sure the items are pre-sorted by + index, per row. + + This can be instantiated in several ways: + lil_array(D) + where D is a 2-D ndarray + + lil_array(S) + with another sparse array or matrix S (equivalent to S.tolil()) + + lil_array((M, N), [dtype]) + to construct an empty array with shape (M, N) + dtype is optional, defaulting to dtype='d'. + + Attributes + ---------- + dtype : dtype + Data type of the array + shape : 2-tuple + Shape of the array + ndim : int + Number of dimensions (this is always 2) + nnz + size + data + LIL format data array of the array + rows + LIL format row index array of the array + T + + Notes + ----- + Sparse arrays can be used in arithmetic operations: they support + addition, subtraction, multiplication, division, and matrix power. 
+ + Advantages of the LIL format + - supports flexible slicing + - changes to the array sparsity structure are efficient + + Disadvantages of the LIL format + - arithmetic operations LIL + LIL are slow (consider CSR or CSC) + - slow column slicing (consider CSC) + - slow matrix vector products (consider CSR or CSC) + + Intended Usage + - LIL is a convenient format for constructing sparse arrays + - once an array has been constructed, convert to CSR or + CSC format for fast arithmetic and matrix vector operations + - consider using the COO format when constructing large arrays + + Data Structure + - An array (``self.rows``) of rows, each of which is a sorted + list of column indices of non-zero elements. + - The corresponding nonzero values are stored in similar + fashion in ``self.data``. + + """ + + +class lil_matrix(spmatrix, _lil_base): + """ + Row-based LIst of Lists sparse matrix. + + This is a structure for constructing sparse matrices incrementally. + Note that inserting a single item can take linear time in the worst case; + to construct the matrix efficiently, make sure the items are pre-sorted by + index, per row. + + This can be instantiated in several ways: + lil_matrix(D) + where D is a 2-D ndarray + + lil_matrix(S) + with another sparse array or matrix S (equivalent to S.tolil()) + + lil_matrix((M, N), [dtype]) + to construct an empty matrix with shape (M, N) + dtype is optional, defaulting to dtype='d'. + + Attributes + ---------- + dtype : dtype + Data type of the matrix + shape : 2-tuple + Shape of the matrix + ndim : int + Number of dimensions (this is always 2) + nnz + size + data + LIL format data array of the matrix + rows + LIL format row index array of the matrix + T + + Notes + ----- + Sparse matrices can be used in arithmetic operations: they support + addition, subtraction, multiplication, division, and matrix power. 
+ + Advantages of the LIL format + - supports flexible slicing + - changes to the matrix sparsity structure are efficient + + Disadvantages of the LIL format + - arithmetic operations LIL + LIL are slow (consider CSR or CSC) + - slow column slicing (consider CSC) + - slow matrix vector products (consider CSR or CSC) + + Intended Usage + - LIL is a convenient format for constructing sparse matrices + - once a matrix has been constructed, convert to CSR or + CSC format for fast arithmetic and matrix vector operations + - consider using the COO format when constructing large matrices + + Data Structure + - An array (``self.rows``) of rows, each of which is a sorted + list of column indices of non-zero elements. + - The corresponding nonzero values are stored in similar + fashion in ``self.data``. + + """ diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_matrix.py b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_matrix.py new file mode 100644 index 0000000000000000000000000000000000000000..1ab8749423833b78f7efc17feb6e1a8e6405408a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_matrix.py @@ -0,0 +1,113 @@ +class spmatrix: + """This class provides a base class for all sparse matrix classes. + + It cannot be instantiated. Most of the work is provided by subclasses. 
+ """ + + @property + def _bsr_container(self): + from ._bsr import bsr_matrix + return bsr_matrix + + @property + def _coo_container(self): + from ._coo import coo_matrix + return coo_matrix + + @property + def _csc_container(self): + from ._csc import csc_matrix + return csc_matrix + + @property + def _csr_container(self): + from ._csr import csr_matrix + return csr_matrix + + @property + def _dia_container(self): + from ._dia import dia_matrix + return dia_matrix + + @property + def _dok_container(self): + from ._dok import dok_matrix + return dok_matrix + + @property + def _lil_container(self): + from ._lil import lil_matrix + return lil_matrix + + # Restore matrix multiplication + def __mul__(self, other): + return self._matmul_dispatch(other) + + def __rmul__(self, other): + return self._rmatmul_dispatch(other) + + # Restore matrix power + def __pow__(self, power): + from .linalg import matrix_power + + return matrix_power(self, power) + + ## Backward compatibility + + def set_shape(self, shape): + """Set the shape of the matrix in-place""" + # Make sure copy is False since this is in place + # Make sure format is unchanged because we are doing a __dict__ swap + new_self = self.reshape(shape, copy=False).asformat(self.format) + self.__dict__ = new_self.__dict__ + + def get_shape(self): + """Get the shape of the matrix""" + return self._shape + + shape = property(fget=get_shape, fset=set_shape, + doc="Shape of the matrix") + + def asfptype(self): + """Upcast matrix to a floating point format (if necessary)""" + return self._asfptype() + + def getmaxprint(self): + """Maximum number of elements to display when printed.""" + return self._getmaxprint() + + def getformat(self): + """Matrix storage format""" + return self.format + + def getnnz(self, axis=None): + """Number of stored values, including explicit zeros. + + Parameters + ---------- + axis : None, 0, or 1 + Select between the number of values across the whole array, in + each column, or in each row. 
+ """ + return self._getnnz(axis=axis) + + def getH(self): + """Return the Hermitian transpose of this matrix. + + See Also + -------- + numpy.matrix.getH : NumPy's implementation of `getH` for matrices + """ + return self.conjugate().transpose() + + def getcol(self, j): + """Returns a copy of column j of the matrix, as an (m x 1) sparse + matrix (column vector). + """ + return self._getcol(j) + + def getrow(self, i): + """Returns a copy of row i of the matrix, as a (1 x n) sparse + matrix (row vector). + """ + return self._getrow(i) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_matrix_io.py b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_matrix_io.py new file mode 100644 index 0000000000000000000000000000000000000000..e115260afb9f625a68f6b14d8750d3650603bd11 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_matrix_io.py @@ -0,0 +1,167 @@ +import numpy as np +import scipy as sp + +__all__ = ['save_npz', 'load_npz'] + + +# Make loading safe vs. malicious input +PICKLE_KWARGS = dict(allow_pickle=False) + + +def save_npz(file, matrix, compressed=True): + """ Save a sparse matrix or array to a file using ``.npz`` format. + + Parameters + ---------- + file : str or file-like object + Either the file name (string) or an open file (file-like object) + where the data will be saved. If file is a string, the ``.npz`` + extension will be appended to the file name if it is not already + there. + matrix: spmatrix or sparray + The sparse matrix or array to save. + Supported formats: ``csc``, ``csr``, ``bsr``, ``dia`` or ``coo``. + compressed : bool, optional + Allow compressing the file. Default: True + + See Also + -------- + scipy.sparse.load_npz: Load a sparse matrix from a file using ``.npz`` format. + numpy.savez: Save several arrays into a ``.npz`` archive. + numpy.savez_compressed : Save several arrays into a compressed ``.npz`` archive. 
+ + Examples + -------- + Store sparse matrix to disk, and load it again: + + >>> import numpy as np + >>> import scipy as sp + >>> sparse_matrix = sp.sparse.csc_matrix([[0, 0, 3], [4, 0, 0]]) + >>> sparse_matrix + <2x3 sparse matrix of type '' + with 2 stored elements in Compressed Sparse Column format> + >>> sparse_matrix.toarray() + array([[0, 0, 3], + [4, 0, 0]], dtype=int64) + + >>> sp.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix) + >>> sparse_matrix = sp.sparse.load_npz('/tmp/sparse_matrix.npz') + + >>> sparse_matrix + <2x3 sparse matrix of type '' + with 2 stored elements in Compressed Sparse Column format> + >>> sparse_matrix.toarray() + array([[0, 0, 3], + [4, 0, 0]], dtype=int64) + """ + arrays_dict = {} + if matrix.format in ('csc', 'csr', 'bsr'): + arrays_dict.update(indices=matrix.indices, indptr=matrix.indptr) + elif matrix.format == 'dia': + arrays_dict.update(offsets=matrix.offsets) + elif matrix.format == 'coo': + arrays_dict.update(row=matrix.row, col=matrix.col) + else: + msg = f'Save is not implemented for sparse matrix of format {matrix.format}.' + raise NotImplementedError(msg) + arrays_dict.update( + format=matrix.format.encode('ascii'), + shape=matrix.shape, + data=matrix.data + ) + if isinstance(matrix, sp.sparse.sparray): + arrays_dict.update(_is_array=True) + if compressed: + np.savez_compressed(file, **arrays_dict) + else: + np.savez(file, **arrays_dict) + + +def load_npz(file): + """ Load a sparse array/matrix from a file using ``.npz`` format. + + Parameters + ---------- + file : str or file-like object + Either the file name (string) or an open file (file-like object) + where the data will be loaded. + + Returns + ------- + result : csc_array, csr_array, bsr_array, dia_array or coo_array + A sparse array/matrix containing the loaded data. + + Raises + ------ + OSError + If the input file does not exist or cannot be read. 
+ + See Also + -------- + scipy.sparse.save_npz: Save a sparse array/matrix to a file using ``.npz`` format. + numpy.load: Load several arrays from a ``.npz`` archive. + + Examples + -------- + Store sparse array/matrix to disk, and load it again: + + >>> import numpy as np + >>> import scipy as sp + >>> sparse_array = sp.sparse.csc_array([[0, 0, 3], [4, 0, 0]]) + >>> sparse_array + <2x3 sparse array of type '' + with 2 stored elements in Compressed Sparse Column format> + >>> sparse_array.toarray() + array([[0, 0, 3], + [4, 0, 0]], dtype=int64) + + >>> sp.sparse.save_npz('/tmp/sparse_array.npz', sparse_array) + >>> sparse_array = sp.sparse.load_npz('/tmp/sparse_array.npz') + + >>> sparse_array + <2x3 sparse array of type '' + with 2 stored elements in Compressed Sparse Column format> + >>> sparse_array.toarray() + array([[0, 0, 3], + [4, 0, 0]], dtype=int64) + + In this example we force the result to be csr_array from csr_matrix + >>> sparse_matrix = sp.sparse.csc_matrix([[0, 0, 3], [4, 0, 0]]) + >>> sp.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix) + >>> tmp = sp.sparse.load_npz('/tmp/sparse_matrix.npz') + >>> sparse_array = sp.sparse.csr_array(tmp) + """ + with np.load(file, **PICKLE_KWARGS) as loaded: + sparse_format = loaded.get('format') + if sparse_format is None: + raise ValueError(f'The file {file} does not contain ' + f'a sparse array or matrix.') + sparse_format = sparse_format.item() + + if not isinstance(sparse_format, str): + # Play safe with Python 2 vs 3 backward compatibility; + # files saved with SciPy < 1.0.0 may contain unicode or bytes. 
+ sparse_format = sparse_format.decode('ascii') + + if loaded.get('_is_array'): + sparse_type = sparse_format + '_array' + else: + sparse_type = sparse_format + '_matrix' + + try: + cls = getattr(sp.sparse, f'{sparse_type}') + except AttributeError as e: + raise ValueError(f'Unknown format "{sparse_type}"') from e + + if sparse_format in ('csc', 'csr', 'bsr'): + return cls((loaded['data'], loaded['indices'], loaded['indptr']), + shape=loaded['shape']) + elif sparse_format == 'dia': + return cls((loaded['data'], loaded['offsets']), + shape=loaded['shape']) + elif sparse_format == 'coo': + return cls((loaded['data'], (loaded['row'], loaded['col'])), + shape=loaded['shape']) + else: + raise NotImplementedError(f'Load is not implemented for ' + f'sparse matrix of format {sparse_format}.') diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_spfuncs.py b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_spfuncs.py new file mode 100644 index 0000000000000000000000000000000000000000..8e9b0abcede6387e74538baf839a303c6cc1b6be --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/_spfuncs.py @@ -0,0 +1,76 @@ +""" Functions that operate on sparse matrices +""" + +__all__ = ['count_blocks','estimate_blocksize'] + +from ._base import issparse +from ._csr import csr_array +from ._sparsetools import csr_count_blocks + + +def estimate_blocksize(A,efficiency=0.7): + """Attempt to determine the blocksize of a sparse matrix + + Returns a blocksize=(r,c) such that + - A.nnz / A.tobsr( (r,c) ).nnz > efficiency + """ + if not (issparse(A) and A.format in ("csc", "csr")): + A = csr_array(A) + + if A.nnz == 0: + return (1,1) + + if not 0 < efficiency < 1.0: + raise ValueError('efficiency must satisfy 0.0 < efficiency < 1.0') + + high_efficiency = (1.0 + efficiency) / 2.0 + nnz = float(A.nnz) + M,N = A.shape + + if M % 2 == 0 and N % 2 == 0: + e22 = nnz / (4 * count_blocks(A,(2,2))) + else: + e22 = 0.0 + + if M % 3 == 0 and N % 3 == 0: + e33 = nnz / 
(9 * count_blocks(A,(3,3))) + else: + e33 = 0.0 + + if e22 > high_efficiency and e33 > high_efficiency: + e66 = nnz / (36 * count_blocks(A,(6,6))) + if e66 > efficiency: + return (6,6) + else: + return (3,3) + else: + if M % 4 == 0 and N % 4 == 0: + e44 = nnz / (16 * count_blocks(A,(4,4))) + else: + e44 = 0.0 + + if e44 > efficiency: + return (4,4) + elif e33 > efficiency: + return (3,3) + elif e22 > efficiency: + return (2,2) + else: + return (1,1) + + +def count_blocks(A,blocksize): + """For a given blocksize=(r,c) count the number of occupied + blocks in a sparse matrix A + """ + r,c = blocksize + if r < 1 or c < 1: + raise ValueError('r and c must be positive') + + if issparse(A): + if A.format == "csr": + M,N = A.shape + return csr_count_blocks(M,N,r,c,A.indptr,A.indices) + elif A.format == "csc": + return count_blocks(A.T,(c,r)) + return count_blocks(csr_array(A),blocksize) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/base.py b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/base.py new file mode 100644 index 0000000000000000000000000000000000000000..d0a427e4570e07cc71e9e45bf98c7cf61798125b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/base.py @@ -0,0 +1,33 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.sparse` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'MAXPRINT', + 'SparseEfficiencyWarning', + 'SparseFormatWarning', + 'SparseWarning', + 'asmatrix', + 'check_reshape_kwargs', + 'check_shape', + 'get_sum_dtype', + 'isdense', + 'isscalarlike', + 'issparse', + 'isspmatrix', + 'spmatrix', + 'validateaxis', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="sparse", module="base", + private_modules=["_base"], all=__all__, + attribute=name) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/bsr.py b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/bsr.py new file mode 100644 index 0000000000000000000000000000000000000000..c686301a78fc3e2221600eb06035a5cb12898cdb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/bsr.py @@ -0,0 +1,36 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.sparse` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'bsr_matmat', + 'bsr_matrix', + 'bsr_matvec', + 'bsr_matvecs', + 'bsr_sort_indices', + 'bsr_tocsr', + 'bsr_transpose', + 'check_shape', + 'csr_matmat_maxnnz', + 'getdata', + 'getdtype', + 'isshape', + 'isspmatrix_bsr', + 'spmatrix', + 'to_native', + 'upcast', + 'warn', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="sparse", module="bsr", + private_modules=["_bsr"], all=__all__, + attribute=name) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/compressed.py b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/compressed.py new file mode 100644 index 0000000000000000000000000000000000000000..e6dc8a73e5ab527cfe0b73d558dae25047cfb98b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/compressed.py @@ -0,0 +1,43 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.sparse` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'IndexMixin', + 'SparseEfficiencyWarning', + 'check_shape', + 'csr_column_index1', + 'csr_column_index2', + 'csr_row_index', + 'csr_row_slice', + 'csr_sample_offsets', + 'csr_sample_values', + 'csr_todense', + 'downcast_intp_index', + 'get_csr_submatrix', + 'get_sum_dtype', + 'getdtype', + 'is_pydata_spmatrix', + 'isdense', + 'isintlike', + 'isscalarlike', + 'isshape', + 'operator', + 'to_native', + 'upcast', + 'upcast_char', + 'warn', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="sparse", module="compressed", + private_modules=["_compressed"], all=__all__, + attribute=name) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/construct.py b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/construct.py new file mode 100644 index 0000000000000000000000000000000000000000..c3d34d2fd38887877980727bceaaa215129bf283 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/construct.py @@ -0,0 +1,44 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.sparse` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'block_diag', + 'bmat', + 'bsr_matrix', + 'check_random_state', + 'coo_matrix', + 'csc_matrix', + 'csr_hstack', + 'csr_matrix', + 'dia_matrix', + 'diags', + 'eye', + 'get_index_dtype', + 'hstack', + 'identity', + 'isscalarlike', + 'issparse', + 'kron', + 'kronsum', + 'numbers', + 'rand', + 'random', + 'rng_integers', + 'spdiags', + 'upcast', + 'vstack', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="sparse", module="construct", + private_modules=["_construct"], all=__all__, + attribute=name) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/coo.py b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/coo.py new file mode 100644 index 0000000000000000000000000000000000000000..bda2da3d09a676ab79739331a21ba26102bb90ae --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/coo.py @@ -0,0 +1,37 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.sparse` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'SparseEfficiencyWarning', + 'check_reshape_kwargs', + 'check_shape', + 'coo_matrix', + 'coo_matvec', + 'coo_tocsr', + 'coo_todense', + 'downcast_intp_index', + 'getdata', + 'getdtype', + 'isshape', + 'isspmatrix_coo', + 'operator', + 'spmatrix', + 'to_native', + 'upcast', + 'upcast_char', + 'warn', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="sparse", module="coo", + private_modules=["_coo"], all=__all__, + attribute=name) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/csc.py b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/csc.py new file mode 100644 index 0000000000000000000000000000000000000000..d140b841e0724155f8602a4215836e2c8a7fad72 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/csc.py @@ -0,0 +1,25 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.sparse` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'csc_matrix', + 'csc_tocsr', + 'expandptr', + 'isspmatrix_csc', + 'spmatrix', + 'upcast', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="sparse", module="csc", + private_modules=["_csc"], all=__all__, + attribute=name) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/csr.py b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/csr.py new file mode 100644 index 0000000000000000000000000000000000000000..86bb1e072ebe4480e9dcb01f2d36f7387872b898 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/csr.py @@ -0,0 +1,27 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.sparse` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'csr_count_blocks', + 'csr_matrix', + 'csr_tobsr', + 'csr_tocsc', + 'get_csr_submatrix', + 'isspmatrix_csr', + 'spmatrix', + 'upcast', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="sparse", module="csr", + private_modules=["_csr"], all=__all__, + attribute=name) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/data.py b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/data.py new file mode 100644 index 0000000000000000000000000000000000000000..a9958bcda6dd35ac0779514d79b7f1c494c1b01a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/data.py @@ -0,0 +1,23 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.sparse` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'isscalarlike', + 'name', + 'npfunc', + 'validateaxis', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="sparse", module="data", + private_modules=["_data"], all=__all__, + attribute=name) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/dia.py b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/dia.py new file mode 100644 index 0000000000000000000000000000000000000000..f79abd39f114b23df8ceb6eafb7fcc1c07218dcb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/dia.py @@ -0,0 +1,29 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.sparse` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'check_shape', + 'dia_matrix', + 'dia_matvec', + 'get_sum_dtype', + 'getdtype', + 'isshape', + 'isspmatrix_dia', + 'spmatrix', + 'upcast_char', + 'validateaxis', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="sparse", module="dia", + private_modules=["_dia"], all=__all__, + attribute=name) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/dok.py b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/dok.py new file mode 100644 index 0000000000000000000000000000000000000000..847824456eaa3145d5ecb078e30251875168775b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/dok.py @@ -0,0 +1,32 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.sparse` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'IndexMixin', + 'check_shape', + 'dok_matrix', + 'getdtype', + 'isdense', + 'isintlike', + 'isscalarlike', + 'isshape', + 'isspmatrix_dok', + 'itertools', + 'spmatrix', + 'upcast', + 'upcast_scalar', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="sparse", module="dok", + private_modules=["_dok"], all=__all__, + attribute=name) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/extract.py b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/extract.py new file mode 100644 index 0000000000000000000000000000000000000000..be5e161b6f99e57e2b2a6b3d4f1ef6427c07658d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/extract.py @@ -0,0 +1,23 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.sparse` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'coo_matrix', + 'find', + 'tril', + 'triu', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="sparse", module="extract", + private_modules=["_extract"], all=__all__, + attribute=name) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/lil.py b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/lil.py new file mode 100644 index 0000000000000000000000000000000000000000..31e5f20e4887c4e163aa2d807c89fe0768e3afb0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/lil.py @@ -0,0 +1,31 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.sparse` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'INT_TYPES', + 'IndexMixin', + 'bisect_left', + 'check_reshape_kwargs', + 'check_shape', + 'getdtype', + 'isscalarlike', + 'isshape', + 'isspmatrix_lil', + 'lil_matrix', + 'spmatrix', + 'upcast_scalar', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="sparse", module="lil", + private_modules=["_lil"], all=__all__, + attribute=name) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/sparsetools.py b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/sparsetools.py new file mode 100644 index 0000000000000000000000000000000000000000..47ac80adae7145a6192f9fb9b225a1762ff830ce --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/sparsetools.py @@ -0,0 +1,98 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.sparse` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'bsr_diagonal', + 'bsr_eldiv_bsr', + 'bsr_elmul_bsr', + 'bsr_ge_bsr', + 'bsr_gt_bsr', + 'bsr_le_bsr', + 'bsr_lt_bsr', + 'bsr_matmat', + 'bsr_matvec', + 'bsr_matvecs', + 'bsr_maximum_bsr', + 'bsr_minimum_bsr', + 'bsr_minus_bsr', + 'bsr_ne_bsr', + 'bsr_plus_bsr', + 'bsr_scale_columns', + 'bsr_scale_rows', + 'bsr_sort_indices', + 'bsr_tocsr', + 'bsr_transpose', + 'coo_matvec', + 'coo_tocsr', + 'coo_todense', + 'cs_graph_components', + 'csc_diagonal', + 'csc_eldiv_csc', + 'csc_elmul_csc', + 'csc_ge_csc', + 'csc_gt_csc', + 'csc_le_csc', + 'csc_lt_csc', + 'csc_matmat', + 'csc_matmat_maxnnz', + 'csc_matvec', + 'csc_matvecs', + 'csc_maximum_csc', + 'csc_minimum_csc', + 'csc_minus_csc', + 'csc_ne_csc', + 'csc_plus_csc', + 'csc_tocsr', + 'csr_column_index1', + 'csr_column_index2', + 'csr_count_blocks', + 'csr_diagonal', + 'csr_eldiv_csr', + 'csr_eliminate_zeros', + 'csr_elmul_csr', + 'csr_ge_csr', + 'csr_gt_csr', + 'csr_has_canonical_format', + 'csr_has_sorted_indices', + 'csr_hstack', + 'csr_le_csr', + 'csr_lt_csr', + 'csr_matmat', + 'csr_matmat_maxnnz', + 'csr_matvec', + 'csr_matvecs', + 'csr_maximum_csr', + 'csr_minimum_csr', + 'csr_minus_csr', + 'csr_ne_csr', + 'csr_plus_csr', + 'csr_row_index', + 'csr_row_slice', + 'csr_sample_offsets', + 'csr_sample_values', + 'csr_scale_columns', + 'csr_scale_rows', + 'csr_sort_indices', + 'csr_sum_duplicates', + 'csr_tobsr', + 'csr_tocsc', + 'csr_todense', + 'dia_matvec', + 'expandptr', + 'get_csr_submatrix', + 'test_throw_error', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="sparse", module="sparsetools", + private_modules=["_sparsetools"], all=__all__, + attribute=name) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/spfuncs.py b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/spfuncs.py new file mode 100644 index 
0000000000000000000000000000000000000000..b005a9b7c56b82f0c902c26664607b237d808f68 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/spfuncs.py @@ -0,0 +1,22 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.sparse` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'csr_count_blocks', + 'estimate_blocksize', + 'count_blocks' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="sparse", module="spfuncs", + private_modules=["_spfuncs"], all=__all__, + attribute=name) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/sputils.py b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/sputils.py new file mode 100644 index 0000000000000000000000000000000000000000..bdacb42dd0fb23b956fb95f2eea913cfb933d029 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/sputils.py @@ -0,0 +1,44 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.sparse` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'asmatrix', + 'check_reshape_kwargs', + 'check_shape', + 'downcast_intp_index', + 'get_index_dtype', + 'get_sum_dtype', + 'getdata', + 'getdtype', + 'is_pydata_spmatrix', + 'isdense', + 'isintlike', + 'ismatrix', + 'isscalarlike', + 'issequence', + 'isshape', + 'matrix', + 'operator', + 'prod', + 'supported_dtypes', + 'sys', + 'to_native', + 'upcast', + 'upcast_char', + 'upcast_scalar', + 'validateaxis', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="sparse", module="sputils", + private_modules=["_sputils"], all=__all__, + attribute=name) diff --git a/llmeval-env/lib/python3.10/site-packages/scipy/sparse/tests/test_matrix_io.py b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/tests/test_matrix_io.py new file mode 100644 index 0000000000000000000000000000000000000000..90b4ea64a8928073eb5dd3f1b2752379f57327d9 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/scipy/sparse/tests/test_matrix_io.py @@ -0,0 +1,109 @@ +import os +import numpy as np +import tempfile + +from pytest import raises as assert_raises +from numpy.testing import assert_equal, assert_ + +from scipy.sparse import (sparray, csc_matrix, csr_matrix, bsr_matrix, dia_matrix, + coo_matrix, dok_matrix, csr_array, save_npz, load_npz) + + +DATA_DIR = os.path.join(os.path.dirname(__file__), 'data') + + +def _save_and_load(matrix): + fd, tmpfile = tempfile.mkstemp(suffix='.npz') + os.close(fd) + try: + save_npz(tmpfile, matrix) + loaded_matrix = load_npz(tmpfile) + finally: + os.remove(tmpfile) + return loaded_matrix + +def _check_save_and_load(dense_matrix): + for matrix_class in [csc_matrix, csr_matrix, bsr_matrix, dia_matrix, coo_matrix]: + matrix = matrix_class(dense_matrix) + loaded_matrix = _save_and_load(matrix) + assert_(type(loaded_matrix) is matrix_class) + assert_(loaded_matrix.shape == dense_matrix.shape) + 
assert_(loaded_matrix.dtype == dense_matrix.dtype) + assert_equal(loaded_matrix.toarray(), dense_matrix) + +def test_save_and_load_random(): + N = 10 + np.random.seed(0) + dense_matrix = np.random.random((N, N)) + dense_matrix[dense_matrix > 0.7] = 0 + _check_save_and_load(dense_matrix) + +def test_save_and_load_empty(): + dense_matrix = np.zeros((4,6)) + _check_save_and_load(dense_matrix) + +def test_save_and_load_one_entry(): + dense_matrix = np.zeros((4,6)) + dense_matrix[1,2] = 1 + _check_save_and_load(dense_matrix) + +def test_sparray_vs_spmatrix(): + #save/load matrix + fd, tmpfile = tempfile.mkstemp(suffix='.npz') + os.close(fd) + try: + save_npz(tmpfile, csr_matrix([[1.2, 0, 0.9], [0, 0.3, 0]])) + loaded_matrix = load_npz(tmpfile) + finally: + os.remove(tmpfile) + + #save/load array + fd, tmpfile = tempfile.mkstemp(suffix='.npz') + os.close(fd) + try: + save_npz(tmpfile, csr_array([[1.2, 0, 0.9], [0, 0.3, 0]])) + loaded_array = load_npz(tmpfile) + finally: + os.remove(tmpfile) + + assert not isinstance(loaded_matrix, sparray) + assert isinstance(loaded_array, sparray) + assert_(loaded_matrix.dtype == loaded_array.dtype) + assert_equal(loaded_matrix.toarray(), loaded_array.toarray()) + +def test_malicious_load(): + class Executor: + def __reduce__(self): + return (assert_, (False, 'unexpected code execution')) + + fd, tmpfile = tempfile.mkstemp(suffix='.npz') + os.close(fd) + try: + np.savez(tmpfile, format=Executor()) + + # Should raise a ValueError, not execute code + assert_raises(ValueError, load_npz, tmpfile) + finally: + os.remove(tmpfile) + + +def test_py23_compatibility(): + # Try loading files saved on Python 2 and Python 3. They are not + # the same, since files saved with SciPy versions < 1.0.0 may + # contain unicode. 
+ + a = load_npz(os.path.join(DATA_DIR, 'csc_py2.npz')) + b = load_npz(os.path.join(DATA_DIR, 'csc_py3.npz')) + c = csc_matrix([[0]]) + + assert_equal(a.toarray(), c.toarray()) + assert_equal(b.toarray(), c.toarray()) + +def test_implemented_error(): + # Attempts to save an unsupported type and checks that an + # NotImplementedError is raised. + + x = dok_matrix((2,3)) + x[0,1] = 1 + + assert_raises(NotImplementedError, save_npz, 'x.npz', x)