diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__init__.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ec3dd05726c906ec0d887b72e1de8dd28de00299 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__init__.py @@ -0,0 +1,324 @@ +""" +===================================== +Sparse matrices (:mod:`scipy.sparse`) +===================================== + +.. currentmodule:: scipy.sparse + +.. toctree:: + :hidden: + + sparse.csgraph + sparse.linalg + +SciPy 2-D sparse array package for numeric data. + +.. note:: + + This package is switching to an array interface, compatible with + NumPy arrays, from the older matrix interface. We recommend that + you use the array objects (`bsr_array`, `coo_array`, etc.) for + all new work. + + When using the array interface, please note that: + + - ``x * y`` no longer performs matrix multiplication, but + element-wise multiplication (just like with NumPy arrays). To + make code work with both arrays and matrices, use ``x @ y`` for + matrix multiplication. + - Operations such as `sum`, which used to produce dense matrices, now + produce arrays whose multiplication behavior differs similarly. + - Sparse arrays currently must be two-dimensional. This also means + that all *slicing* operations on these objects must produce + two-dimensional results, or they will result in an error. This + will be addressed in a future version. + + The construction utilities (`eye`, `kron`, `random`, `diags`, etc.) + have not yet been ported, but their results can be wrapped into arrays:: + + A = csr_array(eye(3)) + +Contents +======== + +Sparse array classes +-------------------- + +.. autosummary:: + :toctree: generated/ + + bsr_array - Block Sparse Row array + coo_array - A sparse array in COOrdinate format + csc_array - Compressed Sparse Column array + csr_array - Compressed Sparse Row array + dia_array - Sparse array with DIAgonal storage + dok_array - Dictionary Of Keys based sparse array + lil_array - Row-based list of lists sparse array + sparray - Sparse array base class + +Sparse matrix classes +--------------------- + +.. autosummary:: + :toctree: generated/ + + bsr_matrix - Block Sparse Row matrix + coo_matrix - A sparse matrix in COOrdinate format + csc_matrix - Compressed Sparse Column matrix + csr_matrix - Compressed Sparse Row matrix + dia_matrix - Sparse matrix with DIAgonal storage + dok_matrix - Dictionary Of Keys based sparse matrix + lil_matrix - Row-based list of lists sparse matrix + spmatrix - Sparse matrix base class + +Functions +--------- + +Building sparse arrays: + +.. autosummary:: + :toctree: generated/ + + diags_array - Return a sparse array from diagonals + eye_array - Sparse MxN array whose k-th diagonal is all ones + random_array - Random values in a given shape array + block_array - Build a sparse array from sub-blocks + +Building sparse matrices: + +.. autosummary:: + :toctree: generated/ + + eye - Sparse MxN matrix whose k-th diagonal is all ones + identity - Identity matrix in sparse matrix format + diags - Return a sparse matrix from diagonals + spdiags - Return a sparse matrix from diagonals + bmat - Build a sparse matrix from sparse sub-blocks + random - Random values in a given shape matrix + rand - Random values in a given shape matrix (old interface) + +Building larger structures from smaller (array or matrix): + +.. 
autosummary:: + :toctree: generated/ + + kron - kronecker product of two sparse matrices + kronsum - kronecker sum of sparse matrices + block_diag - Build a block diagonal sparse matrix + tril - Lower triangular portion of a matrix in sparse format + triu - Upper triangular portion of a matrix in sparse format + hstack - Stack sparse matrices horizontally (column wise) + vstack - Stack sparse matrices vertically (row wise) + +Save and load sparse matrices: + +.. autosummary:: + :toctree: generated/ + + save_npz - Save a sparse matrix/array to a file using ``.npz`` format. + load_npz - Load a sparse matrix/array from a file using ``.npz`` format. + +Sparse tools: + +.. autosummary:: + :toctree: generated/ + + find + +Identifying sparse arrays: + +- use `isinstance(A, sp.sparse.sparray)` to check whether `A` is a sparse array or a sparse matrix. +- use `A.format == 'csr'` to check the sparse format. + +Identifying sparse matrices: + +.. autosummary:: + :toctree: generated/ + + issparse + isspmatrix + isspmatrix_csc + isspmatrix_csr + isspmatrix_bsr + isspmatrix_lil + isspmatrix_dok + isspmatrix_coo + isspmatrix_dia + +Submodules +---------- + +.. autosummary:: + + csgraph - Compressed sparse graph routines + linalg - sparse linear algebra routines + +Exceptions +---------- + +.. autosummary:: + :toctree: generated/ + + SparseEfficiencyWarning + SparseWarning + + +Usage information +================= + +There are seven available sparse array types: + + 1. `csc_array`: Compressed Sparse Column format + 2. `csr_array`: Compressed Sparse Row format + 3. `bsr_array`: Block Sparse Row format + 4. `lil_array`: List of Lists format + 5. `dok_array`: Dictionary of Keys format + 6. `coo_array`: COOrdinate format (aka IJV, triplet format) + 7. `dia_array`: DIAgonal format + +To construct an array efficiently, use either `dok_array` or `lil_array`. +The `lil_array` class supports basic slicing and fancy indexing with a +similar syntax to NumPy arrays. As illustrated below, the COO format +may also be used to efficiently construct arrays. Despite their +similarity to NumPy arrays, it is **strongly discouraged** to use NumPy +functions directly on these arrays because NumPy may not properly convert +them for computations, leading to unexpected (and incorrect) results. If you +do want to apply a NumPy function to these arrays, first check if SciPy has +its own implementation for the given sparse array class, or **convert the +sparse array to a NumPy array** (e.g., using the ``toarray`` method of the +class) before applying the method. + +To perform manipulations such as multiplication or inversion, first +convert the array to either CSC or CSR format. The `lil_array` format is +row-based, so conversion to CSR is efficient, whereas conversion to CSC +is less so. + +All conversions among the CSR, CSC, and COO formats are efficient, +linear-time operations. + +Matrix vector product +--------------------- +To do a vector product between a sparse array and a vector, simply use +the array ``dot`` method, as described in its docstring: + +>>> import numpy as np +>>> from scipy.sparse import csr_array +>>> A = csr_array([[1, 2, 0], [0, 0, 3], [4, 0, 5]]) +>>> v = np.array([1, 0, -1]) +>>> A.dot(v) +array([ 1, -3, -1], dtype=int64) + +.. warning:: As of NumPy 1.7, ``np.dot`` is not aware of sparse arrays, + therefore using it will result in unexpected results or errors. 
+ The corresponding dense array should be obtained first instead: + + >>> np.dot(A.toarray(), v) + array([ 1, -3, -1], dtype=int64) + + but then all the performance advantages would be lost. + +The CSR format is especially suitable for fast matrix vector products. + +Example 1 +--------- +Construct a 1000x1000 `lil_array` and add some values to it: + +>>> from scipy.sparse import lil_array +>>> from scipy.sparse.linalg import spsolve +>>> from numpy.linalg import solve, norm +>>> from numpy.random import rand + +>>> A = lil_array((1000, 1000)) +>>> A[0, :100] = rand(100) +>>> A[1, 100:200] = A[0, :100] +>>> A.setdiag(rand(1000)) + +Now convert it to CSR format and solve A x = b for x: + +>>> A = A.tocsr() +>>> b = rand(1000) +>>> x = spsolve(A, b) + +Convert it to a dense array and solve, and check that the result +is the same: + +>>> x_ = solve(A.toarray(), b) + +Now we can compute the norm of the error with: + +>>> err = norm(x-x_) +>>> err < 1e-10 +True + +It should be small :) + + +Example 2 +--------- + +Construct an array in COO format: + +>>> from scipy import sparse +>>> from numpy import array +>>> I = array([0,3,1,0]) +>>> J = array([0,3,1,2]) +>>> V = array([4,5,7,9]) +>>> A = sparse.coo_array((V,(I,J)),shape=(4,4)) + +Notice that the indices do not need to be sorted. + +Duplicate (i,j) entries are summed when converting to CSR or CSC. + +>>> I = array([0,0,1,3,1,0,0]) +>>> J = array([0,2,1,3,1,0,0]) +>>> V = array([1,1,1,1,1,1,1]) +>>> B = sparse.coo_array((V,(I,J)),shape=(4,4)).tocsr() + +This is useful for constructing finite-element stiffness and mass matrices. + +Further details +--------------- + +CSR column indices are not necessarily sorted. Likewise for CSC row +indices. Use the ``.sorted_indices()`` and ``.sort_indices()`` methods when +sorted indices are required (e.g., when passing data to other libraries). + +""" + +# Original code by Travis Oliphant. +# Modified and extended by Ed Schofield, Robert Cimrman, +# Nathan Bell, and Jake Vanderplas. + +import warnings as _warnings + +from ._base import * +from ._csr import * +from ._csc import * +from ._lil import * +from ._dok import * +from ._coo import * +from ._dia import * +from ._bsr import * +from ._construct import * +from ._extract import * +from ._matrix import spmatrix +from ._matrix_io import * + +# For backward compatibility with v0.19. +from . import csgraph + +# Deprecated namespaces, to be removed in v2.0.0 +from . 
import ( + base, bsr, compressed, construct, coo, csc, csr, data, dia, dok, extract, + lil, sparsetools, sputils +) + +__all__ = [s for s in dir() if not s.startswith('_')] + +# Filter PendingDeprecationWarning for np.matrix introduced with numpy 1.15 +msg = 'the matrix subclass is not the recommended way' +_warnings.filterwarnings('ignore', message=msg) + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_base.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..edcb488815987de316f283c5d9f1cf9eb6c09f61 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_base.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_bsr.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_bsr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b2a28ef8eb98ad6724c87bbdc8890e1f7715411b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_bsr.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_compressed.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_compressed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..282269b35b68b05dca1d0f6e470fa3347acafb3b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_compressed.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_construct.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_construct.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a362e1116df27a37f7bb1bbec5f025ad18de08f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_construct.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_coo.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_coo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..40a53dbe13e375be38bca1affd2872e6855eac47 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_coo.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_csc.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_csc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a60c1247187bd942e314265dcaeacc29aae86528 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_csc.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_csr.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_csr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51331b48714afe697e0bfcb95bf084e87841629c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_csr.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_data.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_data.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3452ae8bca467ee5720a654fed2b1bb8356d9673 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_data.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_dok.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_dok.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00987de42517aa06dce7d25864458c71762fac18 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_dok.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_extract.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_extract.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c9afff27158ff1d609a3971d24cf1722385004a1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_extract.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_index.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_index.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3a903b01751ea96ebe9afb7dfae9b31141eee9d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_index.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_lil.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_lil.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96c226f7d95c61dbbdafb184de3953482d4a870a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_lil.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_matrix.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_matrix.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da71095bee1c837c216ef4c5feceb256fb9c0a88 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_matrix.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_matrix_io.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_matrix_io.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0300844391151e962d0dbb39b32c218e757c76f9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_matrix_io.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_spfuncs.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_spfuncs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66b113aaf3b3b92c1e3e8489dc35d9890b77e719 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_spfuncs.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/compressed.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/compressed.cpython-310.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..7333c7956686b625275b937c8eb4c1ea6d2e744d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/compressed.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/construct.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/construct.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..270f5871ce4d12580b393eb337698c04401935f9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/construct.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/coo.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/coo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d22c7d39a09b6c81979fba253328896c6456c7c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/coo.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/csc.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/csc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0633ea33ffe93e7624b12d2cc5602306aa874a62 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/csc.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/data.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/data.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f70ae89526aee4e26ca97e4d3a0206ef3193c8ed Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/data.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/dia.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/dia.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfe7674913e7a541ce3747d7498c2397e3d68ca3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/dia.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/dok.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/dok.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ac7e4ac6c216c8199ae60795946d14892c88fcf Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/dok.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/lil.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/lil.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..254e7e77b1c0388ff540bbe4987f34161ffd97ab Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/lil.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/sparsetools.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/sparsetools.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b43f6e007cfe6f1fa7990b28ee1e0ab4f55a45e Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/sparsetools.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/spfuncs.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/spfuncs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95d76a77e58d5986285518fcbdafb9b4edf1cdee Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/spfuncs.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_bsr.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_bsr.py new file mode 100644 index 0000000000000000000000000000000000000000..8702fcdc9b4583e2c73912fae6153938e1dd026b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_bsr.py @@ -0,0 +1,855 @@ +"""Compressed Block Sparse Row format""" + +__docformat__ = "restructuredtext en" + +__all__ = ['bsr_array', 'bsr_matrix', 'isspmatrix_bsr'] + +from warnings import warn + +import numpy as np + +from scipy._lib._util import copy_if_needed +from ._matrix import spmatrix +from ._data import _data_matrix, _minmax_mixin +from ._compressed import _cs_matrix +from ._base import issparse, _formats, _spbase, sparray +from ._sputils import (isshape, getdtype, getdata, to_native, upcast, + check_shape) +from . import _sparsetools +from ._sparsetools import (bsr_matvec, bsr_matvecs, csr_matmat_maxnnz, + bsr_matmat, bsr_transpose, bsr_sort_indices, + bsr_tocsr) + + +class _bsr_base(_cs_matrix, _minmax_mixin): + _format = 'bsr' + + def __init__(self, arg1, shape=None, dtype=None, copy=False, blocksize=None): + _data_matrix.__init__(self) + + if issparse(arg1): + if arg1.format == self.format and copy: + arg1 = arg1.copy() + else: + arg1 = arg1.tobsr(blocksize=blocksize) + self.indptr, self.indices, self.data, self._shape = ( + arg1.indptr, arg1.indices, arg1.data, arg1._shape + ) + + elif isinstance(arg1,tuple): + if isshape(arg1): + # it's a tuple of matrix dimensions (M,N) + self._shape = check_shape(arg1) + M,N = self.shape + # process blocksize + if blocksize is None: + blocksize = (1,1) + else: + if not isshape(blocksize): + raise ValueError('invalid blocksize=%s' % blocksize) + blocksize = tuple(blocksize) + self.data = np.zeros((0,) + blocksize, getdtype(dtype, default=float)) + + R,C = blocksize + if (M % R) != 0 or (N % C) != 0: + raise ValueError('shape must be multiple of blocksize') + + # Select index dtype large enough to pass array and + # scalar parameters to sparsetools + idx_dtype = self._get_index_dtype(maxval=max(M//R, N//C, R, C)) + self.indices = np.zeros(0, dtype=idx_dtype) + self.indptr = np.zeros(M//R + 1, dtype=idx_dtype) + + elif len(arg1) == 2: + # (data,(row,col)) format + coo = self._coo_container(arg1, dtype=dtype, shape=shape) + bsr = coo.tobsr(blocksize=blocksize) + self.indptr, self.indices, self.data, self._shape = ( + bsr.indptr, bsr.indices, bsr.data, bsr._shape + ) + + elif len(arg1) == 3: + # (data,indices,indptr) format + (data, indices, indptr) = arg1 + + # Select index dtype large enough to pass array and + # scalar parameters to sparsetools + maxval = 1 + if shape is not None: + maxval = max(shape) + if blocksize is not None: + maxval = max(maxval, max(blocksize)) + idx_dtype = self._get_index_dtype((indices, indptr), maxval=maxval, + check_contents=True) + if not copy: + copy = copy_if_needed + self.indices = np.array(indices, copy=copy, dtype=idx_dtype) + self.indptr = np.array(indptr, 
copy=copy, dtype=idx_dtype) + self.data = getdata(data, copy=copy, dtype=dtype) + if self.data.ndim != 3: + raise ValueError( + f'BSR data must be 3-dimensional, got shape={self.data.shape}' + ) + if blocksize is not None: + if not isshape(blocksize): + raise ValueError(f'invalid blocksize={blocksize}') + if tuple(blocksize) != self.data.shape[1:]: + raise ValueError('mismatching blocksize={} vs {}'.format( + blocksize, self.data.shape[1:])) + else: + raise ValueError('unrecognized bsr_array constructor usage') + else: + # must be dense + try: + arg1 = np.asarray(arg1) + except Exception as e: + raise ValueError("unrecognized form for" + " %s_matrix constructor" % self.format) from e + arg1 = self._coo_container( + arg1, dtype=dtype + ).tobsr(blocksize=blocksize) + self.indptr, self.indices, self.data, self._shape = ( + arg1.indptr, arg1.indices, arg1.data, arg1._shape + ) + + if shape is not None: + self._shape = check_shape(shape) + else: + if self.shape is None: + # shape not already set, try to infer dimensions + try: + M = len(self.indptr) - 1 + N = self.indices.max() + 1 + except Exception as e: + raise ValueError('unable to infer matrix dimensions') from e + else: + R,C = self.blocksize + self._shape = check_shape((M*R,N*C)) + + if self.shape is None: + if shape is None: + # TODO infer shape here + raise ValueError('need to infer shape') + else: + self._shape = check_shape(shape) + + if dtype is not None: + self.data = self.data.astype(dtype, copy=False) + + self.check_format(full_check=False) + + def check_format(self, full_check=True): + """Check whether the array/matrix respects the BSR format. + + Parameters + ---------- + full_check : bool, optional + If `True`, run rigorous checks, scanning arrays for valid values. + Note that activating those checks might copy arrays for casting + and modify indices and index pointers in place. + If `False`, run basic checks on attributes. O(1) operations. + Default is `True`. 
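+ + Examples + -------- + A minimal doctest sketch (the array and blocksize here are illustrative + only); a valid array passes the full check silently: + + >>> from scipy.sparse import bsr_array + >>> A = bsr_array([[1, 2], [3, 4]], blocksize=(2, 2)) + >>> A.check_format(full_check=True)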
+ """ + M,N = self.shape + R,C = self.blocksize + + # index arrays should have integer data types + if self.indptr.dtype.kind != 'i': + warn(f"indptr array has non-integer dtype ({self.indptr.dtype.name})", + stacklevel=2) + if self.indices.dtype.kind != 'i': + warn(f"indices array has non-integer dtype ({self.indices.dtype.name})", + stacklevel=2) + + # check array shapes + if self.indices.ndim != 1 or self.indptr.ndim != 1: + raise ValueError("indices, and indptr should be 1-D") + if self.data.ndim != 3: + raise ValueError("data should be 3-D") + + # check index pointer + if (len(self.indptr) != M//R + 1): + raise ValueError("index pointer size (%d) should be (%d)" % + (len(self.indptr), M//R + 1)) + if (self.indptr[0] != 0): + raise ValueError("index pointer should start with 0") + + # check index and data arrays + if (len(self.indices) != len(self.data)): + raise ValueError("indices and data should have the same size") + if (self.indptr[-1] > len(self.indices)): + raise ValueError("Last value of index pointer should be less than " + "the size of index and data arrays") + + self.prune() + + if full_check: + # check format validity (more expensive) + if self.nnz > 0: + if self.indices.max() >= N//C: + raise ValueError("column index values must be < %d (now max %d)" + % (N//C, self.indices.max())) + if self.indices.min() < 0: + raise ValueError("column index values must be >= 0") + if np.diff(self.indptr).min() < 0: + raise ValueError("index pointer values must form a " + "non-decreasing sequence") + + idx_dtype = self._get_index_dtype((self.indices, self.indptr)) + self.indptr = np.asarray(self.indptr, dtype=idx_dtype) + self.indices = np.asarray(self.indices, dtype=idx_dtype) + self.data = to_native(self.data) + # if not self.has_sorted_indices(): + # warn('Indices were not in sorted order. 
Sorting indices.') + # self.sort_indices(check_first=False) + + @property + def blocksize(self) -> tuple: + """Block size of the matrix.""" + return self.data.shape[1:] + + def _getnnz(self, axis=None): + if axis is not None: + raise NotImplementedError("_getnnz over an axis is not implemented " + "for BSR format") + R,C = self.blocksize + return int(self.indptr[-1] * R * C) + + _getnnz.__doc__ = _spbase._getnnz.__doc__ + + def __repr__(self): + _, fmt = _formats[self.format] + sparse_cls = 'array' if isinstance(self, sparray) else 'matrix' + shape_str = 'x'.join(str(x) for x in self.shape) + blksz = 'x'.join(str(x) for x in self.blocksize) + return ( + f"<{shape_str} sparse {sparse_cls} of type '{self.dtype.type}'\n" + f"\twith {self.nnz} stored elements (blocksize = {blksz}) in {fmt} format>" + ) + + def diagonal(self, k=0): + rows, cols = self.shape + if k <= -rows or k >= cols: + return np.empty(0, dtype=self.data.dtype) + R, C = self.blocksize + y = np.zeros(min(rows + min(k, 0), cols - max(k, 0)), + dtype=upcast(self.dtype)) + _sparsetools.bsr_diagonal(k, rows // R, cols // C, R, C, + self.indptr, self.indices, + np.ravel(self.data), y) + return y + + diagonal.__doc__ = _spbase.diagonal.__doc__ + + ########################## + # NotImplemented methods # + ########################## + + def __getitem__(self,key): + raise NotImplementedError + + def __setitem__(self,key,val): + raise NotImplementedError + + ###################### + # Arithmetic methods # + ###################### + + def _add_dense(self, other): + return self.tocoo(copy=False)._add_dense(other) + + def _matmul_vector(self, other): + M,N = self.shape + R,C = self.blocksize + + result = np.zeros(self.shape[0], dtype=upcast(self.dtype, other.dtype)) + + bsr_matvec(M//R, N//C, R, C, + self.indptr, self.indices, self.data.ravel(), + other, result) + + return result + + def _matmul_multivector(self,other): + R,C = self.blocksize + M,N = self.shape + n_vecs = other.shape[1] # number of column vectors + + result = np.zeros((M,n_vecs), dtype=upcast(self.dtype,other.dtype)) + + bsr_matvecs(M//R, N//C, n_vecs, R, C, + self.indptr, self.indices, self.data.ravel(), + other.ravel(), result.ravel()) + + return result + + def _matmul_sparse(self, other): + M, K1 = self.shape + K2, N = other.shape + + R,n = self.blocksize + + # convert to this format + if other.format == "bsr": + C = other.blocksize[1] + else: + C = 1 + + if other.format == "csr" and n == 1: + other = other.tobsr(blocksize=(n,C), copy=False) # lightweight conversion + else: + other = other.tobsr(blocksize=(n,C)) + + idx_dtype = self._get_index_dtype((self.indptr, self.indices, + other.indptr, other.indices)) + + bnnz = csr_matmat_maxnnz(M//R, N//C, + self.indptr.astype(idx_dtype), + self.indices.astype(idx_dtype), + other.indptr.astype(idx_dtype), + other.indices.astype(idx_dtype)) + + idx_dtype = self._get_index_dtype((self.indptr, self.indices, + other.indptr, other.indices), + maxval=bnnz) + indptr = np.empty(self.indptr.shape, dtype=idx_dtype) + indices = np.empty(bnnz, dtype=idx_dtype) + data = np.empty(R*C*bnnz, dtype=upcast(self.dtype,other.dtype)) + + bsr_matmat(bnnz, M//R, N//C, R, C, n, + self.indptr.astype(idx_dtype), + self.indices.astype(idx_dtype), + np.ravel(self.data), + other.indptr.astype(idx_dtype), + other.indices.astype(idx_dtype), + np.ravel(other.data), + indptr, + indices, + data) + + data = data.reshape(-1,R,C) + + # TODO eliminate zeros + + return self._bsr_container( + (data, indices, indptr), shape=(M, N), blocksize=(R, C) + ) + + 
###################### + # Conversion methods # + ###################### + + def tobsr(self, blocksize=None, copy=False): + """Convert this array/matrix into Block Sparse Row Format. + + With copy=False, the data/indices may be shared between this + array/matrix and the resultant bsr_array/bsr_matrix. + + If blocksize=(R, C) is provided, it will be used for determining + block size of the bsr_array/bsr_matrix. + """ + if blocksize not in [None, self.blocksize]: + return self.tocsr().tobsr(blocksize=blocksize) + if copy: + return self.copy() + else: + return self + + def tocsr(self, copy=False): + M, N = self.shape + R, C = self.blocksize + nnz = self.nnz + idx_dtype = self._get_index_dtype((self.indptr, self.indices), + maxval=max(nnz, N)) + indptr = np.empty(M + 1, dtype=idx_dtype) + indices = np.empty(nnz, dtype=idx_dtype) + data = np.empty(nnz, dtype=upcast(self.dtype)) + + bsr_tocsr(M // R, # n_brow + N // C, # n_bcol + R, C, + self.indptr.astype(idx_dtype, copy=False), + self.indices.astype(idx_dtype, copy=False), + self.data, + indptr, + indices, + data) + return self._csr_container((data, indices, indptr), shape=self.shape) + + tocsr.__doc__ = _spbase.tocsr.__doc__ + + def tocsc(self, copy=False): + return self.tocsr(copy=False).tocsc(copy=copy) + + tocsc.__doc__ = _spbase.tocsc.__doc__ + + def tocoo(self, copy=True): + """Convert this array/matrix to COOrdinate format. + + When copy=False the data array will be shared between + this array/matrix and the resultant coo_array/coo_matrix. + """ + + M,N = self.shape + R,C = self.blocksize + + indptr_diff = np.diff(self.indptr) + if indptr_diff.dtype.itemsize > np.dtype(np.intp).itemsize: + # Check for potential overflow + indptr_diff_limited = indptr_diff.astype(np.intp) + if np.any(indptr_diff_limited != indptr_diff): + raise ValueError("Matrix too big to convert") + indptr_diff = indptr_diff_limited + + idx_dtype = self._get_index_dtype(maxval=max(M, N)) + row = (R * np.arange(M//R, dtype=idx_dtype)).repeat(indptr_diff) + row = row.repeat(R*C).reshape(-1,R,C) + row += np.tile(np.arange(R, dtype=idx_dtype).reshape(-1,1), (1,C)) + row = row.reshape(-1) + + col = ((C * self.indices).astype(idx_dtype, copy=False) + .repeat(R*C).reshape(-1,R,C)) + col += np.tile(np.arange(C, dtype=idx_dtype), (R,1)) + col = col.reshape(-1) + + data = self.data.reshape(-1) + + if copy: + data = data.copy() + + return self._coo_container( + (data, (row, col)), shape=self.shape + ) + + def toarray(self, order=None, out=None): + return self.tocoo(copy=False).toarray(order=order, out=out) + + toarray.__doc__ = _spbase.toarray.__doc__ + + def transpose(self, axes=None, copy=False): + if axes is not None and axes != (1, 0): + raise ValueError("Sparse matrices do not support " + "an 'axes' parameter because swapping " + "dimensions is the only logical permutation.") + + R, C = self.blocksize + M, N = self.shape + NBLK = self.nnz//(R*C) + + if self.nnz == 0: + return self._bsr_container((N, M), blocksize=(C, R), + dtype=self.dtype, copy=copy) + + indptr = np.empty(N//C + 1, dtype=self.indptr.dtype) + indices = np.empty(NBLK, dtype=self.indices.dtype) + data = np.empty((NBLK, C, R), dtype=self.data.dtype) + + bsr_transpose(M//R, N//C, R, C, + self.indptr, self.indices, self.data.ravel(), + indptr, indices, data.ravel()) + + return self._bsr_container((data, indices, indptr), + shape=(N, M), copy=copy) + + transpose.__doc__ = _spbase.transpose.__doc__ + + ############################################################## + # methods that examine or modify the internal 
data structure # + ############################################################## + + def eliminate_zeros(self): + """Remove zero elements in-place.""" + + if not self.nnz: + return # nothing to do + + R,C = self.blocksize + M,N = self.shape + + mask = (self.data != 0).reshape(-1,R*C).sum(axis=1) # nonzero blocks + + nonzero_blocks = mask.nonzero()[0] + + self.data[:len(nonzero_blocks)] = self.data[nonzero_blocks] + + # modifies self.indptr and self.indices *in place* + _sparsetools.csr_eliminate_zeros(M//R, N//C, self.indptr, + self.indices, mask) + self.prune() + + def sum_duplicates(self): + """Eliminate duplicate array/matrix entries by adding them together. + + This is an *in place* operation. + """ + if self.has_canonical_format: + return + self.sort_indices() + R, C = self.blocksize + M, N = self.shape + + # port of _sparsetools.csr_sum_duplicates + n_row = M // R + nnz = 0 + row_end = 0 + for i in range(n_row): + jj = row_end + row_end = self.indptr[i+1] + while jj < row_end: + j = self.indices[jj] + x = self.data[jj] + jj += 1 + while jj < row_end and self.indices[jj] == j: + x += self.data[jj] + jj += 1 + self.indices[nnz] = j + self.data[nnz] = x + nnz += 1 + self.indptr[i+1] = nnz + + self.prune() # nnz may have changed + self.has_canonical_format = True + + def sort_indices(self): + """Sort the indices of this array/matrix *in place* + """ + if self.has_sorted_indices: + return + + R,C = self.blocksize + M,N = self.shape + + bsr_sort_indices(M//R, N//C, R, C, self.indptr, self.indices, self.data.ravel()) + + self.has_sorted_indices = True + + def prune(self): + """Remove empty space after all non-zero elements. + """ + + R,C = self.blocksize + M,N = self.shape + + if len(self.indptr) != M//R + 1: + raise ValueError("index pointer has invalid length") + + bnnz = self.indptr[-1] + + if len(self.indices) < bnnz: + raise ValueError("indices array has too few elements") + if len(self.data) < bnnz: + raise ValueError("data array has too few elements") + + self.data = self.data[:bnnz] + self.indices = self.indices[:bnnz] + + # utility functions + def _binopt(self, other, op, in_shape=None, out_shape=None): + """Apply the binary operation fn to two sparse matrices.""" + + # Ideally we'd take the GCDs of the blocksize dimensions + # and explode self and other to match. + other = self.__class__(other, blocksize=self.blocksize) + + # e.g. bsr_plus_bsr, etc. 
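+ # The routine name is assembled as self.format + op + self.format and + # resolved in the compiled _sparsetools extension module.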
+ fn = getattr(_sparsetools, self.format + op + self.format) + + R,C = self.blocksize + + max_bnnz = len(self.data) + len(other.data) + idx_dtype = self._get_index_dtype((self.indptr, self.indices, + other.indptr, other.indices), + maxval=max_bnnz) + indptr = np.empty(self.indptr.shape, dtype=idx_dtype) + indices = np.empty(max_bnnz, dtype=idx_dtype) + + bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_'] + if op in bool_ops: + data = np.empty(R*C*max_bnnz, dtype=np.bool_) + else: + data = np.empty(R*C*max_bnnz, dtype=upcast(self.dtype,other.dtype)) + + fn(self.shape[0]//R, self.shape[1]//C, R, C, + self.indptr.astype(idx_dtype), + self.indices.astype(idx_dtype), + self.data, + other.indptr.astype(idx_dtype), + other.indices.astype(idx_dtype), + np.ravel(other.data), + indptr, + indices, + data) + + actual_bnnz = indptr[-1] + indices = indices[:actual_bnnz] + data = data[:R*C*actual_bnnz] + + if actual_bnnz < max_bnnz/2: + indices = indices.copy() + data = data.copy() + + data = data.reshape(-1,R,C) + + return self.__class__((data, indices, indptr), shape=self.shape) + + # needed by _data_matrix + def _with_data(self,data,copy=True): + """Returns a matrix with the same sparsity structure as self, + but with different data. By default the structure arrays + (i.e. .indptr and .indices) are copied. + """ + if copy: + return self.__class__((data,self.indices.copy(),self.indptr.copy()), + shape=self.shape,dtype=data.dtype) + else: + return self.__class__((data,self.indices,self.indptr), + shape=self.shape,dtype=data.dtype) + +# # these functions are used by the parent class +# # to remove redundancy between bsc_matrix and bsr_matrix +# def _swap(self,x): +# """swap the members of x if this is a column-oriented matrix +# """ +# return (x[0],x[1]) + + +def isspmatrix_bsr(x): + """Is `x` of a bsr_matrix type? + + Parameters + ---------- + x + object to check for being a bsr matrix + + Returns + ------- + bool + True if `x` is a bsr matrix, False otherwise + + Examples + -------- + >>> from scipy.sparse import bsr_array, bsr_matrix, csr_matrix, isspmatrix_bsr + >>> isspmatrix_bsr(bsr_matrix([[5]])) + True + >>> isspmatrix_bsr(bsr_array([[5]])) + False + >>> isspmatrix_bsr(csr_matrix([[5]])) + False + """ + return isinstance(x, bsr_matrix) + + +# This namespace class separates array from matrix with isinstance +class bsr_array(_bsr_base, sparray): + """ + Block Sparse Row format sparse array. + + This can be instantiated in several ways: + bsr_array(D, [blocksize=(R,C)]) + where D is a 2-D ndarray. + + bsr_array(S, [blocksize=(R,C)]) + with another sparse array or matrix S (equivalent to S.tobsr()) + + bsr_array((M, N), [blocksize=(R,C), dtype]) + to construct an empty sparse array with shape (M, N) + dtype is optional, defaulting to dtype='d'. + + bsr_array((data, ij), [blocksize=(R,C), shape=(M, N)]) + where ``data`` and ``ij`` satisfy ``a[ij[0, k], ij[1, k]] = data[k]`` + + bsr_array((data, indices, indptr), [shape=(M, N)]) + is the standard BSR representation where the block column + indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]`` + and their corresponding block values are stored in + ``data[ indptr[i]: indptr[i+1] ]``. If the shape parameter is not + supplied, the array dimensions are inferred from the index arrays. 
+ + Attributes + ---------- + dtype : dtype + Data type of the array + shape : 2-tuple + Shape of the array + ndim : int + Number of dimensions (this is always 2) + nnz + size + data + BSR format data array of the array + indices + BSR format index array of the array + indptr + BSR format index pointer array of the array + blocksize + Block size + has_sorted_indices : bool + Whether indices are sorted + has_canonical_format : bool + Whether the array has sorted indices and no duplicate blocks + + Notes + ----- + Sparse arrays can be used in arithmetic operations: they support + addition, subtraction, multiplication, division, and matrix power. + + **Summary of BSR format** + + The Block Sparse Row (BSR) format is very similar to the Compressed + Sparse Row (CSR) format. BSR is appropriate for sparse matrices with dense + sub-matrices like the last example below. Such sparse block matrices often + arise in vector-valued finite element discretizations. In such cases, BSR is + considerably more efficient than CSR and CSC for many sparse arithmetic + operations. + + **Blocksize** + + The blocksize (R,C) must evenly divide the shape of the sparse array (M,N). + That is, R and C must satisfy the relationship ``M % R = 0`` and + ``N % C = 0``. + + If no blocksize is specified, a simple heuristic is applied to determine + an appropriate blocksize. + + **Canonical Format** + + In canonical format, there are no duplicate blocks and indices are sorted + per row. + + Examples + -------- + >>> import numpy as np + >>> from scipy.sparse import bsr_array + >>> bsr_array((3, 4), dtype=np.int8).toarray() + array([[0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]], dtype=int8) + + >>> row = np.array([0, 0, 1, 2, 2, 2]) + >>> col = np.array([0, 2, 2, 0, 1, 2]) + >>> data = np.array([1, 2, 3, 4, 5, 6]) + >>> bsr_array((data, (row, col)), shape=(3, 3)).toarray() + array([[1, 0, 2], + [0, 0, 3], + [4, 5, 6]]) + + >>> indptr = np.array([0, 2, 3, 6]) + >>> indices = np.array([0, 2, 2, 0, 1, 2]) + >>> data = np.array([1, 2, 3, 4, 5, 6]).repeat(4).reshape(6, 2, 2) + >>> bsr_array((data,indices,indptr), shape=(6, 6)).toarray() + array([[1, 1, 0, 0, 2, 2], + [1, 1, 0, 0, 2, 2], + [0, 0, 0, 0, 3, 3], + [0, 0, 0, 0, 3, 3], + [4, 4, 5, 5, 6, 6], + [4, 4, 5, 5, 6, 6]]) + + """ + + +class bsr_matrix(spmatrix, _bsr_base): + """ + Block Sparse Row format sparse matrix. + + This can be instantiated in several ways: + bsr_matrix(D, [blocksize=(R,C)]) + where D is a 2-D ndarray. + + bsr_matrix(S, [blocksize=(R,C)]) + with another sparse array or matrix S (equivalent to S.tobsr()) + + bsr_matrix((M, N), [blocksize=(R,C), dtype]) + to construct an empty sparse matrix with shape (M, N) + dtype is optional, defaulting to dtype='d'. + + bsr_matrix((data, ij), [blocksize=(R,C), shape=(M, N)]) + where ``data`` and ``ij`` satisfy ``a[ij[0, k], ij[1, k]] = data[k]`` + + bsr_matrix((data, indices, indptr), [shape=(M, N)]) + is the standard BSR representation where the block column + indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]`` + and their corresponding block values are stored in + ``data[ indptr[i]: indptr[i+1] ]``. If the shape parameter is not + supplied, the matrix dimensions are inferred from the index arrays. 
+ + Attributes + ---------- + dtype : dtype + Data type of the matrix + shape : 2-tuple + Shape of the matrix + ndim : int + Number of dimensions (this is always 2) + nnz + size + data + BSR format data array of the matrix + indices + BSR format index array of the matrix + indptr + BSR format index pointer array of the matrix + blocksize + Block size + has_sorted_indices : bool + Whether indices are sorted + has_canonical_format : bool + Whether the matrix has sorted indices and no duplicate blocks + + Notes + ----- + Sparse matrices can be used in arithmetic operations: they support + addition, subtraction, multiplication, division, and matrix power. + + **Summary of BSR format** + + The Block Sparse Row (BSR) format is very similar to the Compressed + Sparse Row (CSR) format. BSR is appropriate for sparse matrices with dense + sub-matrices like the last example below. Such sparse block matrices often + arise in vector-valued finite element discretizations. In such cases, BSR is + considerably more efficient than CSR and CSC for many sparse arithmetic + operations. + + **Blocksize** + + The blocksize (R,C) must evenly divide the shape of the sparse matrix (M,N). + That is, R and C must satisfy the relationship ``M % R = 0`` and + ``N % C = 0``. + + If no blocksize is specified, a simple heuristic is applied to determine + an appropriate blocksize. + + **Canonical Format** + + In canonical format, there are no duplicate blocks and indices are sorted + per row. + + Examples + -------- + >>> import numpy as np + >>> from scipy.sparse import bsr_matrix + >>> bsr_matrix((3, 4), dtype=np.int8).toarray() + array([[0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]], dtype=int8) + + >>> row = np.array([0, 0, 1, 2, 2, 2]) + >>> col = np.array([0, 2, 2, 0, 1, 2]) + >>> data = np.array([1, 2, 3, 4, 5, 6]) + >>> bsr_matrix((data, (row, col)), shape=(3, 3)).toarray() + array([[1, 0, 2], + [0, 0, 3], + [4, 5, 6]]) + + >>> indptr = np.array([0, 2, 3, 6]) + >>> indices = np.array([0, 2, 2, 0, 1, 2]) + >>> data = np.array([1, 2, 3, 4, 5, 6]).repeat(4).reshape(6, 2, 2) + >>> bsr_matrix((data,indices,indptr), shape=(6, 6)).toarray() + array([[1, 1, 0, 0, 2, 2], + [1, 1, 0, 0, 2, 2], + [0, 0, 0, 0, 3, 3], + [0, 0, 0, 0, 3, 3], + [4, 4, 5, 5, 6, 6], + [4, 4, 5, 5, 6, 6]]) + + """ + diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_compressed.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_compressed.py new file mode 100644 index 0000000000000000000000000000000000000000..dd73fc27b9bf6b35125da7776bd88435ea66b650 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_compressed.py @@ -0,0 +1,1367 @@ +"""Base class for sparse matrix formats using compressed storage.""" +__all__ = [] + +from warnings import warn +import operator + +import numpy as np +from scipy._lib._util import _prune_array, copy_if_needed + +from ._base import _spbase, issparse, SparseEfficiencyWarning +from ._data import _data_matrix, _minmax_mixin +from . 
import _sparsetools +from ._sparsetools import (get_csr_submatrix, csr_sample_offsets, csr_todense, + csr_sample_values, csr_row_index, csr_row_slice, + csr_column_index1, csr_column_index2) +from ._index import IndexMixin +from ._sputils import (upcast, upcast_char, to_native, isdense, isshape, + getdtype, isscalarlike, isintlike, downcast_intp_index, + get_sum_dtype, check_shape, is_pydata_spmatrix) + + +class _cs_matrix(_data_matrix, _minmax_mixin, IndexMixin): + """ + base array/matrix class for compressed row- and column-oriented arrays/matrices + """ + + def __init__(self, arg1, shape=None, dtype=None, copy=False): + _data_matrix.__init__(self) + + if issparse(arg1): + if arg1.format == self.format and copy: + arg1 = arg1.copy() + else: + arg1 = arg1.asformat(self.format) + self.indptr, self.indices, self.data, self._shape = ( + arg1.indptr, arg1.indices, arg1.data, arg1._shape + ) + + elif isinstance(arg1, tuple): + if isshape(arg1): + # It's a tuple of matrix dimensions (M, N) + # create empty matrix + self._shape = check_shape(arg1) + M, N = self.shape + # Select index dtype large enough to pass array and + # scalar parameters to sparsetools + idx_dtype = self._get_index_dtype(maxval=max(M, N)) + self.data = np.zeros(0, getdtype(dtype, default=float)) + self.indices = np.zeros(0, idx_dtype) + self.indptr = np.zeros(self._swap((M, N))[0] + 1, + dtype=idx_dtype) + else: + if len(arg1) == 2: + # (data, ij) format + coo = self._coo_container(arg1, shape=shape, dtype=dtype) + arrays = coo._coo_to_compressed(self._swap) + self.indptr, self.indices, self.data, self._shape = arrays + elif len(arg1) == 3: + # (data, indices, indptr) format + (data, indices, indptr) = arg1 + + # Select index dtype large enough to pass array and + # scalar parameters to sparsetools + maxval = None + if shape is not None: + maxval = max(shape) + idx_dtype = self._get_index_dtype((indices, indptr), + maxval=maxval, + check_contents=True) + + if not copy: + copy = copy_if_needed + self.indices = np.array(indices, copy=copy, dtype=idx_dtype) + self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype) + self.data = np.array(data, copy=copy, dtype=dtype) + else: + raise ValueError(f"unrecognized {self.format}_matrix " + "constructor usage") + + else: + # must be dense + try: + arg1 = np.asarray(arg1) + except Exception as e: + msg = f"unrecognized {self.format}_matrix constructor usage" + raise ValueError(msg) from e + coo = self._coo_container(arg1, dtype=dtype) + arrays = coo._coo_to_compressed(self._swap) + self.indptr, self.indices, self.data, self._shape = arrays + + # Read matrix dimensions given, if any + if shape is not None: + self._shape = check_shape(shape) + else: + if self.shape is None: + # shape not already set, try to infer dimensions + try: + major_dim = len(self.indptr) - 1 + minor_dim = self.indices.max() + 1 + except Exception as e: + raise ValueError('unable to infer matrix dimensions') from e + else: + self._shape = check_shape(self._swap((major_dim, minor_dim))) + + if dtype is not None: + self.data = self.data.astype(dtype, copy=False) + + self.check_format(full_check=False) + + def _getnnz(self, axis=None): + if axis is None: + return int(self.indptr[-1]) + else: + if axis < 0: + axis += 2 + axis, _ = self._swap((axis, 1 - axis)) + _, N = self._swap(self.shape) + if axis == 0: + return np.bincount(downcast_intp_index(self.indices), + minlength=N) + elif axis == 1: + return np.diff(self.indptr) + raise ValueError('axis out of bounds') + + _getnnz.__doc__ = _spbase._getnnz.__doc__ + + def 
check_format(self, full_check=True): + """Check whether the array/matrix respects the CSR or CSC format. + + Parameters + ---------- + full_check : bool, optional + If `True`, run rigorous checks, scanning arrays for valid values. + Note that activating those checks might copy arrays for casting + and modify indices and index pointers in place. + If `False`, run basic checks on attributes. O(1) operations. + Default is `True`. + """ + # use _swap to determine proper bounds + major_name, minor_name = self._swap(('row', 'column')) + major_dim, minor_dim = self._swap(self.shape) + + # index arrays should have integer data types + if self.indptr.dtype.kind != 'i': + warn(f"indptr array has non-integer dtype ({self.indptr.dtype.name})", + stacklevel=3) + if self.indices.dtype.kind != 'i': + warn(f"indices array has non-integer dtype ({self.indices.dtype.name})", + stacklevel=3) + + # check array shapes + for x in [self.data.ndim, self.indices.ndim, self.indptr.ndim]: + if x != 1: + raise ValueError('data, indices, and indptr should be 1-D') + + # check index pointer + if (len(self.indptr) != major_dim + 1): + raise ValueError("index pointer size ({}) should be ({})" + "".format(len(self.indptr), major_dim + 1)) + if (self.indptr[0] != 0): + raise ValueError("index pointer should start with 0") + + # check index and data arrays + if (len(self.indices) != len(self.data)): + raise ValueError("indices and data should have the same size") + if (self.indptr[-1] > len(self.indices)): + raise ValueError("Last value of index pointer should be less than " + "the size of index and data arrays") + + self.prune() + + if full_check: + # check format validity (more expensive) + if self.nnz > 0: + if self.indices.max() >= minor_dim: + raise ValueError(f"{minor_name} index values must be < {minor_dim}") + if self.indices.min() < 0: + raise ValueError(f"{minor_name} index values must be >= 0") + if np.diff(self.indptr).min() < 0: + raise ValueError("index pointer values must form a " + "non-decreasing sequence") + + idx_dtype = self._get_index_dtype((self.indptr, self.indices)) + self.indptr = np.asarray(self.indptr, dtype=idx_dtype) + self.indices = np.asarray(self.indices, dtype=idx_dtype) + self.data = to_native(self.data) + + # if not self.has_sorted_indices(): + # warn('Indices were not in sorted order. Sorting indices.') + # self.sort_indices() + # assert(self.has_sorted_indices()) + # TODO check for duplicates? + + ####################### + # Boolean comparisons # + ####################### + + def _scalar_binopt(self, other, op): + """Scalar version of self._binopt, for cases in which no new nonzeros + are added. Produces a new sparse array in canonical form. + """ + self.sum_duplicates() + res = self._with_data(op(self.data, other), copy=True) + res.eliminate_zeros() + return res + + def __eq__(self, other): + # Scalar other. + if isscalarlike(other): + if np.isnan(other): + return self.__class__(self.shape, dtype=np.bool_) + + if other == 0: + warn("Comparing a sparse matrix with 0 using == is inefficient" + ", try using != instead.", SparseEfficiencyWarning, + stacklevel=3) + all_true = self.__class__(np.ones(self.shape, dtype=np.bool_)) + inv = self._scalar_binopt(other, operator.ne) + return all_true - inv + else: + return self._scalar_binopt(other, operator.eq) + # Dense other. + elif isdense(other): + return self.todense() == other + # Pydata sparse other. + elif is_pydata_spmatrix(other): + return NotImplemented + # Sparse other. 
+ elif issparse(other): + warn("Comparing sparse matrices using == is inefficient, try using" + " != instead.", SparseEfficiencyWarning, stacklevel=3) + # TODO sparse broadcasting + if self.shape != other.shape: + return False + elif self.format != other.format: + other = other.asformat(self.format) + res = self._binopt(other, '_ne_') + all_true = self.__class__(np.ones(self.shape, dtype=np.bool_)) + return all_true - res + else: + return NotImplemented + + def __ne__(self, other): + # Scalar other. + if isscalarlike(other): + if np.isnan(other): + warn("Comparing a sparse matrix with nan using != is" + " inefficient", SparseEfficiencyWarning, stacklevel=3) + all_true = self.__class__(np.ones(self.shape, dtype=np.bool_)) + return all_true + elif other != 0: + warn("Comparing a sparse matrix with a nonzero scalar using !=" + " is inefficient, try using == instead.", + SparseEfficiencyWarning, stacklevel=3) + all_true = self.__class__(np.ones(self.shape), dtype=np.bool_) + inv = self._scalar_binopt(other, operator.eq) + return all_true - inv + else: + return self._scalar_binopt(other, operator.ne) + # Dense other. + elif isdense(other): + return self.todense() != other + # Pydata sparse other. + elif is_pydata_spmatrix(other): + return NotImplemented + # Sparse other. + elif issparse(other): + # TODO sparse broadcasting + if self.shape != other.shape: + return True + elif self.format != other.format: + other = other.asformat(self.format) + return self._binopt(other, '_ne_') + else: + return NotImplemented + + def _inequality(self, other, op, op_name, bad_scalar_msg): + # Scalar other. + if isscalarlike(other): + if 0 == other and op_name in ('_le_', '_ge_'): + raise NotImplementedError(" >= and <= don't work with 0.") + elif op(0, other): + warn(bad_scalar_msg, SparseEfficiencyWarning, stacklevel=3) + other_arr = np.empty(self.shape, dtype=np.result_type(other)) + other_arr.fill(other) + other_arr = self.__class__(other_arr) + return self._binopt(other_arr, op_name) + else: + return self._scalar_binopt(other, op) + # Dense other. + elif isdense(other): + return op(self.todense(), other) + # Sparse other. 
+ elif issparse(other): + # TODO sparse broadcasting + if self.shape != other.shape: + raise ValueError("inconsistent shapes") + elif self.format != other.format: + other = other.asformat(self.format) + if op_name not in ('_ge_', '_le_'): + return self._binopt(other, op_name) + + warn("Comparing sparse matrices using >= and <= is inefficient, " + "try using <, >, or != instead.", + SparseEfficiencyWarning, stacklevel=3) + all_true = self.__class__(np.ones(self.shape, dtype=np.bool_)) + res = self._binopt(other, '_gt_' if op_name == '_le_' else '_lt_') + return all_true - res + else: + return NotImplemented + + def __lt__(self, other): + return self._inequality(other, operator.lt, '_lt_', + "Comparing a sparse matrix with a scalar " + "greater than zero using < is inefficient, " + "try using >= instead.") + + def __gt__(self, other): + return self._inequality(other, operator.gt, '_gt_', + "Comparing a sparse matrix with a scalar " + "less than zero using > is inefficient, " + "try using <= instead.") + + def __le__(self, other): + return self._inequality(other, operator.le, '_le_', + "Comparing a sparse matrix with a scalar " + "greater than zero using <= is inefficient, " + "try using > instead.") + + def __ge__(self, other): + return self._inequality(other, operator.ge, '_ge_', + "Comparing a sparse matrix with a scalar " + "less than zero using >= is inefficient, " + "try using < instead.") + + ################################# + # Arithmetic operator overrides # + ################################# + + def _add_dense(self, other): + if other.shape != self.shape: + raise ValueError(f'Incompatible shapes ({self.shape} and {other.shape})') + dtype = upcast_char(self.dtype.char, other.dtype.char) + order = self._swap('CF')[0] + result = np.array(other, dtype=dtype, order=order, copy=True) + M, N = self._swap(self.shape) + y = result if result.flags.c_contiguous else result.T + csr_todense(M, N, self.indptr, self.indices, self.data, y) + return self._container(result, copy=False) + + def _add_sparse(self, other): + return self._binopt(other, '_plus_') + + def _sub_sparse(self, other): + return self._binopt(other, '_minus_') + + def multiply(self, other): + """Point-wise multiplication by another array/matrix, vector, or + scalar. + """ + # Scalar multiplication. + if isscalarlike(other): + return self._mul_scalar(other) + # Sparse matrix or vector. + if issparse(other): + if self.shape == other.shape: + other = self.__class__(other) + return self._binopt(other, '_elmul_') + if other.ndim == 1: + raise TypeError("broadcast from a 1d array not yet supported") + # Single element. + elif other.shape == (1, 1): + return self._mul_scalar(other.toarray()[0, 0]) + elif self.shape == (1, 1): + return other._mul_scalar(self.toarray()[0, 0]) + # A row times a column. + elif self.shape[1] == 1 and other.shape[0] == 1: + return self._matmul_sparse(other.tocsc()) + elif self.shape[0] == 1 and other.shape[1] == 1: + return other._matmul_sparse(self.tocsc()) + # Row vector times matrix. other is a row. + elif other.shape[0] == 1 and self.shape[1] == other.shape[1]: + other = self._dia_container( + (other.toarray().ravel(), [0]), + shape=(other.shape[1], other.shape[1]) + ) + return self._matmul_sparse(other) + # self is a row. + elif self.shape[0] == 1 and self.shape[1] == other.shape[1]: + copy = self._dia_container( + (self.toarray().ravel(), [0]), + shape=(self.shape[1], self.shape[1]) + ) + return other._matmul_sparse(copy) + # Column vector times matrix. other is a column. 
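+ # Implemented as diag(other) @ self, which scales row i of self by + # the single entry other[i, 0].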
+ elif other.shape[1] == 1 and self.shape[0] == other.shape[0]: + other = self._dia_container( + (other.toarray().ravel(), [0]), + shape=(other.shape[0], other.shape[0]) + ) + return other._matmul_sparse(self) + # self is a column. + elif self.shape[1] == 1 and self.shape[0] == other.shape[0]: + copy = self._dia_container( + (self.toarray().ravel(), [0]), + shape=(self.shape[0], self.shape[0]) + ) + return copy._matmul_sparse(other) + else: + raise ValueError("inconsistent shapes") + + # Assume other is a dense matrix/array, which produces a single-item + # object array if other isn't convertible to ndarray. + other = np.atleast_2d(other) + + if other.ndim != 2: + return np.multiply(self.toarray(), other) + # Single element / wrapped object. + if other.size == 1: + if other.dtype == np.object_: + # 'other' not convertible to ndarray. + return NotImplemented + return self._mul_scalar(other.flat[0]) + # Fast case for trivial sparse matrix. + elif self.shape == (1, 1): + return np.multiply(self.toarray()[0, 0], other) + + ret = self.tocoo() + # Matching shapes. + if self.shape == other.shape: + data = np.multiply(ret.data, other[ret.row, ret.col]) + # Sparse row vector times... + elif self.shape[0] == 1: + if other.shape[1] == 1: # Dense column vector. + data = np.multiply(ret.data, other) + elif other.shape[1] == self.shape[1]: # Dense matrix. + data = np.multiply(ret.data, other[:, ret.col]) + else: + raise ValueError("inconsistent shapes") + row = np.repeat(np.arange(other.shape[0]), len(ret.row)) + col = np.tile(ret.col, other.shape[0]) + return self._coo_container( + (data.view(np.ndarray).ravel(), (row, col)), + shape=(other.shape[0], self.shape[1]), + copy=False + ) + # Sparse column vector times... + elif self.shape[1] == 1: + if other.shape[0] == 1: # Dense row vector. + data = np.multiply(ret.data[:, None], other) + elif other.shape[0] == self.shape[0]: # Dense matrix. + data = np.multiply(ret.data[:, None], other[ret.row]) + else: + raise ValueError("inconsistent shapes") + row = np.repeat(ret.row, other.shape[1]) + col = np.tile(np.arange(other.shape[1]), len(ret.col)) + return self._coo_container( + (data.view(np.ndarray).ravel(), (row, col)), + shape=(self.shape[0], other.shape[1]), + copy=False + ) + # Sparse matrix times dense row vector. + elif other.shape[0] == 1 and self.shape[1] == other.shape[1]: + data = np.multiply(ret.data, other[:, ret.col].ravel()) + # Sparse matrix times dense column vector. 
+            elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:
+                data = np.multiply(ret.data, other[ret.row].ravel())
+            else:
+                raise ValueError("inconsistent shapes")
+        ret.data = data.view(np.ndarray).ravel()
+        return ret
+
+    ###########################
+    # Multiplication handlers #
+    ###########################
+
+    def _matmul_vector(self, other):
+        M, N = self.shape
+
+        # output array
+        result = np.zeros(M, dtype=upcast_char(self.dtype.char,
+                                               other.dtype.char))
+
+        # csr_matvec or csc_matvec
+        fn = getattr(_sparsetools, self.format + '_matvec')
+        fn(M, N, self.indptr, self.indices, self.data, other, result)
+
+        return result
+
+    def _matmul_multivector(self, other):
+        M, N = self.shape
+        n_vecs = other.shape[1]  # number of column vectors
+
+        result = np.zeros((M, n_vecs),
+                          dtype=upcast_char(self.dtype.char, other.dtype.char))
+
+        # csr_matvecs or csc_matvecs
+        fn = getattr(_sparsetools, self.format + '_matvecs')
+        fn(M, N, n_vecs, self.indptr, self.indices, self.data,
+           other.ravel(), result.ravel())
+
+        return result
+
+    def _matmul_sparse(self, other):
+        M, K1 = self.shape
+        K2, N = other.shape
+
+        major_axis = self._swap((M, N))[0]
+        other = self.__class__(other)  # convert to this format
+
+        idx_dtype = self._get_index_dtype((self.indptr, self.indices,
+                                           other.indptr, other.indices))
+
+        fn = getattr(_sparsetools, self.format + '_matmat_maxnnz')
+        nnz = fn(M, N,
+                 np.asarray(self.indptr, dtype=idx_dtype),
+                 np.asarray(self.indices, dtype=idx_dtype),
+                 np.asarray(other.indptr, dtype=idx_dtype),
+                 np.asarray(other.indices, dtype=idx_dtype))
+
+        idx_dtype = self._get_index_dtype((self.indptr, self.indices,
+                                           other.indptr, other.indices),
+                                          maxval=nnz)
+
+        indptr = np.empty(major_axis + 1, dtype=idx_dtype)
+        indices = np.empty(nnz, dtype=idx_dtype)
+        data = np.empty(nnz, dtype=upcast(self.dtype, other.dtype))
+
+        fn = getattr(_sparsetools, self.format + '_matmat')
+        fn(M, N, np.asarray(self.indptr, dtype=idx_dtype),
+           np.asarray(self.indices, dtype=idx_dtype),
+           self.data,
+           np.asarray(other.indptr, dtype=idx_dtype),
+           np.asarray(other.indices, dtype=idx_dtype),
+           other.data,
+           indptr, indices, data)
+
+        return self.__class__((data, indices, indptr), shape=(M, N))
+
+    def diagonal(self, k=0):
+        rows, cols = self.shape
+        if k <= -rows or k >= cols:
+            return np.empty(0, dtype=self.data.dtype)
+        fn = getattr(_sparsetools, self.format + "_diagonal")
+        y = np.empty(min(rows + min(k, 0), cols - max(k, 0)),
+                     dtype=upcast(self.dtype))
+        fn(k, self.shape[0], self.shape[1], self.indptr, self.indices,
+           self.data, y)
+        return y
+
+    diagonal.__doc__ = _spbase.diagonal.__doc__
+
+    #####################
+    # Other binary ops  #
+    #####################
+
+    def _maximum_minimum(self, other, npop, op_name, dense_check):
+        if isscalarlike(other):
+            if dense_check(other):
+                warn("Taking maximum (minimum) with > 0 (< 0) number results"
+                     " in a dense matrix.", SparseEfficiencyWarning,
+                     stacklevel=3)
+                other_arr = np.empty(self.shape, dtype=np.asarray(other).dtype)
+                other_arr.fill(other)
+                other_arr = self.__class__(other_arr)
+                return self._binopt(other_arr, op_name)
+            else:
+                self.sum_duplicates()
+                new_data = npop(self.data, np.asarray(other))
+                mat = self.__class__((new_data, self.indices, self.indptr),
+                                     dtype=new_data.dtype, shape=self.shape)
+                return mat
+        elif isdense(other):
+            return npop(self.todense(), other)
+        elif issparse(other):
+            return self._binopt(other, op_name)
+        else:
+            raise ValueError("Operands not compatible.")
+
+    def maximum(self, other):
+        return self._maximum_minimum(other,
np.maximum, + '_maximum_', lambda x: np.asarray(x) > 0) + + maximum.__doc__ = _spbase.maximum.__doc__ + + def minimum(self, other): + return self._maximum_minimum(other, np.minimum, + '_minimum_', lambda x: np.asarray(x) < 0) + + minimum.__doc__ = _spbase.minimum.__doc__ + + ##################### + # Reduce operations # + ##################### + + def sum(self, axis=None, dtype=None, out=None): + """Sum the array/matrix over the given axis. If the axis is None, sum + over both rows and columns, returning a scalar. + """ + # The _spbase base class already does axis=0 and axis=1 efficiently + # so we only do the case axis=None here + if (not hasattr(self, 'blocksize') and + axis in self._swap(((1, -1), (0, 2)))[0]): + # faster than multiplication for large minor axis in CSC/CSR + res_dtype = get_sum_dtype(self.dtype) + ret = np.zeros(len(self.indptr) - 1, dtype=res_dtype) + + major_index, value = self._minor_reduce(np.add) + ret[major_index] = value + ret = self._ascontainer(ret) + if axis % 2 == 1: + ret = ret.T + + if out is not None and out.shape != ret.shape: + raise ValueError('dimensions do not match') + + return ret.sum(axis=(), dtype=dtype, out=out) + # _spbase will handle the remaining situations when axis + # is in {None, -1, 0, 1} + else: + return _spbase.sum(self, axis=axis, dtype=dtype, out=out) + + sum.__doc__ = _spbase.sum.__doc__ + + def _minor_reduce(self, ufunc, data=None): + """Reduce nonzeros with a ufunc over the minor axis when non-empty + + Can be applied to a function of self.data by supplying data parameter. + + Warning: this does not call sum_duplicates() + + Returns + ------- + major_index : array of ints + Major indices where nonzero + + value : array of self.dtype + Reduce result for nonzeros in each major_index + """ + if data is None: + data = self.data + major_index = np.flatnonzero(np.diff(self.indptr)) + value = ufunc.reduceat(data, + downcast_intp_index(self.indptr[major_index])) + return major_index, value + + ####################### + # Getting and Setting # + ####################### + + def _get_intXint(self, row, col): + M, N = self._swap(self.shape) + major, minor = self._swap((row, col)) + indptr, indices, data = get_csr_submatrix( + M, N, self.indptr, self.indices, self.data, + major, major + 1, minor, minor + 1) + return data.sum(dtype=self.dtype) + + def _get_sliceXslice(self, row, col): + major, minor = self._swap((row, col)) + if major.step in (1, None) and minor.step in (1, None): + return self._get_submatrix(major, minor, copy=True) + return self._major_slice(major)._minor_slice(minor) + + def _get_arrayXarray(self, row, col): + # inner indexing + idx_dtype = self.indices.dtype + M, N = self._swap(self.shape) + major, minor = self._swap((row, col)) + major = np.asarray(major, dtype=idx_dtype) + minor = np.asarray(minor, dtype=idx_dtype) + + val = np.empty(major.size, dtype=self.dtype) + csr_sample_values(M, N, self.indptr, self.indices, self.data, + major.size, major.ravel(), minor.ravel(), val) + if major.ndim == 1: + return self._ascontainer(val) + return self.__class__(val.reshape(major.shape)) + + def _get_columnXarray(self, row, col): + # outer indexing + major, minor = self._swap((row, col)) + return self._major_index_fancy(major)._minor_index_fancy(minor) + + def _major_index_fancy(self, idx): + """Index along the major axis where idx is an array of ints. 
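+
+        For CSR containers the major axis is rows (e.g. ``A[[0, 2]]``);
+        for CSC it is columns. Sketch of the approach: gather each
+        selected row's nnz, cumsum them into the new indptr, then copy
+        the matching index/data slices with ``csr_row_index``.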
+ """ + idx_dtype = self._get_index_dtype((self.indptr, self.indices)) + indices = np.asarray(idx, dtype=idx_dtype).ravel() + + _, N = self._swap(self.shape) + M = len(indices) + new_shape = self._swap((M, N)) + if M == 0: + return self.__class__(new_shape, dtype=self.dtype) + + row_nnz = (self.indptr[indices + 1] - self.indptr[indices]).astype(idx_dtype) + + res_indptr = np.zeros(M+1, dtype=idx_dtype) + np.cumsum(row_nnz, out=res_indptr[1:]) + + nnz = res_indptr[-1] + res_indices = np.empty(nnz, dtype=idx_dtype) + res_data = np.empty(nnz, dtype=self.dtype) + csr_row_index( + M, + indices, + self.indptr.astype(idx_dtype, copy=False), + self.indices.astype(idx_dtype, copy=False), + self.data, + res_indices, + res_data + ) + + return self.__class__((res_data, res_indices, res_indptr), + shape=new_shape, copy=False) + + def _major_slice(self, idx, copy=False): + """Index along the major axis where idx is a slice object. + """ + if idx == slice(None): + return self.copy() if copy else self + + M, N = self._swap(self.shape) + start, stop, step = idx.indices(M) + M = len(range(start, stop, step)) + new_shape = self._swap((M, N)) + if M == 0: + return self.__class__(new_shape, dtype=self.dtype) + + # Work out what slices are needed for `row_nnz` + # start,stop can be -1, only if step is negative + start0, stop0 = start, stop + if stop == -1 and start >= 0: + stop0 = None + start1, stop1 = start + 1, stop + 1 + + row_nnz = self.indptr[start1:stop1:step] - \ + self.indptr[start0:stop0:step] + idx_dtype = self.indices.dtype + res_indptr = np.zeros(M+1, dtype=idx_dtype) + np.cumsum(row_nnz, out=res_indptr[1:]) + + if step == 1: + all_idx = slice(self.indptr[start], self.indptr[stop]) + res_indices = np.array(self.indices[all_idx], copy=copy) + res_data = np.array(self.data[all_idx], copy=copy) + else: + nnz = res_indptr[-1] + res_indices = np.empty(nnz, dtype=idx_dtype) + res_data = np.empty(nnz, dtype=self.dtype) + csr_row_slice(start, stop, step, self.indptr, self.indices, + self.data, res_indices, res_data) + + return self.__class__((res_data, res_indices, res_indptr), + shape=new_shape, copy=False) + + def _minor_index_fancy(self, idx): + """Index along the minor axis where idx is an array of ints. + """ + idx_dtype = self._get_index_dtype((self.indices, self.indptr)) + indices = self.indices.astype(idx_dtype, copy=False) + indptr = self.indptr.astype(idx_dtype, copy=False) + + idx = np.asarray(idx, dtype=idx_dtype).ravel() + + M, N = self._swap(self.shape) + k = len(idx) + new_shape = self._swap((M, k)) + if k == 0: + return self.__class__(new_shape, dtype=self.dtype) + + # pass 1: count idx entries and compute new indptr + col_offsets = np.zeros(N, dtype=idx_dtype) + res_indptr = np.empty_like(self.indptr, dtype=idx_dtype) + csr_column_index1( + k, + idx, + M, + N, + indptr, + indices, + col_offsets, + res_indptr, + ) + + # pass 2: copy indices/data for selected idxs + col_order = np.argsort(idx).astype(idx_dtype, copy=False) + nnz = res_indptr[-1] + res_indices = np.empty(nnz, dtype=idx_dtype) + res_data = np.empty(nnz, dtype=self.dtype) + csr_column_index2(col_order, col_offsets, len(self.indices), + indices, self.data, res_indices, res_data) + return self.__class__((res_data, res_indices, res_indptr), + shape=new_shape, copy=False) + + def _minor_slice(self, idx, copy=False): + """Index along the minor axis where idx is a slice object. 
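+
+        For CSR containers this is a column slice (e.g. ``A[:, 1:4]``);
+        for CSC, a row slice. Unit-step slices take the
+        ``_get_submatrix`` fast path below; other steps currently fall
+        back to fancy indexing.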
+ """ + if idx == slice(None): + return self.copy() if copy else self + + M, N = self._swap(self.shape) + start, stop, step = idx.indices(N) + N = len(range(start, stop, step)) + if N == 0: + return self.__class__(self._swap((M, N)), dtype=self.dtype) + if step == 1: + return self._get_submatrix(minor=idx, copy=copy) + # TODO: don't fall back to fancy indexing here + return self._minor_index_fancy(np.arange(start, stop, step)) + + def _get_submatrix(self, major=None, minor=None, copy=False): + """Return a submatrix of this matrix. + + major, minor: None, int, or slice with step 1 + """ + M, N = self._swap(self.shape) + i0, i1 = _process_slice(major, M) + j0, j1 = _process_slice(minor, N) + + if i0 == 0 and j0 == 0 and i1 == M and j1 == N: + return self.copy() if copy else self + + indptr, indices, data = get_csr_submatrix( + M, N, self.indptr, self.indices, self.data, i0, i1, j0, j1) + + shape = self._swap((i1 - i0, j1 - j0)) + return self.__class__((data, indices, indptr), shape=shape, + dtype=self.dtype, copy=False) + + def _set_intXint(self, row, col, x): + i, j = self._swap((row, col)) + self._set_many(i, j, x) + + def _set_arrayXarray(self, row, col, x): + i, j = self._swap((row, col)) + self._set_many(i, j, x) + + def _set_arrayXarray_sparse(self, row, col, x): + # clear entries that will be overwritten + self._zero_many(*self._swap((row, col))) + + M, N = row.shape # matches col.shape + broadcast_row = M != 1 and x.shape[0] == 1 + broadcast_col = N != 1 and x.shape[1] == 1 + r, c = x.row, x.col + + x = np.asarray(x.data, dtype=self.dtype) + if x.size == 0: + return + + if broadcast_row: + r = np.repeat(np.arange(M), len(r)) + c = np.tile(c, M) + x = np.tile(x, M) + if broadcast_col: + r = np.repeat(r, N) + c = np.tile(np.arange(N), len(c)) + x = np.repeat(x, N) + # only assign entries in the new sparsity structure + i, j = self._swap((row[r, c], col[r, c])) + self._set_many(i, j, x) + + def _setdiag(self, values, k): + if 0 in self.shape: + return + + M, N = self.shape + broadcast = (values.ndim == 0) + + if k < 0: + if broadcast: + max_index = min(M + k, N) + else: + max_index = min(M + k, N, len(values)) + i = np.arange(-k, max_index - k, dtype=self.indices.dtype) + j = np.arange(max_index, dtype=self.indices.dtype) + + else: + if broadcast: + max_index = min(M, N - k) + else: + max_index = min(M, N - k, len(values)) + i = np.arange(max_index, dtype=self.indices.dtype) + j = np.arange(k, k + max_index, dtype=self.indices.dtype) + + if not broadcast: + values = values[:len(i)] + + x = np.atleast_1d(np.asarray(values, dtype=self.dtype)).ravel() + if x.squeeze().shape != i.squeeze().shape: + x = np.broadcast_to(x, i.shape) + if x.size == 0: + return + + M, N = self._swap((M, N)) + i, j = self._swap((i, j)) + n_samples = x.size + offsets = np.empty(n_samples, dtype=self.indices.dtype) + ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples, + i, j, offsets) + if ret == 1: + # rinse and repeat + self.sum_duplicates() + csr_sample_offsets(M, N, self.indptr, self.indices, n_samples, + i, j, offsets) + if -1 not in offsets: + # only affects existing non-zero cells + self.data[offsets] = x + return + + mask = (offsets <= -1) + # Boundary between csc and convert to coo + # The value 0.001 is justified in gh-19962#issuecomment-1920499678 + if mask.sum() < self.nnz * 0.001: + # create new entries + i = i[mask] + j = j[mask] + self._insert_many(i, j, x[mask]) + # replace existing entries + mask = ~mask + self.data[offsets[mask]] = x[mask] + else: + # convert to coo for _set_diag 
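+            # (A COO round-trip rebuilds the whole structure in one pass,
+            # which is cheaper than many separate insertions into the
+            # compressed structure once the share of new entries is large.)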
+ coo = self.tocoo() + coo._setdiag(values, k) + arrays = coo._coo_to_compressed(self._swap) + self.indptr, self.indices, self.data, _ = arrays + + def _prepare_indices(self, i, j): + M, N = self._swap(self.shape) + + def check_bounds(indices, bound): + idx = indices.max() + if idx >= bound: + raise IndexError('index (%d) out of range (>= %d)' % + (idx, bound)) + idx = indices.min() + if idx < -bound: + raise IndexError('index (%d) out of range (< -%d)' % + (idx, bound)) + + i = np.atleast_1d(np.asarray(i, dtype=self.indices.dtype)).ravel() + j = np.atleast_1d(np.asarray(j, dtype=self.indices.dtype)).ravel() + check_bounds(i, M) + check_bounds(j, N) + return i, j, M, N + + def _set_many(self, i, j, x): + """Sets value at each (i, j) to x + + Here (i,j) index major and minor respectively, and must not contain + duplicate entries. + """ + i, j, M, N = self._prepare_indices(i, j) + x = np.atleast_1d(np.asarray(x, dtype=self.dtype)).ravel() + + n_samples = x.size + offsets = np.empty(n_samples, dtype=self.indices.dtype) + ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples, + i, j, offsets) + if ret == 1: + # rinse and repeat + self.sum_duplicates() + csr_sample_offsets(M, N, self.indptr, self.indices, n_samples, + i, j, offsets) + + if -1 not in offsets: + # only affects existing non-zero cells + self.data[offsets] = x + return + + else: + warn("Changing the sparsity structure of a {}_matrix is expensive." + " lil_matrix is more efficient.".format(self.format), + SparseEfficiencyWarning, stacklevel=3) + # replace where possible + mask = offsets > -1 + self.data[offsets[mask]] = x[mask] + # only insertions remain + mask = ~mask + i = i[mask] + i[i < 0] += M + j = j[mask] + j[j < 0] += N + self._insert_many(i, j, x[mask]) + + def _zero_many(self, i, j): + """Sets value at each (i, j) to zero, preserving sparsity structure. + + Here (i,j) index major and minor respectively. + """ + i, j, M, N = self._prepare_indices(i, j) + + n_samples = len(i) + offsets = np.empty(n_samples, dtype=self.indices.dtype) + ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples, + i, j, offsets) + if ret == 1: + # rinse and repeat + self.sum_duplicates() + csr_sample_offsets(M, N, self.indptr, self.indices, n_samples, + i, j, offsets) + + # only assign zeros to the existing sparsity structure + self.data[offsets[offsets > -1]] = 0 + + def _insert_many(self, i, j, x): + """Inserts new nonzero at each (i, j) with value x + + Here (i,j) index major and minor respectively. + i, j and x must be non-empty, 1d arrays. + Inserts each major group (e.g. all entries per row) at a time. + Maintains has_sorted_indices property. + Modifies i, j, x in place. 
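+        If duplicate (i, j) pairs are supplied, the last value wins,
+        matching ordinary assignment semantics.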
+ """ + order = np.argsort(i, kind='mergesort') # stable for duplicates + i = i.take(order, mode='clip') + j = j.take(order, mode='clip') + x = x.take(order, mode='clip') + + do_sort = self.has_sorted_indices + + # Update index data type + idx_dtype = self._get_index_dtype((self.indices, self.indptr), + maxval=(self.indptr[-1] + x.size)) + self.indptr = np.asarray(self.indptr, dtype=idx_dtype) + self.indices = np.asarray(self.indices, dtype=idx_dtype) + i = np.asarray(i, dtype=idx_dtype) + j = np.asarray(j, dtype=idx_dtype) + + # Collate old and new in chunks by major index + indices_parts = [] + data_parts = [] + ui, ui_indptr = np.unique(i, return_index=True) + ui_indptr = np.append(ui_indptr, len(j)) + new_nnzs = np.diff(ui_indptr) + prev = 0 + for c, (ii, js, je) in enumerate(zip(ui, ui_indptr, ui_indptr[1:])): + # old entries + start = self.indptr[prev] + stop = self.indptr[ii] + indices_parts.append(self.indices[start:stop]) + data_parts.append(self.data[start:stop]) + + # handle duplicate j: keep last setting + uj, uj_indptr = np.unique(j[js:je][::-1], return_index=True) + if len(uj) == je - js: + indices_parts.append(j[js:je]) + data_parts.append(x[js:je]) + else: + indices_parts.append(j[js:je][::-1][uj_indptr]) + data_parts.append(x[js:je][::-1][uj_indptr]) + new_nnzs[c] = len(uj) + + prev = ii + + # remaining old entries + start = self.indptr[ii] + indices_parts.append(self.indices[start:]) + data_parts.append(self.data[start:]) + + # update attributes + self.indices = np.concatenate(indices_parts) + self.data = np.concatenate(data_parts) + nnzs = np.empty(self.indptr.shape, dtype=idx_dtype) + nnzs[0] = idx_dtype(0) + indptr_diff = np.diff(self.indptr) + indptr_diff[ui] += new_nnzs + nnzs[1:] = indptr_diff + self.indptr = np.cumsum(nnzs, out=nnzs) + + if do_sort: + # TODO: only sort where necessary + self.has_sorted_indices = False + self.sort_indices() + + self.check_format(full_check=False) + + ###################### + # Conversion methods # + ###################### + + def tocoo(self, copy=True): + major_dim, minor_dim = self._swap(self.shape) + minor_indices = self.indices + major_indices = np.empty(len(minor_indices), dtype=self.indices.dtype) + _sparsetools.expandptr(major_dim, self.indptr, major_indices) + coords = self._swap((major_indices, minor_indices)) + + return self._coo_container( + (self.data, coords), self.shape, copy=copy, dtype=self.dtype + ) + + tocoo.__doc__ = _spbase.tocoo.__doc__ + + def toarray(self, order=None, out=None): + if out is None and order is None: + order = self._swap('cf')[0] + out = self._process_toarray_args(order, out) + if not (out.flags.c_contiguous or out.flags.f_contiguous): + raise ValueError('Output array must be C or F contiguous') + # align ideal order with output array order + if out.flags.c_contiguous: + x = self.tocsr() + y = out + else: + x = self.tocsc() + y = out.T + M, N = x._swap(x.shape) + csr_todense(M, N, x.indptr, x.indices, x.data, y) + return out + + toarray.__doc__ = _spbase.toarray.__doc__ + + ############################################################## + # methods that examine or modify the internal data structure # + ############################################################## + + def eliminate_zeros(self): + """Remove zero entries from the array/matrix + + This is an *in place* operation. 
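+
+        For example (an illustrative sketch):
+
+        >>> from scipy.sparse import csr_array
+        >>> A = csr_array([[1.0, 0.0], [0.0, 2.0]])
+        >>> A.data[0] = 0.0  # an explicitly stored zero
+        >>> A.nnz
+        2
+        >>> A.eliminate_zeros()
+        >>> A.nnz
+        1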
+ """ + M, N = self._swap(self.shape) + _sparsetools.csr_eliminate_zeros(M, N, self.indptr, self.indices, + self.data) + self.prune() # nnz may have changed + + @property + def has_canonical_format(self) -> bool: + """Whether the array/matrix has sorted indices and no duplicates + + Returns + - True: if the above applies + - False: otherwise + + has_canonical_format implies has_sorted_indices, so if the latter flag + is False, so will the former be; if the former is found True, the + latter flag is also set. + """ + # first check to see if result was cached + if not getattr(self, '_has_sorted_indices', True): + # not sorted => not canonical + self._has_canonical_format = False + elif not hasattr(self, '_has_canonical_format'): + self.has_canonical_format = bool( + _sparsetools.csr_has_canonical_format( + len(self.indptr) - 1, self.indptr, self.indices) + ) + return self._has_canonical_format + + @has_canonical_format.setter + def has_canonical_format(self, val: bool): + self._has_canonical_format = bool(val) + if val: + self.has_sorted_indices = True + + def sum_duplicates(self): + """Eliminate duplicate entries by adding them together + + This is an *in place* operation. + """ + if self.has_canonical_format: + return + self.sort_indices() + + M, N = self._swap(self.shape) + _sparsetools.csr_sum_duplicates(M, N, self.indptr, self.indices, + self.data) + + self.prune() # nnz may have changed + self.has_canonical_format = True + + @property + def has_sorted_indices(self) -> bool: + """Whether the indices are sorted + + Returns + - True: if the indices of the array/matrix are in sorted order + - False: otherwise + """ + # first check to see if result was cached + if not hasattr(self, '_has_sorted_indices'): + self._has_sorted_indices = bool( + _sparsetools.csr_has_sorted_indices( + len(self.indptr) - 1, self.indptr, self.indices) + ) + return self._has_sorted_indices + + @has_sorted_indices.setter + def has_sorted_indices(self, val: bool): + self._has_sorted_indices = bool(val) + + + def sorted_indices(self): + """Return a copy of this array/matrix with sorted indices + """ + A = self.copy() + A.sort_indices() + return A + + # an alternative that has linear complexity is the following + # although the previous option is typically faster + # return self.toother().toother() + + def sort_indices(self): + """Sort the indices of this array/matrix *in place* + """ + + if not self.has_sorted_indices: + _sparsetools.csr_sort_indices(len(self.indptr) - 1, self.indptr, + self.indices, self.data) + self.has_sorted_indices = True + + def prune(self): + """Remove empty space after all non-zero elements. + """ + major_dim = self._swap(self.shape)[0] + + if len(self.indptr) != major_dim + 1: + raise ValueError('index pointer has invalid length') + if len(self.indices) < self.nnz: + raise ValueError('indices array has fewer than nnz elements') + if len(self.data) < self.nnz: + raise ValueError('data array has fewer than nnz elements') + + self.indices = _prune_array(self.indices[:self.nnz]) + self.data = _prune_array(self.data[:self.nnz]) + + def resize(self, *shape): + shape = check_shape(shape) + if hasattr(self, 'blocksize'): + bm, bn = self.blocksize + new_M, rm = divmod(shape[0], bm) + new_N, rn = divmod(shape[1], bn) + if rm or rn: + raise ValueError("shape must be divisible into {} blocks. 
" + "Got {}".format(self.blocksize, shape)) + M, N = self.shape[0] // bm, self.shape[1] // bn + else: + new_M, new_N = self._swap(shape) + M, N = self._swap(self.shape) + + if new_M < M: + self.indices = self.indices[:self.indptr[new_M]] + self.data = self.data[:self.indptr[new_M]] + self.indptr = self.indptr[:new_M + 1] + elif new_M > M: + self.indptr = np.resize(self.indptr, new_M + 1) + self.indptr[M + 1:].fill(self.indptr[M]) + + if new_N < N: + mask = self.indices < new_N + if not np.all(mask): + self.indices = self.indices[mask] + self.data = self.data[mask] + major_index, val = self._minor_reduce(np.add, mask) + self.indptr.fill(0) + self.indptr[1:][major_index] = val + np.cumsum(self.indptr, out=self.indptr) + + self._shape = shape + + resize.__doc__ = _spbase.resize.__doc__ + + ################### + # utility methods # + ################### + + # needed by _data_matrix + def _with_data(self, data, copy=True): + """Returns a matrix with the same sparsity structure as self, + but with different data. By default the structure arrays + (i.e. .indptr and .indices) are copied. + """ + if copy: + return self.__class__((data, self.indices.copy(), + self.indptr.copy()), + shape=self.shape, + dtype=data.dtype) + else: + return self.__class__((data, self.indices, self.indptr), + shape=self.shape, dtype=data.dtype) + + def _binopt(self, other, op): + """apply the binary operation fn to two sparse matrices.""" + other = self.__class__(other) + + # e.g. csr_plus_csr, csr_minus_csr, etc. + fn = getattr(_sparsetools, self.format + op + self.format) + + maxnnz = self.nnz + other.nnz + idx_dtype = self._get_index_dtype((self.indptr, self.indices, + other.indptr, other.indices), + maxval=maxnnz) + indptr = np.empty(self.indptr.shape, dtype=idx_dtype) + indices = np.empty(maxnnz, dtype=idx_dtype) + + bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_'] + if op in bool_ops: + data = np.empty(maxnnz, dtype=np.bool_) + else: + data = np.empty(maxnnz, dtype=upcast(self.dtype, other.dtype)) + + fn(self.shape[0], self.shape[1], + np.asarray(self.indptr, dtype=idx_dtype), + np.asarray(self.indices, dtype=idx_dtype), + self.data, + np.asarray(other.indptr, dtype=idx_dtype), + np.asarray(other.indices, dtype=idx_dtype), + other.data, + indptr, indices, data) + + A = self.__class__((data, indices, indptr), shape=self.shape) + A.prune() + + return A + + def _divide_sparse(self, other): + """ + Divide this matrix by a second sparse matrix. + """ + if other.shape != self.shape: + raise ValueError('inconsistent shapes') + + r = self._binopt(other, '_eldiv_') + + if np.issubdtype(r.dtype, np.inexact): + # Eldiv leaves entries outside the combined sparsity + # pattern empty, so they must be filled manually. + # Everything outside of other's sparsity is NaN, and everything + # inside it is either zero or defined by eldiv. 
+ out = np.empty(self.shape, dtype=self.dtype) + out.fill(np.nan) + row, col = other.nonzero() + out[row, col] = 0 + r = r.tocoo() + out[r.row, r.col] = r.data + out = self._container(out) + else: + # integers types go with nan <-> 0 + out = r + + return out + + +def _process_slice(sl, num): + if sl is None: + i0, i1 = 0, num + elif isinstance(sl, slice): + i0, i1, stride = sl.indices(num) + if stride != 1: + raise ValueError('slicing with step != 1 not supported') + i0 = min(i0, i1) # give an empty slice when i0 > i1 + elif isintlike(sl): + if sl < 0: + sl += num + i0, i1 = sl, sl + 1 + if i0 < 0 or i1 > num: + raise IndexError('index out of bounds: 0 <= %d < %d <= %d' % + (i0, i1, num)) + else: + raise TypeError('expected slice or scalar') + + return i0, i1 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_construct.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_construct.py new file mode 100644 index 0000000000000000000000000000000000000000..6f5d3dd514d2684bfc6a53ae485c183e299f780f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_construct.py @@ -0,0 +1,1401 @@ +"""Functions to construct sparse matrices and arrays +""" + +__docformat__ = "restructuredtext en" + +__all__ = ['spdiags', 'eye', 'identity', 'kron', 'kronsum', + 'hstack', 'vstack', 'bmat', 'rand', 'random', 'diags', 'block_diag', + 'diags_array', 'block_array', 'eye_array', 'random_array'] + +import numbers +import math +import numpy as np + +from scipy._lib._util import check_random_state, rng_integers +from ._sputils import upcast, get_index_dtype, isscalarlike + +from ._sparsetools import csr_hstack +from ._bsr import bsr_matrix, bsr_array +from ._coo import coo_matrix, coo_array +from ._csc import csc_matrix, csc_array +from ._csr import csr_matrix, csr_array +from ._dia import dia_matrix, dia_array + +from ._base import issparse, sparray + + +def spdiags(data, diags, m=None, n=None, format=None): + """ + Return a sparse matrix from diagonals. + + Parameters + ---------- + data : array_like + Matrix diagonals stored row-wise + diags : sequence of int or an int + Diagonals to set: + + * k = 0 the main diagonal + * k > 0 the kth upper diagonal + * k < 0 the kth lower diagonal + m, n : int, tuple, optional + Shape of the result. If `n` is None and `m` is a given tuple, + the shape is this tuple. If omitted, the matrix is square and + its shape is len(data[0]). + format : str, optional + Format of the result. By default (format=None) an appropriate sparse + matrix format is returned. This choice is subject to change. + + .. warning:: + + This function returns a sparse matrix -- not a sparse array. + You are encouraged to use ``diags_array`` to take advantage + of the sparse array functionality. + + See Also + -------- + diags_array : more convenient form of this function + diags : matrix version of diags_array + dia_matrix : the sparse DIAgonal format. + + Examples + -------- + >>> import numpy as np + >>> from scipy.sparse import spdiags + >>> data = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]]) + >>> diags = np.array([0, -1, 2]) + >>> spdiags(data, diags, 4, 4).toarray() + array([[1, 0, 3, 0], + [1, 2, 0, 4], + [0, 2, 3, 0], + [0, 0, 3, 4]]) + + """ + if m is None and n is None: + m = n = len(data[0]) + elif n is None: + m, n = m + return dia_matrix((data, diags), shape=(m, n)).asformat(format) + + +def diags_array(diagonals, /, *, offsets=0, shape=None, format=None, dtype=None): + """ + Construct a sparse array from diagonals. 
+ + Parameters + ---------- + diagonals : sequence of array_like + Sequence of arrays containing the array diagonals, + corresponding to `offsets`. + offsets : sequence of int or an int, optional + Diagonals to set: + - k = 0 the main diagonal (default) + - k > 0 the kth upper diagonal + - k < 0 the kth lower diagonal + shape : tuple of int, optional + Shape of the result. If omitted, a square array large enough + to contain the diagonals is returned. + format : {"dia", "csr", "csc", "lil", ...}, optional + Matrix format of the result. By default (format=None) an + appropriate sparse array format is returned. This choice is + subject to change. + dtype : dtype, optional + Data type of the array. + + Notes + ----- + The result from `diags_array` is the sparse equivalent of:: + + np.diag(diagonals[0], offsets[0]) + + ... + + np.diag(diagonals[k], offsets[k]) + + Repeated diagonal offsets are disallowed. + + .. versionadded:: 1.11 + + Examples + -------- + >>> from scipy.sparse import diags_array + >>> diagonals = [[1, 2, 3, 4], [1, 2, 3], [1, 2]] + >>> diags_array(diagonals, offsets=[0, -1, 2]).toarray() + array([[1, 0, 1, 0], + [1, 2, 0, 2], + [0, 2, 3, 0], + [0, 0, 3, 4]]) + + Broadcasting of scalars is supported (but shape needs to be + specified): + + >>> diags_array([1, -2, 1], offsets=[-1, 0, 1], shape=(4, 4)).toarray() + array([[-2., 1., 0., 0.], + [ 1., -2., 1., 0.], + [ 0., 1., -2., 1.], + [ 0., 0., 1., -2.]]) + + + If only one diagonal is wanted (as in `numpy.diag`), the following + works as well: + + >>> diags_array([1, 2, 3], offsets=1).toarray() + array([[ 0., 1., 0., 0.], + [ 0., 0., 2., 0.], + [ 0., 0., 0., 3.], + [ 0., 0., 0., 0.]]) + """ + # if offsets is not a sequence, assume that there's only one diagonal + if isscalarlike(offsets): + # now check that there's actually only one diagonal + if len(diagonals) == 0 or isscalarlike(diagonals[0]): + diagonals = [np.atleast_1d(diagonals)] + else: + raise ValueError("Different number of diagonals and offsets.") + else: + diagonals = list(map(np.atleast_1d, diagonals)) + + offsets = np.atleast_1d(offsets) + + # Basic check + if len(diagonals) != len(offsets): + raise ValueError("Different number of diagonals and offsets.") + + # Determine shape, if omitted + if shape is None: + m = len(diagonals[0]) + abs(int(offsets[0])) + shape = (m, m) + + # Determine data type, if omitted + if dtype is None: + dtype = np.common_type(*diagonals) + + # Construct data array + m, n = shape + + M = max([min(m + offset, n - offset) + max(0, offset) + for offset in offsets]) + M = max(0, M) + data_arr = np.zeros((len(offsets), M), dtype=dtype) + + K = min(m, n) + + for j, diagonal in enumerate(diagonals): + offset = offsets[j] + k = max(0, offset) + length = min(m + offset, n - offset, K) + if length < 0: + raise ValueError("Offset %d (index %d) out of bounds" % (offset, j)) + try: + data_arr[j, k:k+length] = diagonal[...,:length] + except ValueError as e: + if len(diagonal) != length and len(diagonal) != 1: + raise ValueError( + "Diagonal length (index %d: %d at offset %d) does not " + "agree with array size (%d, %d)." % ( + j, len(diagonal), offset, m, n)) from e + raise + + return dia_array((data_arr, offsets), shape=(m, n)).asformat(format) + + +def diags(diagonals, offsets=0, shape=None, format=None, dtype=None): + """ + Construct a sparse matrix from diagonals. + + .. warning:: + + This function returns a sparse matrix -- not a sparse array. + You are encouraged to use ``diags_array`` to take advantage + of the sparse array functionality. 
+
+    Parameters
+    ----------
+    diagonals : sequence of array_like
+        Sequence of arrays containing the matrix diagonals,
+        corresponding to `offsets`.
+    offsets : sequence of int or an int, optional
+        Diagonals to set:
+          - k = 0  the main diagonal (default)
+          - k > 0  the kth upper diagonal
+          - k < 0  the kth lower diagonal
+    shape : tuple of int, optional
+        Shape of the result. If omitted, a square matrix large enough
+        to contain the diagonals is returned.
+    format : {"dia", "csr", "csc", "lil", ...}, optional
+        Matrix format of the result. By default (format=None) an
+        appropriate sparse matrix format is returned. This choice is
+        subject to change.
+    dtype : dtype, optional
+        Data type of the matrix.
+
+    See Also
+    --------
+    spdiags : construct matrix from diagonals
+    diags_array : construct sparse array instead of sparse matrix
+
+    Notes
+    -----
+    This function differs from `spdiags` in the way it handles
+    off-diagonals.
+
+    The result from `diags` is the sparse equivalent of::
+
+        np.diag(diagonals[0], offsets[0])
+        + ...
+        + np.diag(diagonals[k], offsets[k])
+
+    Repeated diagonal offsets are disallowed.
+
+    .. versionadded:: 0.11
+
+    Examples
+    --------
+    >>> from scipy.sparse import diags
+    >>> diagonals = [[1, 2, 3, 4], [1, 2, 3], [1, 2]]
+    >>> diags(diagonals, [0, -1, 2]).toarray()
+    array([[1, 0, 1, 0],
+           [1, 2, 0, 2],
+           [0, 2, 3, 0],
+           [0, 0, 3, 4]])
+
+    Broadcasting of scalars is supported (but shape needs to be
+    specified):
+
+    >>> diags([1, -2, 1], [-1, 0, 1], shape=(4, 4)).toarray()
+    array([[-2.,  1.,  0.,  0.],
+           [ 1., -2.,  1.,  0.],
+           [ 0.,  1., -2.,  1.],
+           [ 0.,  0.,  1., -2.]])
+
+
+    If only one diagonal is wanted (as in `numpy.diag`), the following
+    works as well:
+
+    >>> diags([1, 2, 3], 1).toarray()
+    array([[ 0.,  1.,  0.,  0.],
+           [ 0.,  0.,  2.,  0.],
+           [ 0.,  0.,  0.,  3.],
+           [ 0.,  0.,  0.,  0.]])
+    """
+    A = diags_array(diagonals, offsets=offsets, shape=shape, dtype=dtype)
+    return dia_matrix(A).asformat(format)
+
+
+def identity(n, dtype='d', format=None):
+    """Identity matrix in sparse format
+
+    Returns an identity matrix with shape (n,n) using a given
+    sparse format and dtype. This differs from `eye_array` in
+    that it has a square shape with ones only on the main diagonal.
+    It is thus the multiplicative identity. `eye_array` allows
+    rectangular shapes and the diagonal can be offset from the main one.
+
+    .. warning::
+
+        This function returns a sparse matrix -- not a sparse array.
+        You are encouraged to use ``eye_array`` to take advantage
+        of the sparse array functionality.
+
+    Parameters
+    ----------
+    n : int
+        Shape of the identity matrix.
+    dtype : dtype, optional
+        Data type of the matrix
+    format : str, optional
+        Sparse format of the result, e.g., format="csr", etc.
+
+    Examples
+    --------
+    >>> import scipy as sp
+    >>> sp.sparse.identity(3).toarray()
+    array([[ 1.,  0.,  0.],
+           [ 0.,  1.,  0.],
+           [ 0.,  0.,  1.]])
+    >>> sp.sparse.identity(3, dtype='int8', format='dia')
+    <3x3 sparse matrix of type '<class 'numpy.int8'>'
+        with 3 stored elements (1 diagonals) in DIAgonal format>
+    >>> sp.sparse.eye_array(3, dtype='int8', format='dia')
+    <3x3 sparse array of type '<class 'numpy.int8'>'
+        with 3 stored elements (1 diagonals) in DIAgonal format>
+
+    """
+    return eye(n, n, dtype=dtype, format=format)
+
+
+def eye_array(m, n=None, *, k=0, dtype=float, format=None):
+    """Identity matrix in sparse array format
+
+    Return a sparse array with ones on diagonal.
+    Specifically a sparse array (m x n) where the kth diagonal
+    is all ones and everything else is zeros.
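+
+    For example, ``eye_array(4, 3, k=-1)`` places ones at positions
+    (1, 0), (2, 1) and (3, 2).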
+
+    Parameters
+    ----------
+    m : int or tuple of ints
+        Number of rows requested.
+    n : int, optional
+        Number of columns. Default: `m`.
+    k : int, optional
+        Diagonal to place ones on. Default: 0 (main diagonal).
+    dtype : dtype, optional
+        Data type of the array
+    format : str, optional (default: "dia")
+        Sparse format of the result, e.g., format="csr", etc.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import scipy as sp
+    >>> sp.sparse.eye_array(3).toarray()
+    array([[ 1.,  0.,  0.],
+           [ 0.,  1.,  0.],
+           [ 0.,  0.,  1.]])
+    >>> sp.sparse.eye_array(3, dtype=np.int8)
+    <3x3 sparse array of type '<class 'numpy.int8'>'
+        with 3 stored elements (1 diagonals) in DIAgonal format>
+
+    """
+    # TODO: delete next 15 lines [combine with _eye()] once spmatrix removed
+    return _eye(m, n, k, dtype, format)
+
+
+def _eye(m, n, k, dtype, format, as_sparray=True):
+    if as_sparray:
+        csr_sparse = csr_array
+        csc_sparse = csc_array
+        coo_sparse = coo_array
+        diags_sparse = diags_array
+    else:
+        csr_sparse = csr_matrix
+        csc_sparse = csc_matrix
+        coo_sparse = coo_matrix
+        diags_sparse = diags
+
+    if n is None:
+        n = m
+    m, n = int(m), int(n)
+
+    if m == n and k == 0:
+        # fast branch for special formats
+        if format in ['csr', 'csc']:
+            idx_dtype = get_index_dtype(maxval=n)
+            indptr = np.arange(n+1, dtype=idx_dtype)
+            indices = np.arange(n, dtype=idx_dtype)
+            data = np.ones(n, dtype=dtype)
+            cls = {'csr': csr_sparse, 'csc': csc_sparse}[format]
+            return cls((data, indices, indptr), (n, n))
+
+        elif format == 'coo':
+            idx_dtype = get_index_dtype(maxval=n)
+            row = np.arange(n, dtype=idx_dtype)
+            col = np.arange(n, dtype=idx_dtype)
+            data = np.ones(n, dtype=dtype)
+            return coo_sparse((data, (row, col)), (n, n))
+
+    data = np.ones((1, max(0, min(m + k, n))), dtype=dtype)
+    return diags_sparse(data, offsets=[k], shape=(m, n), dtype=dtype).asformat(format)
+
+
+def eye(m, n=None, k=0, dtype=float, format=None):
+    """Sparse matrix with ones on diagonal
+
+    Returns a sparse matrix (m x n) where the kth diagonal
+    is all ones and everything else is zeros.
+
+    Parameters
+    ----------
+    m : int
+        Number of rows in the matrix.
+    n : int, optional
+        Number of columns. Default: `m`.
+    k : int, optional
+        Diagonal to place ones on. Default: 0 (main diagonal).
+    dtype : dtype, optional
+        Data type of the matrix.
+    format : str, optional
+        Sparse format of the result, e.g., format="csr", etc.
+
+    .. warning::
+
+        This function returns a sparse matrix -- not a sparse array.
+        You are encouraged to use ``eye_array`` to take advantage
+        of the sparse array functionality.
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> import scipy as sp
+    >>> sp.sparse.eye(3).toarray()
+    array([[ 1.,  0.,  0.],
+           [ 0.,  1.,  0.],
+           [ 0.,  0.,  1.]])
+    >>> sp.sparse.eye(3, dtype=np.int8)
+    <3x3 sparse matrix of type '<class 'numpy.int8'>'
+        with 3 stored elements (1 diagonals) in DIAgonal format>
+
+    """
+    return _eye(m, n, k, dtype, format, False)
+
+
+def kron(A, B, format=None):
+    """kronecker product of sparse matrices A and B
+
+    Parameters
+    ----------
+    A : sparse or dense matrix
+        first matrix of the product
+    B : sparse or dense matrix
+        second matrix of the product
+    format : str, optional (default: 'bsr' or 'coo')
+        format of the result (e.g. "csr")
+        If None, choose 'bsr' for relatively dense array and 'coo' for others
+
+    Returns
+    -------
+    kronecker product in a sparse format.
+    Returns a sparse matrix unless either A or B is a
+    sparse array in which case returns a sparse array.
+ + Examples + -------- + >>> import numpy as np + >>> import scipy as sp + >>> A = sp.sparse.csr_array(np.array([[0, 2], [5, 0]])) + >>> B = sp.sparse.csr_array(np.array([[1, 2], [3, 4]])) + >>> sp.sparse.kron(A, B).toarray() + array([[ 0, 0, 2, 4], + [ 0, 0, 6, 8], + [ 5, 10, 0, 0], + [15, 20, 0, 0]]) + + >>> sp.sparse.kron(A, [[1, 2], [3, 4]]).toarray() + array([[ 0, 0, 2, 4], + [ 0, 0, 6, 8], + [ 5, 10, 0, 0], + [15, 20, 0, 0]]) + + """ + # TODO: delete next 10 lines and replace _sparse with _array when spmatrix removed + if isinstance(A, sparray) or isinstance(B, sparray): + # convert to local variables + bsr_sparse = bsr_array + csr_sparse = csr_array + coo_sparse = coo_array + else: # use spmatrix + bsr_sparse = bsr_matrix + csr_sparse = csr_matrix + coo_sparse = coo_matrix + + B = coo_sparse(B) + + # B is fairly dense, use BSR + if (format is None or format == "bsr") and 2*B.nnz >= B.shape[0] * B.shape[1]: + A = csr_sparse(A,copy=True) + output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1]) + + if A.nnz == 0 or B.nnz == 0: + # kronecker product is the zero matrix + return coo_sparse(output_shape).asformat(format) + + B = B.toarray() + data = A.data.repeat(B.size).reshape(-1,B.shape[0],B.shape[1]) + data = data * B + + return bsr_sparse((data,A.indices,A.indptr), shape=output_shape) + else: + # use COO + A = coo_sparse(A) + output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1]) + + if A.nnz == 0 or B.nnz == 0: + # kronecker product is the zero matrix + return coo_sparse(output_shape).asformat(format) + + # expand entries of a into blocks + row = A.row.repeat(B.nnz) + col = A.col.repeat(B.nnz) + data = A.data.repeat(B.nnz) + + if max(A.shape[0]*B.shape[0], A.shape[1]*B.shape[1]) > np.iinfo('int32').max: + row = row.astype(np.int64) + col = col.astype(np.int64) + + row *= B.shape[0] + col *= B.shape[1] + + # increment block indices + row,col = row.reshape(-1,B.nnz),col.reshape(-1,B.nnz) + row += B.row + col += B.col + row,col = row.reshape(-1),col.reshape(-1) + + # compute block entries + data = data.reshape(-1,B.nnz) * B.data + data = data.reshape(-1) + + return coo_sparse((data,(row,col)), shape=output_shape).asformat(format) + + +def kronsum(A, B, format=None): + """kronecker sum of square sparse matrices A and B + + Kronecker sum of two sparse matrices is a sum of two Kronecker + products kron(I_n,A) + kron(B,I_m) where A has shape (m,m) + and B has shape (n,n) and I_m and I_n are identity matrices + of shape (m,m) and (n,n), respectively. + + Parameters + ---------- + A + square matrix + B + square matrix + format : str + format of the result (e.g. 
"csr") + + Returns + ------- + kronecker sum in a sparse matrix format + + """ + # TODO: delete next 8 lines and replace _sparse with _array when spmatrix removed + if isinstance(A, sparray) or isinstance(B, sparray): + # convert to local variables + coo_sparse = coo_array + identity_sparse = eye_array + else: + coo_sparse = coo_matrix + identity_sparse = identity + + A = coo_sparse(A) + B = coo_sparse(B) + + if A.shape[0] != A.shape[1]: + raise ValueError('A is not square') + + if B.shape[0] != B.shape[1]: + raise ValueError('B is not square') + + dtype = upcast(A.dtype, B.dtype) + + I_n = identity_sparse(A.shape[0], dtype=dtype) + I_m = identity_sparse(B.shape[0], dtype=dtype) + L = kron(I_m, A, format='coo') + R = kron(B, I_n, format='coo') + + return (L + R).asformat(format) + + +def _compressed_sparse_stack(blocks, axis, return_spmatrix): + """ + Stacking fast path for CSR/CSC matrices or arrays + (i) vstack for CSR, (ii) hstack for CSC. + """ + other_axis = 1 if axis == 0 else 0 + data = np.concatenate([b.data for b in blocks]) + constant_dim = blocks[0].shape[other_axis] + idx_dtype = get_index_dtype(arrays=[b.indptr for b in blocks], + maxval=max(data.size, constant_dim)) + indices = np.empty(data.size, dtype=idx_dtype) + indptr = np.empty(sum(b.shape[axis] for b in blocks) + 1, dtype=idx_dtype) + last_indptr = idx_dtype(0) + sum_dim = 0 + sum_indices = 0 + for b in blocks: + if b.shape[other_axis] != constant_dim: + raise ValueError(f'incompatible dimensions for axis {other_axis}') + indices[sum_indices:sum_indices+b.indices.size] = b.indices + sum_indices += b.indices.size + idxs = slice(sum_dim, sum_dim + b.shape[axis]) + indptr[idxs] = b.indptr[:-1] + indptr[idxs] += last_indptr + sum_dim += b.shape[axis] + last_indptr += b.indptr[-1] + indptr[-1] = last_indptr + # TODO remove this if-structure when sparse matrices removed + if return_spmatrix: + if axis == 0: + return csr_matrix((data, indices, indptr), + shape=(sum_dim, constant_dim)) + else: + return csc_matrix((data, indices, indptr), + shape=(constant_dim, sum_dim)) + + if axis == 0: + return csr_array((data, indices, indptr), + shape=(sum_dim, constant_dim)) + else: + return csc_array((data, indices, indptr), + shape=(constant_dim, sum_dim)) + + +def _stack_along_minor_axis(blocks, axis): + """ + Stacking fast path for CSR/CSC matrices along the minor axis + (i) hstack for CSR, (ii) vstack for CSC. + """ + n_blocks = len(blocks) + if n_blocks == 0: + raise ValueError('Missing block matrices') + + if n_blocks == 1: + return blocks[0] + + # check for incompatible dimensions + other_axis = 1 if axis == 0 else 0 + other_axis_dims = {b.shape[other_axis] for b in blocks} + if len(other_axis_dims) > 1: + raise ValueError(f'Mismatching dimensions along axis {other_axis}: ' + f'{other_axis_dims}') + constant_dim, = other_axis_dims + + # Do the stacking + indptr_list = [b.indptr for b in blocks] + data_cat = np.concatenate([b.data for b in blocks]) + + # Need to check if any indices/indptr, would be too large post- + # concatenation for np.int32: + # - The max value of indices is the output array's stacking-axis length - 1 + # - The max value in indptr is the number of non-zero entries. This is + # exceedingly unlikely to require int64, but is checked out of an + # abundance of caution. 
+    sum_dim = sum(b.shape[axis] for b in blocks)
+    nnz = sum(len(b.indices) for b in blocks)
+    idx_dtype = get_index_dtype(maxval=max(sum_dim - 1, nnz))
+    stack_dim_cat = np.array([b.shape[axis] for b in blocks], dtype=idx_dtype)
+    if data_cat.size > 0:
+        indptr_cat = np.concatenate(indptr_list).astype(idx_dtype)
+        indices_cat = (np.concatenate([b.indices for b in blocks])
+                       .astype(idx_dtype))
+        indptr = np.empty(constant_dim + 1, dtype=idx_dtype)
+        indices = np.empty_like(indices_cat)
+        data = np.empty_like(data_cat)
+        csr_hstack(n_blocks, constant_dim, stack_dim_cat,
+                   indptr_cat, indices_cat, data_cat,
+                   indptr, indices, data)
+    else:
+        indptr = np.zeros(constant_dim + 1, dtype=idx_dtype)
+        indices = np.empty(0, dtype=idx_dtype)
+        data = np.empty(0, dtype=data_cat.dtype)
+
+    if axis == 0:
+        return blocks[0]._csc_container((data, indices, indptr),
+                                        shape=(sum_dim, constant_dim))
+    else:
+        return blocks[0]._csr_container((data, indices, indptr),
+                                        shape=(constant_dim, sum_dim))
+
+
+def hstack(blocks, format=None, dtype=None):
+    """
+    Stack sparse matrices horizontally (column wise)
+
+    Parameters
+    ----------
+    blocks
+        sequence of sparse matrices with compatible shapes
+    format : str
+        sparse format of the result (e.g., "csr")
+        by default an appropriate sparse matrix format is returned.
+        This choice is subject to change.
+    dtype : dtype, optional
+        The data-type of the output matrix. If not given, the dtype is
+        determined from that of `blocks`.
+
+    Returns
+    -------
+    new_array : sparse matrix or array
+        If any block in blocks is a sparse array, return a sparse array.
+        Otherwise return a sparse matrix.
+
+        If you want a sparse array built from blocks that are not sparse
+        arrays, use `block_array([blocks])` or convert one block
+        e.g. `blocks[0] = csr_array(blocks[0])`.
+
+    See Also
+    --------
+    vstack : stack sparse matrices vertically (row wise)
+
+    Examples
+    --------
+    >>> from scipy.sparse import coo_matrix, hstack
+    >>> A = coo_matrix([[1, 2], [3, 4]])
+    >>> B = coo_matrix([[5], [6]])
+    >>> hstack([A,B]).toarray()
+    array([[1, 2, 5],
+           [3, 4, 6]])
+
+    """
+    blocks = np.asarray(blocks, dtype='object')
+    if any(isinstance(b, sparray) for b in blocks.flat):
+        return _block([blocks], format, dtype)
+    else:
+        return _block([blocks], format, dtype, return_spmatrix=True)
+
+
+def vstack(blocks, format=None, dtype=None):
+    """
+    Stack sparse arrays vertically (row wise)
+
+    Parameters
+    ----------
+    blocks
+        sequence of sparse arrays with compatible shapes
+    format : str, optional
+        sparse format of the result (e.g., "csr")
+        by default an appropriate sparse array format is returned.
+        This choice is subject to change.
+    dtype : dtype, optional
+        The data-type of the output array. If not given, the dtype is
+        determined from that of `blocks`.
+
+    Returns
+    -------
+    new_array : sparse matrix or array
+        If any block in blocks is a sparse array, return a sparse array.
+        Otherwise return a sparse matrix.
+
+        If you want a sparse array built from blocks that are not sparse
+        arrays, use `block_array([[b] for b in blocks])` or convert one
+        block e.g. `blocks[0] = csr_array(blocks[0])`.
+ + See Also + -------- + hstack : stack sparse matrices horizontally (column wise) + + Examples + -------- + >>> from scipy.sparse import coo_array, vstack + >>> A = coo_array([[1, 2], [3, 4]]) + >>> B = coo_array([[5, 6]]) + >>> vstack([A, B]).toarray() + array([[1, 2], + [3, 4], + [5, 6]]) + + """ + blocks = np.asarray(blocks, dtype='object') + if any(isinstance(b, sparray) for b in blocks.flat): + return _block([[b] for b in blocks], format, dtype) + else: + return _block([[b] for b in blocks], format, dtype, return_spmatrix=True) + + +def bmat(blocks, format=None, dtype=None): + """ + Build a sparse array or matrix from sparse sub-blocks + + Note: `block_array` is preferred over `bmat`. They are the same function + except that `bmat` can return a deprecated sparse matrix. + `bmat` returns a coo_matrix if none of the inputs are a sparse array. + + .. warning:: + + This function returns a sparse matrix -- not a sparse array. + You are encouraged to use ``block_array`` to take advantage + of the sparse array functionality. + + Parameters + ---------- + blocks : array_like + Grid of sparse matrices with compatible shapes. + An entry of None implies an all-zero matrix. + format : {'bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'}, optional + The sparse format of the result (e.g. "csr"). By default an + appropriate sparse matrix format is returned. + This choice is subject to change. + dtype : dtype, optional + The data-type of the output matrix. If not given, the dtype is + determined from that of `blocks`. + + Returns + ------- + bmat : sparse matrix or array + If any block in blocks is a sparse array, return a sparse array. + Otherwise return a sparse matrix. + + If you want a sparse array built from blocks that are not sparse + arrays, use `block_array()`. + + See Also + -------- + block_array + + Examples + -------- + >>> from scipy.sparse import coo_array, bmat + >>> A = coo_array([[1, 2], [3, 4]]) + >>> B = coo_array([[5], [6]]) + >>> C = coo_array([[7]]) + >>> bmat([[A, B], [None, C]]).toarray() + array([[1, 2, 5], + [3, 4, 6], + [0, 0, 7]]) + + >>> bmat([[A, None], [None, C]]).toarray() + array([[1, 2, 0], + [3, 4, 0], + [0, 0, 7]]) + + """ + blocks = np.asarray(blocks, dtype='object') + if any(isinstance(b, sparray) for b in blocks.flat): + return _block(blocks, format, dtype) + else: + return _block(blocks, format, dtype, return_spmatrix=True) + + +def block_array(blocks, *, format=None, dtype=None): + """ + Build a sparse array from sparse sub-blocks + + Parameters + ---------- + blocks : array_like + Grid of sparse arrays with compatible shapes. + An entry of None implies an all-zero array. + format : {'bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'}, optional + The sparse format of the result (e.g. "csr"). By default an + appropriate sparse array format is returned. + This choice is subject to change. + dtype : dtype, optional + The data-type of the output array. If not given, the dtype is + determined from that of `blocks`. 
+ + Returns + ------- + block : sparse array + + See Also + -------- + block_diag : specify blocks along the main diagonals + diags : specify (possibly offset) diagonals + + Examples + -------- + >>> from scipy.sparse import coo_array, block_array + >>> A = coo_array([[1, 2], [3, 4]]) + >>> B = coo_array([[5], [6]]) + >>> C = coo_array([[7]]) + >>> block_array([[A, B], [None, C]]).toarray() + array([[1, 2, 5], + [3, 4, 6], + [0, 0, 7]]) + + >>> block_array([[A, None], [None, C]]).toarray() + array([[1, 2, 0], + [3, 4, 0], + [0, 0, 7]]) + + """ + return _block(blocks, format, dtype) + + +def _block(blocks, format, dtype, return_spmatrix=False): + blocks = np.asarray(blocks, dtype='object') + + if blocks.ndim != 2: + raise ValueError('blocks must be 2-D') + + M,N = blocks.shape + + # check for fast path cases + if (format in (None, 'csr') and + all(issparse(b) and b.format == 'csr' for b in blocks.flat) + ): + if N > 1: + # stack along columns (axis 1): must have shape (M, 1) + blocks = [[_stack_along_minor_axis(blocks[b, :], 1)] for b in range(M)] + blocks = np.asarray(blocks, dtype='object') + + # stack along rows (axis 0): + A = _compressed_sparse_stack(blocks[:, 0], 0, return_spmatrix) + if dtype is not None: + A = A.astype(dtype) + return A + elif (format in (None, 'csc') and + all(issparse(b) and b.format == 'csc' for b in blocks.flat) + ): + if M > 1: + # stack along rows (axis 0): must have shape (1, N) + blocks = [[_stack_along_minor_axis(blocks[:, b], 0) for b in range(N)]] + blocks = np.asarray(blocks, dtype='object') + + # stack along columns (axis 1): + A = _compressed_sparse_stack(blocks[0, :], 1, return_spmatrix) + if dtype is not None: + A = A.astype(dtype) + return A + + block_mask = np.zeros(blocks.shape, dtype=bool) + brow_lengths = np.zeros(M, dtype=np.int64) + bcol_lengths = np.zeros(N, dtype=np.int64) + + # convert everything to COO format + for i in range(M): + for j in range(N): + if blocks[i,j] is not None: + A = coo_array(blocks[i,j]) + blocks[i,j] = A + block_mask[i,j] = True + + if brow_lengths[i] == 0: + brow_lengths[i] = A.shape[0] + elif brow_lengths[i] != A.shape[0]: + msg = (f'blocks[{i},:] has incompatible row dimensions. ' + f'Got blocks[{i},{j}].shape[0] == {A.shape[0]}, ' + f'expected {brow_lengths[i]}.') + raise ValueError(msg) + + if bcol_lengths[j] == 0: + bcol_lengths[j] = A.shape[1] + elif bcol_lengths[j] != A.shape[1]: + msg = (f'blocks[:,{j}] has incompatible column ' + f'dimensions. 
'
+                           f'Got blocks[{i},{j}].shape[1] == {A.shape[1]}, '
+                           f'expected {bcol_lengths[j]}.')
+                    raise ValueError(msg)
+
+    nnz = sum(block.nnz for block in blocks[block_mask])
+    if dtype is None:
+        all_dtypes = [blk.dtype for blk in blocks[block_mask]]
+        dtype = upcast(*all_dtypes) if all_dtypes else None
+
+    row_offsets = np.append(0, np.cumsum(brow_lengths))
+    col_offsets = np.append(0, np.cumsum(bcol_lengths))
+
+    shape = (row_offsets[-1], col_offsets[-1])
+
+    data = np.empty(nnz, dtype=dtype)
+    idx_dtype = get_index_dtype(maxval=max(shape))
+    row = np.empty(nnz, dtype=idx_dtype)
+    col = np.empty(nnz, dtype=idx_dtype)
+
+    nnz = 0
+    ii, jj = np.nonzero(block_mask)
+    for i, j in zip(ii, jj):
+        B = blocks[i, j]
+        idx = slice(nnz, nnz + B.nnz)
+        data[idx] = B.data
+        np.add(B.row, row_offsets[i], out=row[idx], dtype=idx_dtype)
+        np.add(B.col, col_offsets[j], out=col[idx], dtype=idx_dtype)
+        nnz += B.nnz
+
+    if return_spmatrix:
+        return coo_matrix((data, (row, col)), shape=shape).asformat(format)
+    return coo_array((data, (row, col)), shape=shape).asformat(format)
+
+
+def block_diag(mats, format=None, dtype=None):
+    """
+    Build a block diagonal sparse matrix or array from provided matrices.
+
+    Parameters
+    ----------
+    mats : sequence of matrices or arrays
+        Input matrices or arrays.
+    format : str, optional
+        The sparse format of the result (e.g., "csr"). If not given, the result
+        is returned in "coo" format.
+    dtype : dtype specifier, optional
+        The data-type of the output. If not given, the dtype is
+        determined from that of `mats`.
+
+    Returns
+    -------
+    res : sparse matrix or array
+        If at least one input is a sparse array, the output is a sparse array.
+        Otherwise the output is a sparse matrix.
+
+    Notes
+    -----
+
+    .. versionadded:: 0.11.0
+
+    See Also
+    --------
+    block_array
+    diags_array
+
+    Examples
+    --------
+    >>> from scipy.sparse import coo_array, block_diag
+    >>> A = coo_array([[1, 2], [3, 4]])
+    >>> B = coo_array([[5], [6]])
+    >>> C = coo_array([[7]])
+    >>> block_diag((A, B, C)).toarray()
+    array([[1, 2, 0, 0],
+           [3, 4, 0, 0],
+           [0, 0, 5, 0],
+           [0, 0, 6, 0],
+           [0, 0, 0, 7]])
+
+    """
+    if any(isinstance(a, sparray) for a in mats):
+        container = coo_array
+    else:
+        container = coo_matrix
+
+    row = []
+    col = []
+    data = []
+    r_idx = 0
+    c_idx = 0
+    for a in mats:
+        if isinstance(a, (list, numbers.Number)):
+            a = coo_array(np.atleast_2d(a))
+        if issparse(a):
+            a = a.tocoo()
+            nrows, ncols = a._shape_as_2d
+            row.append(a.row + r_idx)
+            col.append(a.col + c_idx)
+            data.append(a.data)
+        else:
+            nrows, ncols = a.shape
+            a_row, a_col = np.divmod(np.arange(nrows*ncols), ncols)
+            row.append(a_row + r_idx)
+            col.append(a_col + c_idx)
+            data.append(a.ravel())
+        r_idx += nrows
+        c_idx += ncols
+    row = np.concatenate(row)
+    col = np.concatenate(col)
+    data = np.concatenate(data)
+    return container((data, (row, col)),
+                     shape=(r_idx, c_idx),
+                     dtype=dtype).asformat(format)
+
+
+def random_array(shape, *, density=0.01, format='coo', dtype=None,
+                 random_state=None, data_sampler=None):
+    """Return a sparse array of uniformly random numbers in [0, 1)
+
+    Returns a sparse array with the given shape and density
+    where values are generated uniformly randomly in the range [0, 1).
+
+    .. warning::
+
+       Since numpy 1.17, passing a ``np.random.Generator`` (e.g.
+       ``np.random.default_rng``) for ``random_state`` will lead to much
+       faster execution times.
+
+       When ``random_state`` is None (the default), a new ``Generator``
+       is created automatically, so the fast path is taken. The slower
+       legacy implementation applies only when a ``RandomState`` is
+       supplied, for backwards compatibility.
+
+    Parameters
+    ----------
+    shape : int or tuple of ints
+        shape of the array
+    density : real, optional (default: 0.01)
+        density of the generated matrix: density equal to one means a full
+        matrix, density of 0 means a matrix with no non-zero items.
+    format : str, optional (default: 'coo')
+        sparse matrix format.
+    dtype : dtype, optional (default: np.float64)
+        type of the returned matrix values.
+    random_state : {None, int, `Generator`, `RandomState`}, optional
+        A random number generator to determine nonzero structure. We recommend
+        passing a `numpy.random.Generator` explicitly, as it is much faster
+        than `RandomState`.
+
+        - If `None` (the default), a new ``numpy.random.Generator`` is
+          created and used.
+        - If an int, a new ``Generator`` instance is used,
+          seeded with the int.
+        - If a ``Generator`` or ``RandomState`` instance then
+          that instance is used.
+
+        This random state will be used for sampling `indices` (the sparsity
+        structure), and by default for the data values too (see `data_sampler`).
+
+    data_sampler : callable, optional (default depends on dtype)
+        Sampler of random data values with keyword arg `size`.
+        This function should take a single keyword argument `size` specifying
+        the length of its returned ndarray. It is used to generate the nonzero
+        values in the matrix after the locations of those values are chosen.
+        By default, uniform [0, 1) random values are used unless `dtype` is
+        an integer (default uniform integers from that dtype) or
+        complex (default uniform over the unit square in the complex plane).
+        For these, the `random_state` rng is used, e.g. ``rng.uniform(size=size)``.
+
+    Returns
+    -------
+    res : sparse array
+
+    Examples
+    --------
+
+    Passing a ``np.random.Generator`` instance for better performance:
+
+    >>> import numpy as np
+    >>> import scipy as sp
+    >>> rng = np.random.default_rng()
+
+    Default sampling uniformly from [0, 1):
+
+    >>> S = sp.sparse.random_array((3, 4), density=0.25, random_state=rng)
+
+    Providing a sampler for the values:
+
+    >>> rvs = sp.stats.poisson(25, loc=10).rvs
+    >>> S = sp.sparse.random_array((3, 4), density=0.25,
+    ...                            random_state=rng, data_sampler=rvs)
+    >>> S.toarray()
+    array([[ 36.,   0.,  33.,   0.],   # random
+           [  0.,   0.,   0.,   0.],
+           [  0.,   0.,  36.,   0.]])
+
+    Building a custom distribution.
+    This example builds a squared normal from np.random:
+
+    >>> def np_normal_squared(size=None, random_state=rng):
+    ...     return random_state.standard_normal(size) ** 2
+    >>> S = sp.sparse.random_array((3, 4), density=0.25, random_state=rng,
+    ...                            data_sampler=np_normal_squared)
+
+    Or we can build it from sp.stats style rvs functions:
+
+    >>> def sp_stats_normal_squared(size=None, random_state=rng):
+    ...     std_normal = sp.stats.distributions.norm_gen().rvs
+    ...     return std_normal(size=size, random_state=random_state) ** 2
+    >>> S = sp.sparse.random_array((3, 4), density=0.25, random_state=rng,
+    ...                            data_sampler=sp_stats_normal_squared)
+
+    Or we can subclass sp.stats rv_continuous or rv_discrete:
+
+    >>> class NormalSquared(sp.stats.rv_continuous):
+    ...     def _rvs(self, size=None, random_state=rng):
+    ...         return random_state.standard_normal(size) ** 2
+    >>> X = NormalSquared()
+    >>> Y = X().rvs
+    >>> S = sp.sparse.random_array((3, 4), density=0.25,
+    ...                            random_state=rng, data_sampler=Y)
+    """
+    # Use the more efficient RNG by default.
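+    # (np.random.default_rng() returns a Generator; in `_random` below it
+    # samples the nonzero positions much faster than the legacy RandomState
+    # path.)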
+    if random_state is None:
+        random_state = np.random.default_rng()
+    data, ind = _random(shape, density, format, dtype, random_state, data_sampler)
+    return coo_array((data, ind), shape=shape).asformat(format)
+
+
+def _random(shape, density=0.01, format=None, dtype=None,
+            random_state=None, data_sampler=None):
+    if density < 0 or density > 1:
+        raise ValueError("density expected to be 0 <= density <= 1")
+
+    tot_prod = math.prod(shape)  # use `math` for when prod is >= 2**64
+
+    # Number of nonzero values
+    size = int(round(density * tot_prod))
+
+    rng = check_random_state(random_state)
+
+    if data_sampler is None:
+        if np.issubdtype(dtype, np.integer):
+            def data_sampler(size):
+                return rng_integers(rng,
+                                    np.iinfo(dtype).min,
+                                    np.iinfo(dtype).max,
+                                    size,
+                                    dtype=dtype)
+        elif np.issubdtype(dtype, np.complexfloating):
+            def data_sampler(size):
+                return (rng.uniform(size=size)
+                        + rng.uniform(size=size) * 1j)
+        else:
+            data_sampler = rng.uniform
+
+    # rng.choice uses int64 if first arg is an int
+    if tot_prod < np.iinfo(np.int64).max:
+        raveled_ind = rng.choice(tot_prod, size=size, replace=False)
+        ind = np.unravel_index(raveled_ind, shape=shape, order='F')
+    else:
+        # for ravel indices bigger than dtype max, use sets to remove duplicates
+        ndim = len(shape)
+        seen = set()
+        while len(seen) < size:
+            dsize = size - len(seen)
+            seen.update(map(tuple, rng_integers(rng, shape, size=(dsize, ndim))))
+        ind = tuple(np.array(list(seen)).T)
+
+    # size kwarg allows e.g. data_sampler=partial(np.random.poisson, lam=5)
+    vals = data_sampler(size=size).astype(dtype, copy=False)
+    return vals, ind
+
+
+def random(m, n, density=0.01, format='coo', dtype=None,
+           random_state=None, data_rvs=None):
+    """Generate a sparse matrix of the given shape and density with randomly
+    distributed values.
+
+    .. warning::
+
+       Since numpy 1.17, passing a ``np.random.Generator`` (e.g.
+       ``np.random.default_rng``) for ``random_state`` will lead to much
+       faster execution times.
+
+       A much slower implementation is used by default for backwards
+       compatibility.
+
+    .. warning::
+
+       This function returns a sparse matrix -- not a sparse array.
+       You are encouraged to use ``random_array`` to take advantage of the
+       sparse array functionality.
+
+    Parameters
+    ----------
+    m, n : int
+        shape of the matrix
+    density : real, optional
+        density of the generated matrix: density equal to one means a full
+        matrix, density of 0 means a matrix with no non-zero items.
+    format : str, optional
+        sparse matrix format.
+    dtype : dtype, optional
+        type of the returned matrix values.
+    random_state : {None, int, `numpy.random.Generator`,
+                    `numpy.random.RandomState`}, optional
+
+        - If `random_state` is None (or `np.random`), the
+          `numpy.random.RandomState` singleton is used.
+        - If `random_state` is an int, a new ``RandomState`` instance is used,
+          seeded with `random_state`.
+        - If `random_state` is already a ``Generator`` or ``RandomState``
+          instance then that instance is used.
+
+        This random state will be used for sampling the sparsity structure, but
+        not necessarily for sampling the values of the structurally nonzero
+        entries of the matrix.
+    data_rvs : callable, optional
+        Samples a requested number of random values.
+        This function should take a single argument specifying the length
+        of the ndarray that it will return. The structurally nonzero entries
+        of the sparse random matrix will be taken from the array sampled
+        by this function.
By default, uniform [0, 1) random values will be
+        sampled using the same random state as is used for sampling
+        the sparsity structure.
+
+    Returns
+    -------
+    res : sparse matrix
+
+    See Also
+    --------
+    random_array : constructs sparse arrays instead of sparse matrices
+
+    Examples
+    --------
+
+    Passing a ``np.random.Generator`` instance for better performance:
+
+    >>> import scipy as sp
+    >>> import numpy as np
+    >>> rng = np.random.default_rng()
+    >>> S = sp.sparse.random(3, 4, density=0.25, random_state=rng)
+
+    Providing a sampler for the values:
+
+    >>> rvs = sp.stats.poisson(25, loc=10).rvs
+    >>> S = sp.sparse.random(3, 4, density=0.25, random_state=rng, data_rvs=rvs)
+    >>> S.toarray()
+    array([[ 36.,   0.,  33.,   0.],   # random
+           [  0.,   0.,   0.,   0.],
+           [  0.,   0.,  36.,   0.]])
+
+    Building a custom distribution.
+    This example builds a squared normal from np.random:
+
+    >>> def np_normal_squared(size=None, random_state=rng):
+    ...     return random_state.standard_normal(size) ** 2
+    >>> S = sp.sparse.random(3, 4, density=0.25, random_state=rng,
+    ...                      data_rvs=np_normal_squared)
+
+    Or we can build it from sp.stats style rvs functions:
+
+    >>> def sp_stats_normal_squared(size=None, random_state=rng):
+    ...     std_normal = sp.stats.distributions.norm_gen().rvs
+    ...     return std_normal(size=size, random_state=random_state) ** 2
+    >>> S = sp.sparse.random(3, 4, density=0.25, random_state=rng,
+    ...                      data_rvs=sp_stats_normal_squared)
+
+    Or we can subclass sp.stats rv_continuous or rv_discrete:
+
+    >>> class NormalSquared(sp.stats.rv_continuous):
+    ...     def _rvs(self, size=None, random_state=rng):
+    ...         return random_state.standard_normal(size) ** 2
+    >>> X = NormalSquared()
+    >>> Y = X()  # get a frozen version of the distribution
+    >>> S = sp.sparse.random(3, 4, density=0.25, random_state=rng, data_rvs=Y.rvs)
+    """
+    if n is None:
+        n = m
+    m, n = int(m), int(n)
+    # make keyword syntax work for data_rvs, e.g. data_rvs(size=7)
+    if data_rvs is not None:
+        def data_rvs_kw(size):
+            return data_rvs(size)
+    else:
+        data_rvs_kw = None
+    vals, ind = _random((m, n), density, format, dtype, random_state, data_rvs_kw)
+    return coo_matrix((vals, ind), shape=(m, n)).asformat(format)
+
+
+def rand(m, n, density=0.01, format="coo", dtype=None, random_state=None):
+    """Generate a sparse matrix of the given shape and density with uniformly
+    distributed values.
+
+    .. warning::
+
+       This function returns a sparse matrix -- not a sparse array.
+       You are encouraged to use ``random_array`` to take advantage
+       of the sparse array functionality.
+
+    Parameters
+    ----------
+    m, n : int
+        shape of the matrix
+    density : real, optional
+        density of the generated matrix: density equal to one means a full
+        matrix, density of 0 means a matrix with no non-zero items.
+    format : str, optional
+        sparse matrix format.
+    dtype : dtype, optional
+        type of the returned matrix values.
+    random_state : {None, int, `numpy.random.Generator`,
+                    `numpy.random.RandomState`}, optional
+
+        - If `random_state` is None (or `np.random`), the
+          `numpy.random.RandomState` singleton is used.
+        - If `random_state` is an int, a new ``RandomState`` instance is used,
+          seeded with `random_state`.
+        - If `random_state` is already a ``Generator`` or ``RandomState``
+          instance then that instance is used.
+
+    Returns
+    -------
+    res : sparse matrix
+
+    Notes
+    -----
+    Only float types are supported for now.
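+
+    ``rand`` is a thin wrapper: it forwards all of its arguments to `random`
+    and relies on that function's default uniform sampler.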
+
+    See Also
+    --------
+    random : Similar function allowing a custom random data sampler
+    random_array : Similar to random() but returns a sparse array
+
+    Examples
+    --------
+    >>> from scipy.sparse import rand
+    >>> matrix = rand(3, 4, density=0.25, format="csr", random_state=42)
+    >>> matrix
+    <3x4 sparse matrix of type '<class 'numpy.float64'>'
+       with 3 stored elements in Compressed Sparse Row format>
+    >>> matrix.toarray()
+    array([[0.05641158, 0.        , 0.        , 0.65088847],  # random
+           [0.        , 0.        , 0.        , 0.14286682],
+           [0.        , 0.        , 0.        , 0.        ]])
+
+    """
+    return random(m, n, density, format, dtype, random_state)
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_csparsetools.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_csparsetools.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..fa6683236a9a5038d7ccb466199811a2e8e0c92c
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_csparsetools.cpython-310-x86_64-linux-gnu.so differ
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_csr.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_csr.py
new file mode 100644
index 0000000000000000000000000000000000000000..37c6ffacd8145a42ee74b5d71c1c736b7bb508e0
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_csr.py
@@ -0,0 +1,491 @@
+"""Compressed Sparse Row matrix format"""
+
+__docformat__ = "restructuredtext en"
+
+__all__ = ['csr_array', 'csr_matrix', 'isspmatrix_csr']
+
+import numpy as np
+
+from ._matrix import spmatrix
+from ._base import _spbase, sparray
+from ._sparsetools import (csr_tocsc, csr_tobsr, csr_count_blocks,
+                           get_csr_submatrix)
+from ._sputils import upcast
+
+from ._compressed import _cs_matrix
+
+
+class _csr_base(_cs_matrix):
+    _format = 'csr'
+
+    def transpose(self, axes=None, copy=False):
+        if axes is not None and axes != (1, 0):
+            raise ValueError("Sparse arrays/matrices do not support "
+                             "an 'axes' parameter because swapping "
+                             "dimensions is the only logical permutation.")
+
+        M, N = self.shape
+        return self._csc_container((self.data, self.indices,
+                                    self.indptr), shape=(N, M), copy=copy)
+
+    transpose.__doc__ = _spbase.transpose.__doc__
+
+    def tolil(self, copy=False):
+        lil = self._lil_container(self.shape, dtype=self.dtype)
+
+        self.sum_duplicates()
+        ptr, ind, dat = self.indptr, self.indices, self.data
+        rows, data = lil.rows, lil.data
+
+        for n in range(self.shape[0]):
+            start = ptr[n]
+            end = ptr[n+1]
+            rows[n] = ind[start:end].tolist()
+            data[n] = dat[start:end].tolist()
+
+        return lil
+
+    tolil.__doc__ = _spbase.tolil.__doc__
+
+    def tocsr(self, copy=False):
+        if copy:
+            return self.copy()
+        else:
+            return self
+
+    tocsr.__doc__ = _spbase.tocsr.__doc__
+
+    def tocsc(self, copy=False):
+        idx_dtype = self._get_index_dtype((self.indptr, self.indices),
+                                          maxval=max(self.nnz, self.shape[0]))
+        indptr = np.empty(self.shape[1] + 1, dtype=idx_dtype)
+        indices = np.empty(self.nnz, dtype=idx_dtype)
+        data = np.empty(self.nnz, dtype=upcast(self.dtype))
+
+        csr_tocsc(self.shape[0], self.shape[1],
+                  self.indptr.astype(idx_dtype),
+                  self.indices.astype(idx_dtype),
+                  self.data,
+                  indptr,
+                  indices,
+                  data)
+
+        A = self._csc_container((data, indices, indptr), shape=self.shape)
+        A.has_sorted_indices = True
+        return A
+
+    tocsc.__doc__ = _spbase.tocsc.__doc__
+
+    def tobsr(self, blocksize=None, copy=True):
+        if blocksize is None:
+            from ._spfuncs import estimate_blocksize
+            return self.tobsr(blocksize=estimate_blocksize(self))
+ + elif blocksize == (1,1): + arg1 = (self.data.reshape(-1,1,1),self.indices,self.indptr) + return self._bsr_container(arg1, shape=self.shape, copy=copy) + + else: + R,C = blocksize + M,N = self.shape + + if R < 1 or C < 1 or M % R != 0 or N % C != 0: + raise ValueError('invalid blocksize %s' % blocksize) + + blks = csr_count_blocks(M,N,R,C,self.indptr,self.indices) + + idx_dtype = self._get_index_dtype((self.indptr, self.indices), + maxval=max(N//C, blks)) + indptr = np.empty(M//R+1, dtype=idx_dtype) + indices = np.empty(blks, dtype=idx_dtype) + data = np.zeros((blks,R,C), dtype=self.dtype) + + csr_tobsr(M, N, R, C, + self.indptr.astype(idx_dtype), + self.indices.astype(idx_dtype), + self.data, + indptr, indices, data.ravel()) + + return self._bsr_container( + (data, indices, indptr), shape=self.shape + ) + + tobsr.__doc__ = _spbase.tobsr.__doc__ + + # these functions are used by the parent class (_cs_matrix) + # to remove redundancy between csc_matrix and csr_array + @staticmethod + def _swap(x): + """swap the members of x if this is a column-oriented matrix + """ + return x + + def __iter__(self): + indptr = np.zeros(2, dtype=self.indptr.dtype) + shape = (1, self.shape[1]) + i0 = 0 + for i1 in self.indptr[1:]: + indptr[1] = i1 - i0 + indices = self.indices[i0:i1] + data = self.data[i0:i1] + yield self.__class__( + (data, indices, indptr), shape=shape, copy=True + ) + i0 = i1 + + def _getrow(self, i): + """Returns a copy of row i of the matrix, as a (1 x n) + CSR matrix (row vector). + """ + M, N = self.shape + i = int(i) + if i < 0: + i += M + if i < 0 or i >= M: + raise IndexError('index (%d) out of range' % i) + indptr, indices, data = get_csr_submatrix( + M, N, self.indptr, self.indices, self.data, i, i + 1, 0, N) + return self.__class__((data, indices, indptr), shape=(1, N), + dtype=self.dtype, copy=False) + + def _getcol(self, i): + """Returns a copy of column i of the matrix, as a (m x 1) + CSR matrix (column vector). 
+ """ + M, N = self.shape + i = int(i) + if i < 0: + i += N + if i < 0 or i >= N: + raise IndexError('index (%d) out of range' % i) + indptr, indices, data = get_csr_submatrix( + M, N, self.indptr, self.indices, self.data, 0, M, i, i + 1) + return self.__class__((data, indices, indptr), shape=(M, 1), + dtype=self.dtype, copy=False) + + def _get_intXarray(self, row, col): + return self._getrow(row)._minor_index_fancy(col) + + def _get_intXslice(self, row, col): + if col.step in (1, None): + return self._get_submatrix(row, col, copy=True) + # TODO: uncomment this once it's faster: + # return self._getrow(row)._minor_slice(col) + + M, N = self.shape + start, stop, stride = col.indices(N) + + ii, jj = self.indptr[row:row+2] + row_indices = self.indices[ii:jj] + row_data = self.data[ii:jj] + + if stride > 0: + ind = (row_indices >= start) & (row_indices < stop) + else: + ind = (row_indices <= start) & (row_indices > stop) + + if abs(stride) > 1: + ind &= (row_indices - start) % stride == 0 + + row_indices = (row_indices[ind] - start) // stride + row_data = row_data[ind] + row_indptr = np.array([0, len(row_indices)]) + + if stride < 0: + row_data = row_data[::-1] + row_indices = abs(row_indices[::-1]) + + shape = (1, max(0, int(np.ceil(float(stop - start) / stride)))) + return self.__class__((row_data, row_indices, row_indptr), shape=shape, + dtype=self.dtype, copy=False) + + def _get_sliceXint(self, row, col): + if row.step in (1, None): + return self._get_submatrix(row, col, copy=True) + return self._major_slice(row)._get_submatrix(minor=col) + + def _get_sliceXarray(self, row, col): + return self._major_slice(row)._minor_index_fancy(col) + + def _get_arrayXint(self, row, col): + return self._major_index_fancy(row)._get_submatrix(minor=col) + + def _get_arrayXslice(self, row, col): + if col.step not in (1, None): + col = np.arange(*col.indices(self.shape[1])) + return self._get_arrayXarray(row, col) + return self._major_index_fancy(row)._get_submatrix(minor=col) + + +def isspmatrix_csr(x): + """Is `x` of csr_matrix type? + + Parameters + ---------- + x + object to check for being a csr matrix + + Returns + ------- + bool + True if `x` is a csr matrix, False otherwise + + Examples + -------- + >>> from scipy.sparse import csr_array, csr_matrix, coo_matrix, isspmatrix_csr + >>> isspmatrix_csr(csr_matrix([[5]])) + True + >>> isspmatrix_csr(csr_array([[5]])) + False + >>> isspmatrix_csr(coo_matrix([[5]])) + False + """ + return isinstance(x, csr_matrix) + + +# This namespace class separates array from matrix with isinstance +class csr_array(_csr_base, sparray): + """ + Compressed Sparse Row array. + + This can be instantiated in several ways: + csr_array(D) + where D is a 2-D ndarray + + csr_array(S) + with another sparse array or matrix S (equivalent to S.tocsr()) + + csr_array((M, N), [dtype]) + to construct an empty array with shape (M, N) + dtype is optional, defaulting to dtype='d'. + + csr_array((data, (row_ind, col_ind)), [shape=(M, N)]) + where ``data``, ``row_ind`` and ``col_ind`` satisfy the + relationship ``a[row_ind[k], col_ind[k]] = data[k]``. + + csr_array((data, indices, indptr), [shape=(M, N)]) + is the standard CSR representation where the column indices for + row i are stored in ``indices[indptr[i]:indptr[i+1]]`` and their + corresponding values are stored in ``data[indptr[i]:indptr[i+1]]``. + If the shape parameter is not supplied, the array dimensions + are inferred from the index arrays. 
+ + Attributes + ---------- + dtype : dtype + Data type of the array + shape : 2-tuple + Shape of the array + ndim : int + Number of dimensions (this is always 2) + nnz + size + data + CSR format data array of the array + indices + CSR format index array of the array + indptr + CSR format index pointer array of the array + has_sorted_indices + has_canonical_format + T + + Notes + ----- + + Sparse arrays can be used in arithmetic operations: they support + addition, subtraction, multiplication, division, and matrix power. + + Advantages of the CSR format + - efficient arithmetic operations CSR + CSR, CSR * CSR, etc. + - efficient row slicing + - fast matrix vector products + + Disadvantages of the CSR format + - slow column slicing operations (consider CSC) + - changes to the sparsity structure are expensive (consider LIL or DOK) + + Canonical Format + - Within each row, indices are sorted by column. + - There are no duplicate entries. + + Examples + -------- + + >>> import numpy as np + >>> from scipy.sparse import csr_array + >>> csr_array((3, 4), dtype=np.int8).toarray() + array([[0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]], dtype=int8) + + >>> row = np.array([0, 0, 1, 2, 2, 2]) + >>> col = np.array([0, 2, 2, 0, 1, 2]) + >>> data = np.array([1, 2, 3, 4, 5, 6]) + >>> csr_array((data, (row, col)), shape=(3, 3)).toarray() + array([[1, 0, 2], + [0, 0, 3], + [4, 5, 6]]) + + >>> indptr = np.array([0, 2, 3, 6]) + >>> indices = np.array([0, 2, 2, 0, 1, 2]) + >>> data = np.array([1, 2, 3, 4, 5, 6]) + >>> csr_array((data, indices, indptr), shape=(3, 3)).toarray() + array([[1, 0, 2], + [0, 0, 3], + [4, 5, 6]]) + + Duplicate entries are summed together: + + >>> row = np.array([0, 1, 2, 0]) + >>> col = np.array([0, 1, 1, 0]) + >>> data = np.array([1, 2, 4, 8]) + >>> csr_array((data, (row, col)), shape=(3, 3)).toarray() + array([[9, 0, 0], + [0, 2, 0], + [0, 4, 0]]) + + As an example of how to construct a CSR array incrementally, + the following snippet builds a term-document array from texts: + + >>> docs = [["hello", "world", "hello"], ["goodbye", "cruel", "world"]] + >>> indptr = [0] + >>> indices = [] + >>> data = [] + >>> vocabulary = {} + >>> for d in docs: + ... for term in d: + ... index = vocabulary.setdefault(term, len(vocabulary)) + ... indices.append(index) + ... data.append(1) + ... indptr.append(len(indices)) + ... + >>> csr_array((data, indices, indptr), dtype=int).toarray() + array([[2, 1, 0, 0], + [0, 1, 1, 1]]) + + """ + + +class csr_matrix(spmatrix, _csr_base): + """ + Compressed Sparse Row matrix. + + This can be instantiated in several ways: + csr_matrix(D) + where D is a 2-D ndarray + + csr_matrix(S) + with another sparse array or matrix S (equivalent to S.tocsr()) + + csr_matrix((M, N), [dtype]) + to construct an empty matrix with shape (M, N) + dtype is optional, defaulting to dtype='d'. + + csr_matrix((data, (row_ind, col_ind)), [shape=(M, N)]) + where ``data``, ``row_ind`` and ``col_ind`` satisfy the + relationship ``a[row_ind[k], col_ind[k]] = data[k]``. + + csr_matrix((data, indices, indptr), [shape=(M, N)]) + is the standard CSR representation where the column indices for + row i are stored in ``indices[indptr[i]:indptr[i+1]]`` and their + corresponding values are stored in ``data[indptr[i]:indptr[i+1]]``. + If the shape parameter is not supplied, the matrix dimensions + are inferred from the index arrays. 
+
+    Attributes
+    ----------
+    dtype : dtype
+        Data type of the matrix
+    shape : 2-tuple
+        Shape of the matrix
+    ndim : int
+        Number of dimensions (this is always 2)
+    nnz
+    size
+    data
+        CSR format data array of the matrix
+    indices
+        CSR format index array of the matrix
+    indptr
+        CSR format index pointer array of the matrix
+    has_sorted_indices
+    has_canonical_format
+    T
+
+    Notes
+    -----
+
+    Sparse matrices can be used in arithmetic operations: they support
+    addition, subtraction, multiplication, division, and matrix power.
+
+    Advantages of the CSR format
+      - efficient arithmetic operations CSR + CSR, CSR * CSR, etc.
+      - efficient row slicing
+      - fast matrix vector products
+
+    Disadvantages of the CSR format
+      - slow column slicing operations (consider CSC)
+      - changes to the sparsity structure are expensive (consider LIL or DOK)
+
+    Canonical Format
+      - Within each row, indices are sorted by column.
+      - There are no duplicate entries.
+
+    Examples
+    --------
+
+    >>> import numpy as np
+    >>> from scipy.sparse import csr_matrix
+    >>> csr_matrix((3, 4), dtype=np.int8).toarray()
+    array([[0, 0, 0, 0],
+           [0, 0, 0, 0],
+           [0, 0, 0, 0]], dtype=int8)
+
+    >>> row = np.array([0, 0, 1, 2, 2, 2])
+    >>> col = np.array([0, 2, 2, 0, 1, 2])
+    >>> data = np.array([1, 2, 3, 4, 5, 6])
+    >>> csr_matrix((data, (row, col)), shape=(3, 3)).toarray()
+    array([[1, 0, 2],
+           [0, 0, 3],
+           [4, 5, 6]])
+
+    >>> indptr = np.array([0, 2, 3, 6])
+    >>> indices = np.array([0, 2, 2, 0, 1, 2])
+    >>> data = np.array([1, 2, 3, 4, 5, 6])
+    >>> csr_matrix((data, indices, indptr), shape=(3, 3)).toarray()
+    array([[1, 0, 2],
+           [0, 0, 3],
+           [4, 5, 6]])
+
+    Duplicate entries are summed together:
+
+    >>> row = np.array([0, 1, 2, 0])
+    >>> col = np.array([0, 1, 1, 0])
+    >>> data = np.array([1, 2, 4, 8])
+    >>> csr_matrix((data, (row, col)), shape=(3, 3)).toarray()
+    array([[9, 0, 0],
+           [0, 2, 0],
+           [0, 4, 0]])
+
+    As an example of how to construct a CSR matrix incrementally,
+    the following snippet builds a term-document matrix from texts:
+
+    >>> docs = [["hello", "world", "hello"], ["goodbye", "cruel", "world"]]
+    >>> indptr = [0]
+    >>> indices = []
+    >>> data = []
+    >>> vocabulary = {}
+    >>> for d in docs:
+    ...     for term in d:
+    ...         index = vocabulary.setdefault(term, len(vocabulary))
+    ...         indices.append(index)
+    ...         data.append(1)
+    ...     indptr.append(len(indices))
+    ...
+    >>> csr_matrix((data, indices, indptr), dtype=int).toarray()
+    array([[2, 1, 0, 0],
+           [0, 1, 1, 1]])
+
+    """
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_data.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..408661ccb4a37aaecbdfc6ae6557ebff468000ed
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_data.py
@@ -0,0 +1,506 @@
+"""Base class for sparse matrices with a .data attribute
+
+    Subclasses must provide a _with_data() method that
+    creates a new matrix with the same sparsity pattern
+    as self but with a different data array.
+
+"""
+
+import numpy as np
+
+from ._base import _spbase, _ufuncs_with_fixed_point_at_zero
+from ._sputils import isscalarlike, validateaxis
+
+__all__ = []
+
+
+# TODO implement all relevant operations
+# use .data.__methods__() instead of /=, *=, etc.
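+# Orientation note: subclasses keep their explicitly stored entries in a
+# flat ndarray ``self.data``, so the element-wise operations below can act
+# on ``self.data`` directly without touching the sparsity structure.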
+class _data_matrix(_spbase):
+    def __init__(self):
+        _spbase.__init__(self)
+
+    @property
+    def dtype(self):
+        return self.data.dtype
+
+    @dtype.setter
+    def dtype(self, newtype):
+        self.data.dtype = newtype
+
+    def _deduped_data(self):
+        if hasattr(self, 'sum_duplicates'):
+            self.sum_duplicates()
+        return self.data
+
+    def __abs__(self):
+        return self._with_data(abs(self._deduped_data()))
+
+    def __round__(self, ndigits=0):
+        return self._with_data(np.around(self._deduped_data(), decimals=ndigits))
+
+    def _real(self):
+        return self._with_data(self.data.real)
+
+    def _imag(self):
+        return self._with_data(self.data.imag)
+
+    def __neg__(self):
+        if self.dtype.kind == 'b':
+            raise NotImplementedError('negating a boolean sparse array is not '
+                                      'supported')
+        return self._with_data(-self.data)
+
+    def __imul__(self, other):  # self *= other
+        if isscalarlike(other):
+            self.data *= other
+            return self
+        else:
+            return NotImplemented
+
+    def __itruediv__(self, other):  # self /= other
+        if isscalarlike(other):
+            recip = 1.0 / other
+            self.data *= recip
+            return self
+        else:
+            return NotImplemented
+
+    def astype(self, dtype, casting='unsafe', copy=True):
+        dtype = np.dtype(dtype)
+        if self.dtype != dtype:
+            matrix = self._with_data(
+                self.data.astype(dtype, casting=casting, copy=True),
+                copy=True
+            )
+            return matrix._with_data(matrix._deduped_data(), copy=False)
+        elif copy:
+            return self.copy()
+        else:
+            return self
+
+    astype.__doc__ = _spbase.astype.__doc__
+
+    def conjugate(self, copy=True):
+        if np.issubdtype(self.dtype, np.complexfloating):
+            return self._with_data(self.data.conjugate(), copy=copy)
+        elif copy:
+            return self.copy()
+        else:
+            return self
+
+    conjugate.__doc__ = _spbase.conjugate.__doc__
+
+    def copy(self):
+        return self._with_data(self.data.copy(), copy=True)
+
+    copy.__doc__ = _spbase.copy.__doc__
+
+    def count_nonzero(self):
+        return np.count_nonzero(self._deduped_data())
+
+    count_nonzero.__doc__ = _spbase.count_nonzero.__doc__
+
+    def power(self, n, dtype=None):
+        """
+        This function performs element-wise power.
+
+        Parameters
+        ----------
+        n : scalar
+            A non-zero scalar exponent. (Zero is rejected because the result
+            would be dense; use `np.ones(A.shape, dtype=A.dtype)` instead.)
+        dtype : dtype, optional
+            If dtype is not specified, the current dtype will be preserved.
+
+        Raises
+        ------
+        NotImplementedError
+            If ``n`` is zero.
+        """
+        if not isscalarlike(n):
+            raise NotImplementedError("input is not scalar")
+        if not n:
+            raise NotImplementedError(
+                "zero power is not supported as it would densify the matrix.\n"
+                "Use `np.ones(A.shape, dtype=A.dtype)` for this case."
+            )
+
+        data = self._deduped_data()
+        if dtype is not None:
+            data = data.astype(dtype)
+        return self._with_data(data ** n)
+
+    ###########################
+    # Multiplication handlers #
+    ###########################
+
+    def _mul_scalar(self, other):
+        return self._with_data(self.data * other)
+
+
+# Add the numpy unary ufuncs for which func(0) = 0 to _data_matrix.
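+# A factory function is used so that each generated method closes over its
+# own ufunc; defining ``method`` directly in the loop body would late-bind
+# ``npfunc`` and leave every method calling the last ufunc in the sequence.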
+for npfunc in _ufuncs_with_fixed_point_at_zero: + name = npfunc.__name__ + + def _create_method(op): + def method(self): + result = op(self._deduped_data()) + return self._with_data(result, copy=True) + + method.__doc__ = (f"Element-wise {name}.\n\n" + f"See `numpy.{name}` for more information.") + method.__name__ = name + + return method + + setattr(_data_matrix, name, _create_method(npfunc)) + + +def _find_missing_index(ind, n): + for k, a in enumerate(ind): + if k != a: + return k + + k += 1 + if k < n: + return k + else: + return -1 + + +class _minmax_mixin: + """Mixin for min and max methods. + + These are not implemented for dia_matrix, hence the separate class. + """ + + def _min_or_max_axis(self, axis, min_or_max): + N = self.shape[axis] + if N == 0: + raise ValueError("zero-size array to reduction operation") + M = self.shape[1 - axis] + idx_dtype = self._get_index_dtype(maxval=M) + + mat = self.tocsc() if axis == 0 else self.tocsr() + mat.sum_duplicates() + + major_index, value = mat._minor_reduce(min_or_max) + not_full = np.diff(mat.indptr)[major_index] < N + value[not_full] = min_or_max(value[not_full], 0) + + mask = value != 0 + major_index = np.compress(mask, major_index) + value = np.compress(mask, value) + + if axis == 0: + return self._coo_container( + (value, (np.zeros(len(value), dtype=idx_dtype), major_index)), + dtype=self.dtype, shape=(1, M) + ) + else: + return self._coo_container( + (value, (major_index, np.zeros(len(value), dtype=idx_dtype))), + dtype=self.dtype, shape=(M, 1) + ) + + def _min_or_max(self, axis, out, min_or_max): + if out is not None: + raise ValueError("Sparse arrays do not support an 'out' parameter.") + + validateaxis(axis) + if self.ndim == 1: + if axis not in (None, 0, -1): + raise ValueError("axis out of range") + axis = None # avoid calling special axis case. 
no impact on 1d
+
+        if axis is None:
+            if 0 in self.shape:
+                raise ValueError("zero-size array to reduction operation")
+
+            zero = self.dtype.type(0)
+            if self.nnz == 0:
+                return zero
+            m = min_or_max.reduce(self._deduped_data().ravel())
+            if self.nnz != np.prod(self.shape):
+                m = min_or_max(zero, m)
+            return m
+
+        if axis < 0:
+            axis += 2
+
+        if (axis == 0) or (axis == 1):
+            return self._min_or_max_axis(axis, min_or_max)
+        else:
+            raise ValueError("axis out of range")
+
+    def _arg_min_or_max_axis(self, axis, argmin_or_argmax, compare):
+        if self.shape[axis] == 0:
+            raise ValueError("Cannot apply the operation along a zero-sized dimension.")
+
+        if axis < 0:
+            axis += 2
+
+        zero = self.dtype.type(0)
+
+        mat = self.tocsc() if axis == 0 else self.tocsr()
+        mat.sum_duplicates()
+
+        ret_size, line_size = mat._swap(mat.shape)
+        ret = np.zeros(ret_size, dtype=int)
+
+        nz_lines, = np.nonzero(np.diff(mat.indptr))
+        for i in nz_lines:
+            p, q = mat.indptr[i:i + 2]
+            data = mat.data[p:q]
+            indices = mat.indices[p:q]
+            extreme_index = argmin_or_argmax(data)
+            extreme_value = data[extreme_index]
+            if compare(extreme_value, zero) or q - p == line_size:
+                ret[i] = indices[extreme_index]
+            else:
+                zero_ind = _find_missing_index(indices, line_size)
+                if extreme_value == zero:
+                    ret[i] = min(extreme_index, zero_ind)
+                else:
+                    ret[i] = zero_ind
+
+        if axis == 1:
+            ret = ret.reshape(-1, 1)
+
+        return self._ascontainer(ret)
+
+    def _arg_min_or_max(self, axis, out, argmin_or_argmax, compare):
+        if out is not None:
+            raise ValueError("Sparse types do not support an 'out' parameter.")
+
+        validateaxis(axis)
+
+        if self.ndim == 1:
+            if axis not in (None, 0, -1):
+                raise ValueError("axis out of range")
+            axis = None  # avoid calling special axis case. no impact on 1d
+
+        if axis is not None:
+            return self._arg_min_or_max_axis(axis, argmin_or_argmax, compare)
+
+        if 0 in self.shape:
+            raise ValueError("Cannot apply the operation to an empty matrix.")
+
+        if self.nnz == 0:
+            return 0
+
+        zero = self.dtype.type(0)
+        mat = self.tocoo()
+        # Convert to canonical form: no duplicates, sorted indices.
+        mat.sum_duplicates()
+        extreme_index = argmin_or_argmax(mat.data)
+        extreme_value = mat.data[extreme_index]
+        num_col = mat.shape[-1]
+
+        # If the min value is less than zero, or max is greater than zero,
+        # then we do not need to worry about implicit zeros.
+        if compare(extreme_value, zero):
+            # cast to Python int to avoid overflow and RuntimeError
+            return int(mat.row[extreme_index]) * num_col + int(mat.col[extreme_index])
+
+        # Cheap test for the rare case where we have no implicit zeros.
+        size = np.prod(self.shape)
+        if size == mat.nnz:
+            return int(mat.row[extreme_index]) * num_col + int(mat.col[extreme_index])
+
+        # At this stage, any implicit zero could be the min or max value.
+        # After sum_duplicates(), the `row` and `col` arrays are guaranteed to
+        # be sorted in C-order, which means the linearized indices are sorted.
+        linear_indices = mat.row * num_col + mat.col
+        first_implicit_zero_index = _find_missing_index(linear_indices, size)
+        if extreme_value == zero:
+            return min(first_implicit_zero_index, extreme_index)
+        return first_implicit_zero_index
+
+    def max(self, axis=None, out=None):
+        """
+        Return the maximum of the array/matrix or maximum along an axis.
+        This takes all elements into account, not just the non-zero ones.
+
+        Parameters
+        ----------
+        axis : {-2, -1, 0, 1, None}, optional
+            Axis along which the maximum is computed.
The default is to
+            compute the maximum over all elements, returning
+            a scalar (i.e., `axis` = `None`).
+
+        out : None, optional
+            This argument is in the signature *solely* for NumPy
+            compatibility reasons. Do not pass in anything except
+            for the default value, as this argument is not used.
+
+        Returns
+        -------
+        amax : coo_matrix or scalar
+            Maximum of `a`. If `axis` is None, the result is a scalar value.
+            If `axis` is given, the result is a sparse.coo_matrix of dimension
+            ``a.ndim - 1``.
+
+        See Also
+        --------
+        min : The minimum value of a sparse array/matrix along a given axis.
+        numpy.matrix.max : NumPy's implementation of 'max' for matrices
+
+        """
+        return self._min_or_max(axis, out, np.maximum)
+
+    def min(self, axis=None, out=None):
+        """
+        Return the minimum of the array/matrix or minimum along an axis.
+        This takes all elements into account, not just the non-zero ones.
+
+        Parameters
+        ----------
+        axis : {-2, -1, 0, 1, None}, optional
+            Axis along which the minimum is computed. The default is to
+            compute the minimum over all elements, returning
+            a scalar (i.e., `axis` = `None`).
+
+        out : None, optional
+            This argument is in the signature *solely* for NumPy
+            compatibility reasons. Do not pass in anything except for
+            the default value, as this argument is not used.
+
+        Returns
+        -------
+        amin : coo_matrix or scalar
+            Minimum of `a`. If `axis` is None, the result is a scalar value.
+            If `axis` is given, the result is a sparse.coo_matrix of dimension
+            ``a.ndim - 1``.
+
+        See Also
+        --------
+        max : The maximum value of a sparse array/matrix along a given axis.
+        numpy.matrix.min : NumPy's implementation of 'min' for matrices
+
+        """
+        return self._min_or_max(axis, out, np.minimum)
+
+    def nanmax(self, axis=None, out=None):
+        """
+        Return the maximum of the array/matrix or maximum along an axis, ignoring any
+        NaNs. This takes all elements into account, not just the non-zero
+        ones.
+
+        .. versionadded:: 1.11.0
+
+        Parameters
+        ----------
+        axis : {-2, -1, 0, 1, None}, optional
+            Axis along which the maximum is computed. The default is to
+            compute the maximum over all elements, returning
+            a scalar (i.e., `axis` = `None`).
+
+        out : None, optional
+            This argument is in the signature *solely* for NumPy
+            compatibility reasons. Do not pass in anything except
+            for the default value, as this argument is not used.
+
+        Returns
+        -------
+        amax : coo_matrix or scalar
+            Maximum of `a`. If `axis` is None, the result is a scalar value.
+            If `axis` is given, the result is a sparse.coo_matrix of dimension
+            ``a.ndim - 1``.
+
+        See Also
+        --------
+        nanmin : The minimum value of a sparse array/matrix along a given axis,
+                 ignoring NaNs.
+        max : The maximum value of a sparse array/matrix along a given axis,
+              propagating NaNs.
+        numpy.nanmax : NumPy's implementation of 'nanmax'.
+
+        """
+        return self._min_or_max(axis, out, np.fmax)
+
+    def nanmin(self, axis=None, out=None):
+        """
+        Return the minimum of the array/matrix or minimum along an axis, ignoring any
+        NaNs. This takes all elements into account, not just the non-zero
+        ones.
+
+        .. versionadded:: 1.11.0
+
+        Parameters
+        ----------
+        axis : {-2, -1, 0, 1, None}, optional
+            Axis along which the minimum is computed. The default is to
+            compute the minimum over all elements, returning
+            a scalar (i.e., `axis` = `None`).
+
+        out : None, optional
+            This argument is in the signature *solely* for NumPy
+            compatibility reasons. Do not pass in anything except for
+            the default value, as this argument is not used.
+
+        Returns
+        -------
+        amin : coo_matrix or scalar
+            Minimum of `a`. If `axis` is None, the result is a scalar value.
+            If `axis` is given, the result is a sparse.coo_matrix of dimension
+            ``a.ndim - 1``.
+
+        See Also
+        --------
+        nanmax : The maximum value of a sparse array/matrix along a given axis,
+                 ignoring NaNs.
+        min : The minimum value of a sparse array/matrix along a given axis,
+              propagating NaNs.
+        numpy.nanmin : NumPy's implementation of 'nanmin'.
+
+        """
+        return self._min_or_max(axis, out, np.fmin)
+
+    def argmax(self, axis=None, out=None):
+        """Return indices of maximum elements along an axis.
+
+        Implicit zero elements are also taken into account. If there are
+        several maximum values, the index of the first occurrence is returned.
+
+        Parameters
+        ----------
+        axis : {-2, -1, 0, 1, None}, optional
+            Axis along which the argmax is computed. If None (default), index
+            of the maximum element in the flattened data is returned.
+        out : None, optional
+            This argument is in the signature *solely* for NumPy
+            compatibility reasons. Do not pass in anything except for
+            the default value, as this argument is not used.
+
+        Returns
+        -------
+        ind : numpy.matrix or int
+            Indices of maximum elements. If matrix, its size along `axis` is 1.
+        """
+        return self._arg_min_or_max(axis, out, np.argmax, np.greater)
+
+    def argmin(self, axis=None, out=None):
+        """Return indices of minimum elements along an axis.
+
+        Implicit zero elements are also taken into account. If there are
+        several minimum values, the index of the first occurrence is returned.
+
+        Parameters
+        ----------
+        axis : {-2, -1, 0, 1, None}, optional
+            Axis along which the argmin is computed. If None (default), index
+            of the minimum element in the flattened data is returned.
+        out : None, optional
+            This argument is in the signature *solely* for NumPy
+            compatibility reasons. Do not pass in anything except for
+            the default value, as this argument is not used.
+
+        Returns
+        -------
+        ind : numpy.matrix or int
+            Indices of minimum elements. If matrix, its size along `axis` is 1.
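+
+        Examples
+        --------
+        A minimal sketch of the ``axis=None`` case (``csr_array`` is one
+        concrete class using this mixin); implicit zeros compete with the
+        stored values:
+
+        >>> from scipy.sparse import csr_array
+        >>> A = csr_array([[-1, 0], [2, 3]])
+        >>> A.argmin()  # -1 at flat index 0 is the smallest element
+        0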
+ """ + return self._arg_min_or_max(axis, out, np.argmin, np.less) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_dia.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_dia.py new file mode 100644 index 0000000000000000000000000000000000000000..26512832b81d525b9fa1e7cee08c99195517098e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_dia.py @@ -0,0 +1,563 @@ +"""Sparse DIAgonal format""" + +__docformat__ = "restructuredtext en" + +__all__ = ['dia_array', 'dia_matrix', 'isspmatrix_dia'] + +import numpy as np + +from .._lib._util import copy_if_needed +from ._matrix import spmatrix +from ._base import issparse, _formats, _spbase, sparray +from ._data import _data_matrix +from ._sputils import ( + isshape, upcast_char, getdtype, get_sum_dtype, validateaxis, check_shape +) +from ._sparsetools import dia_matvec + + +class _dia_base(_data_matrix): + _format = 'dia' + + def __init__(self, arg1, shape=None, dtype=None, copy=False): + _data_matrix.__init__(self) + + if issparse(arg1): + if arg1.format == "dia": + if copy: + arg1 = arg1.copy() + self.data = arg1.data + self.offsets = arg1.offsets + self._shape = check_shape(arg1.shape) + else: + if arg1.format == self.format and copy: + A = arg1.copy() + else: + A = arg1.todia() + self.data = A.data + self.offsets = A.offsets + self._shape = check_shape(A.shape) + elif isinstance(arg1, tuple): + if isshape(arg1): + # It's a tuple of matrix dimensions (M, N) + # create empty matrix + self._shape = check_shape(arg1) + self.data = np.zeros((0,0), getdtype(dtype, default=float)) + idx_dtype = self._get_index_dtype(maxval=max(self.shape)) + self.offsets = np.zeros((0), dtype=idx_dtype) + else: + try: + # Try interpreting it as (data, offsets) + data, offsets = arg1 + except Exception as e: + message = 'unrecognized form for dia_array constructor' + raise ValueError(message) from e + else: + if shape is None: + raise ValueError('expected a shape argument') + if not copy: + copy = copy_if_needed + self.data = np.atleast_2d(np.array(arg1[0], dtype=dtype, copy=copy)) + offsets = np.array(arg1[1], + dtype=self._get_index_dtype(maxval=max(shape)), + copy=copy) + self.offsets = np.atleast_1d(offsets) + self._shape = check_shape(shape) + else: + #must be dense, convert to COO first, then to DIA + try: + arg1 = np.asarray(arg1) + except Exception as e: + raise ValueError("unrecognized form for" + " %s_matrix constructor" % self.format) from e + A = self._coo_container(arg1, dtype=dtype, shape=shape).todia() + self.data = A.data + self.offsets = A.offsets + self._shape = check_shape(A.shape) + + if dtype is not None: + self.data = self.data.astype(dtype) + + #check format + if self.offsets.ndim != 1: + raise ValueError('offsets array must have rank 1') + + if self.data.ndim != 2: + raise ValueError('data array must have rank 2') + + if self.data.shape[0] != len(self.offsets): + raise ValueError('number of diagonals (%d) ' + 'does not match the number of offsets (%d)' + % (self.data.shape[0], len(self.offsets))) + + if len(np.unique(self.offsets)) != len(self.offsets): + raise ValueError('offset array contains duplicate values') + + def __repr__(self): + _, fmt = _formats[self.format] + sparse_cls = 'array' if isinstance(self, sparray) else 'matrix' + shape_str = 'x'.join(str(x) for x in self.shape) + ndiag = self.data.shape[0] + return ( + f"<{shape_str} sparse {sparse_cls} of type '{self.dtype.type}'\n" + f"\twith {self.nnz} stored elements ({ndiag} diagonals) in {fmt} format>" + ) + + def _data_mask(self): + 
"""Returns a mask of the same shape as self.data, where + mask[i,j] is True when data[i,j] corresponds to a stored element.""" + num_rows, num_cols = self.shape + offset_inds = np.arange(self.data.shape[1]) + row = offset_inds - self.offsets[:,None] + mask = (row >= 0) + mask &= (row < num_rows) + mask &= (offset_inds < num_cols) + return mask + + def count_nonzero(self): + mask = self._data_mask() + return np.count_nonzero(self.data[mask]) + + def _getnnz(self, axis=None): + if axis is not None: + raise NotImplementedError("_getnnz over an axis is not implemented " + "for DIA format") + M,N = self.shape + nnz = 0 + for k in self.offsets: + if k > 0: + nnz += min(M,N-k) + else: + nnz += min(M+k,N) + return int(nnz) + + _getnnz.__doc__ = _spbase._getnnz.__doc__ + count_nonzero.__doc__ = _spbase.count_nonzero.__doc__ + + def sum(self, axis=None, dtype=None, out=None): + validateaxis(axis) + + if axis is not None and axis < 0: + axis += 2 + + res_dtype = get_sum_dtype(self.dtype) + num_rows, num_cols = self.shape + ret = None + + if axis == 0: + mask = self._data_mask() + x = (self.data * mask).sum(axis=0) + if x.shape[0] == num_cols: + res = x + else: + res = np.zeros(num_cols, dtype=x.dtype) + res[:x.shape[0]] = x + ret = self._ascontainer(res, dtype=res_dtype) + + else: + row_sums = np.zeros((num_rows, 1), dtype=res_dtype) + one = np.ones(num_cols, dtype=res_dtype) + dia_matvec(num_rows, num_cols, len(self.offsets), + self.data.shape[1], self.offsets, self.data, one, row_sums) + + row_sums = self._ascontainer(row_sums) + + if axis is None: + return row_sums.sum(dtype=dtype, out=out) + + ret = self._ascontainer(row_sums.sum(axis=axis)) + + if out is not None and out.shape != ret.shape: + raise ValueError("dimensions do not match") + + return ret.sum(axis=(), dtype=dtype, out=out) + + sum.__doc__ = _spbase.sum.__doc__ + + def _add_sparse(self, other): + + # Check if other is also of type dia_array + if not isinstance(other, type(self)): + # If other is not of type dia_array, default to + # converting to csr_matrix, as is done in the _add_sparse + # method of parent class _spbase + return self.tocsr()._add_sparse(other) + + # The task is to compute m = self + other + # Start by making a copy of self, of the datatype + # that should result from adding self and other + dtype = np.promote_types(self.dtype, other.dtype) + m = self.astype(dtype, copy=True) + + # Then, add all the stored diagonals of other. + for d in other.offsets: + # Check if the diagonal has already been added. 
+ if d in m.offsets: + # If the diagonal is already there, we need to take + # the sum of the existing and the new + m.setdiag(m.diagonal(d) + other.diagonal(d), d) + else: + m.setdiag(other.diagonal(d), d) + return m + + def _matmul_vector(self, other): + x = other + + y = np.zeros(self.shape[0], dtype=upcast_char(self.dtype.char, + x.dtype.char)) + + L = self.data.shape[1] + + M,N = self.shape + + dia_matvec(M,N, len(self.offsets), L, self.offsets, self.data, + x.ravel(), y.ravel()) + + return y + + def _setdiag(self, values, k=0): + M, N = self.shape + + if values.ndim == 0: + # broadcast + values_n = np.inf + else: + values_n = len(values) + + if k < 0: + n = min(M + k, N, values_n) + min_index = 0 + max_index = n + else: + n = min(M, N - k, values_n) + min_index = k + max_index = k + n + + if values.ndim != 0: + # allow also longer sequences + values = values[:n] + + data_rows, data_cols = self.data.shape + if k in self.offsets: + if max_index > data_cols: + data = np.zeros((data_rows, max_index), dtype=self.data.dtype) + data[:, :data_cols] = self.data + self.data = data + self.data[self.offsets == k, min_index:max_index] = values + else: + self.offsets = np.append(self.offsets, self.offsets.dtype.type(k)) + m = max(max_index, data_cols) + data = np.zeros((data_rows + 1, m), dtype=self.data.dtype) + data[:-1, :data_cols] = self.data + data[-1, min_index:max_index] = values + self.data = data + + def todia(self, copy=False): + if copy: + return self.copy() + else: + return self + + todia.__doc__ = _spbase.todia.__doc__ + + def transpose(self, axes=None, copy=False): + if axes is not None and axes != (1, 0): + raise ValueError("Sparse arrays/matrices do not support " + "an 'axes' parameter because swapping " + "dimensions is the only logical permutation.") + + num_rows, num_cols = self.shape + max_dim = max(self.shape) + + # flip diagonal offsets + offsets = -self.offsets + + # re-align the data matrix + r = np.arange(len(offsets), dtype=np.intc)[:, None] + c = np.arange(num_rows, dtype=np.intc) - (offsets % max_dim)[:, None] + pad_amount = max(0, max_dim-self.data.shape[1]) + data = np.hstack((self.data, np.zeros((self.data.shape[0], pad_amount), + dtype=self.data.dtype))) + data = data[r, c] + return self._dia_container((data, offsets), shape=( + num_cols, num_rows), copy=copy) + + transpose.__doc__ = _spbase.transpose.__doc__ + + def diagonal(self, k=0): + rows, cols = self.shape + if k <= -rows or k >= cols: + return np.empty(0, dtype=self.data.dtype) + idx, = np.nonzero(self.offsets == k) + first_col = max(0, k) + last_col = min(rows + k, cols) + result_size = last_col - first_col + if idx.size == 0: + return np.zeros(result_size, dtype=self.data.dtype) + result = self.data[idx[0], first_col:last_col] + padding = result_size - len(result) + if padding > 0: + result = np.pad(result, (0, padding), mode='constant') + return result + + diagonal.__doc__ = _spbase.diagonal.__doc__ + + def tocsc(self, copy=False): + if self.nnz == 0: + return self._csc_container(self.shape, dtype=self.dtype) + + num_rows, num_cols = self.shape + num_offsets, offset_len = self.data.shape + offset_inds = np.arange(offset_len) + + row = offset_inds - self.offsets[:,None] + mask = (row >= 0) + mask &= (row < num_rows) + mask &= (offset_inds < num_cols) + mask &= (self.data != 0) + + idx_dtype = self._get_index_dtype(maxval=max(self.shape)) + indptr = np.zeros(num_cols + 1, dtype=idx_dtype) + indptr[1:offset_len+1] = np.cumsum(mask.sum(axis=0)[:num_cols]) + if offset_len < num_cols: + indptr[offset_len+1:] = 
indptr[offset_len] + indices = row.T[mask.T].astype(idx_dtype, copy=False) + data = self.data.T[mask.T] + return self._csc_container((data, indices, indptr), shape=self.shape, + dtype=self.dtype) + + tocsc.__doc__ = _spbase.tocsc.__doc__ + + def tocoo(self, copy=False): + num_rows, num_cols = self.shape + num_offsets, offset_len = self.data.shape + offset_inds = np.arange(offset_len) + + row = offset_inds - self.offsets[:,None] + mask = (row >= 0) + mask &= (row < num_rows) + mask &= (offset_inds < num_cols) + mask &= (self.data != 0) + row = row[mask] + col = np.tile(offset_inds, num_offsets)[mask.ravel()] + idx_dtype = self._get_index_dtype( + arrays=(self.offsets,), maxval=max(self.shape) + ) + row = row.astype(idx_dtype, copy=False) + col = col.astype(idx_dtype, copy=False) + data = self.data[mask] + # Note: this cannot set has_canonical_format=True, because despite the + # lack of duplicates, we do not generate sorted indices. + return self._coo_container( + (data, (row, col)), shape=self.shape, dtype=self.dtype, copy=False + ) + + tocoo.__doc__ = _spbase.tocoo.__doc__ + + # needed by _data_matrix + def _with_data(self, data, copy=True): + """Returns a matrix with the same sparsity structure as self, + but with different data. By default the structure arrays are copied. + """ + if copy: + return self._dia_container( + (data, self.offsets.copy()), shape=self.shape + ) + else: + return self._dia_container( + (data, self.offsets), shape=self.shape + ) + + def resize(self, *shape): + shape = check_shape(shape) + M, N = shape + # we do not need to handle the case of expanding N + self.data = self.data[:, :N] + + if (M > self.shape[0] and + np.any(self.offsets + self.shape[0] < self.data.shape[1])): + # explicitly clear values that were previously hidden + mask = (self.offsets[:, None] + self.shape[0] <= + np.arange(self.data.shape[1])) + self.data[mask] = 0 + + self._shape = shape + + resize.__doc__ = _spbase.resize.__doc__ + + +def isspmatrix_dia(x): + """Is `x` of dia_matrix type? + + Parameters + ---------- + x + object to check for being a dia matrix + + Returns + ------- + bool + True if `x` is a dia matrix, False otherwise + + Examples + -------- + >>> from scipy.sparse import dia_array, dia_matrix, coo_matrix, isspmatrix_dia + >>> isspmatrix_dia(dia_matrix([[5]])) + True + >>> isspmatrix_dia(dia_array([[5]])) + False + >>> isspmatrix_dia(coo_matrix([[5]])) + False + """ + return isinstance(x, dia_matrix) + + +# This namespace class separates array from matrix with isinstance +class dia_array(_dia_base, sparray): + """ + Sparse array with DIAgonal storage. + + This can be instantiated in several ways: + dia_array(D) + where D is a 2-D ndarray + + dia_array(S) + with another sparse array or matrix S (equivalent to S.todia()) + + dia_array((M, N), [dtype]) + to construct an empty array with shape (M, N), + dtype is optional, defaulting to dtype='d'. + + dia_array((data, offsets), shape=(M, N)) + where the ``data[k,:]`` stores the diagonal entries for + diagonal ``offsets[k]`` (See example below) + + Attributes + ---------- + dtype : dtype + Data type of the array + shape : 2-tuple + Shape of the array + ndim : int + Number of dimensions (this is always 2) + nnz + size + data + DIA format data array of the array + offsets + DIA format offset array of the array + T + + Notes + ----- + + Sparse arrays can be used in arithmetic operations: they support + addition, subtraction, multiplication, division, and matrix power. 
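+
+    In this layout, ``data[k, c]`` holds the entry at row ``c - offsets[k]``,
+    column ``c`` (the alignment used by ``_data_mask`` above), so diagonals
+    shorter than the width of ``data`` are simply zero-padded.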
+ + Examples + -------- + + >>> import numpy as np + >>> from scipy.sparse import dia_array + >>> dia_array((3, 4), dtype=np.int8).toarray() + array([[0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]], dtype=int8) + + >>> data = np.array([[1, 2, 3, 4]]).repeat(3, axis=0) + >>> offsets = np.array([0, -1, 2]) + >>> dia_array((data, offsets), shape=(4, 4)).toarray() + array([[1, 0, 3, 0], + [1, 2, 0, 4], + [0, 2, 3, 0], + [0, 0, 3, 4]]) + + >>> from scipy.sparse import dia_array + >>> n = 10 + >>> ex = np.ones(n) + >>> data = np.array([ex, 2 * ex, ex]) + >>> offsets = np.array([-1, 0, 1]) + >>> dia_array((data, offsets), shape=(n, n)).toarray() + array([[2., 1., 0., ..., 0., 0., 0.], + [1., 2., 1., ..., 0., 0., 0.], + [0., 1., 2., ..., 0., 0., 0.], + ..., + [0., 0., 0., ..., 2., 1., 0.], + [0., 0., 0., ..., 1., 2., 1.], + [0., 0., 0., ..., 0., 1., 2.]]) + """ + + +class dia_matrix(spmatrix, _dia_base): + """ + Sparse matrix with DIAgonal storage. + + This can be instantiated in several ways: + dia_matrix(D) + where D is a 2-D ndarray + + dia_matrix(S) + with another sparse array or matrix S (equivalent to S.todia()) + + dia_matrix((M, N), [dtype]) + to construct an empty matrix with shape (M, N), + dtype is optional, defaulting to dtype='d'. + + dia_matrix((data, offsets), shape=(M, N)) + where the ``data[k,:]`` stores the diagonal entries for + diagonal ``offsets[k]`` (See example below) + + Attributes + ---------- + dtype : dtype + Data type of the matrix + shape : 2-tuple + Shape of the matrix + ndim : int + Number of dimensions (this is always 2) + nnz + size + data + DIA format data array of the matrix + offsets + DIA format offset array of the matrix + T + + Notes + ----- + + Sparse matrices can be used in arithmetic operations: they support + addition, subtraction, multiplication, division, and matrix power. 
+ + Examples + -------- + + >>> import numpy as np + >>> from scipy.sparse import dia_matrix + >>> dia_matrix((3, 4), dtype=np.int8).toarray() + array([[0, 0, 0, 0], + [0, 0, 0, 0], + [0, 0, 0, 0]], dtype=int8) + + >>> data = np.array([[1, 2, 3, 4]]).repeat(3, axis=0) + >>> offsets = np.array([0, -1, 2]) + >>> dia_matrix((data, offsets), shape=(4, 4)).toarray() + array([[1, 0, 3, 0], + [1, 2, 0, 4], + [0, 2, 3, 0], + [0, 0, 3, 4]]) + + >>> from scipy.sparse import dia_matrix + >>> n = 10 + >>> ex = np.ones(n) + >>> data = np.array([ex, 2 * ex, ex]) + >>> offsets = np.array([-1, 0, 1]) + >>> dia_matrix((data, offsets), shape=(n, n)).toarray() + array([[2., 1., 0., ..., 0., 0., 0.], + [1., 2., 1., ..., 0., 0., 0.], + [0., 1., 2., ..., 0., 0., 0.], + ..., + [0., 0., 0., ..., 2., 1., 0.], + [0., 0., 0., ..., 1., 2., 1.], + [0., 0., 0., ..., 0., 1., 2.]]) + """ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_dok.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_dok.py new file mode 100644 index 0000000000000000000000000000000000000000..c5d7cd60242a96ae336f19d9cc31f1342b1c28dd --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_dok.py @@ -0,0 +1,672 @@ +"""Dictionary Of Keys based matrix""" + +__docformat__ = "restructuredtext en" + +__all__ = ['dok_array', 'dok_matrix', 'isspmatrix_dok'] + +import itertools +import numpy as np + +from ._matrix import spmatrix +from ._base import _spbase, sparray, issparse +from ._index import IndexMixin +from ._sputils import (isdense, getdtype, isshape, isintlike, isscalarlike, + upcast, upcast_scalar, check_shape) + + +class _dok_base(_spbase, IndexMixin, dict): + _format = 'dok' + + def __init__(self, arg1, shape=None, dtype=None, copy=False): + _spbase.__init__(self) + + is_array = isinstance(self, sparray) + if isinstance(arg1, tuple) and isshape(arg1, allow_1d=is_array): + self._shape = check_shape(arg1, allow_1d=is_array) + self._dict = {} + self.dtype = getdtype(dtype, default=float) + elif issparse(arg1): # Sparse ctor + if arg1.format == self.format: + arg1 = arg1.copy() if copy else arg1 + else: + arg1 = arg1.todok() + + if dtype is not None: + arg1 = arg1.astype(dtype, copy=False) + + self._dict = arg1._dict + self._shape = check_shape(arg1.shape, allow_1d=is_array) + self.dtype = arg1.dtype + else: # Dense ctor + try: + arg1 = np.asarray(arg1) + except Exception as e: + raise TypeError('Invalid input format.') from e + + if arg1.ndim > 2: + raise TypeError('Expected rank <=2 dense array or matrix.') + + if arg1.ndim == 1: + if dtype is not None: + arg1 = arg1.astype(dtype) + self._dict = {i: v for i, v in enumerate(arg1) if v != 0} + self.dtype = arg1.dtype + else: + d = self._coo_container(arg1, dtype=dtype).todok() + self._dict = d._dict + self.dtype = d.dtype + self._shape = check_shape(arg1.shape, allow_1d=is_array) + + def update(self, val): + # Prevent direct usage of update + raise NotImplementedError("Direct update to DOK sparse format is not allowed.") + + def _getnnz(self, axis=None): + if axis is not None: + raise NotImplementedError( + "_getnnz over an axis is not implemented for DOK format." 
+        )
+        return len(self._dict)
+
+    def count_nonzero(self):
+        return sum(x != 0 for x in self.values())
+
+    _getnnz.__doc__ = _spbase._getnnz.__doc__
+    count_nonzero.__doc__ = _spbase.count_nonzero.__doc__
+
+    def __len__(self):
+        return len(self._dict)
+
+    def __contains__(self, key):
+        return key in self._dict
+
+    def setdefault(self, key, default=None, /):
+        return self._dict.setdefault(key, default)
+
+    def __delitem__(self, key, /):
+        del self._dict[key]
+
+    def clear(self):
+        return self._dict.clear()
+
+    def pop(self, key, default=None, /):
+        return self._dict.pop(key, default)
+
+    def __reversed__(self):
+        raise TypeError("reversed is not defined for dok_array type")
+
+    def __or__(self, other):
+        type_names = f"{type(self).__name__} and {type(other).__name__}"
+        raise TypeError(f"unsupported operand type for |: {type_names}")
+
+    def __ror__(self, other):
+        type_names = f"{type(self).__name__} and {type(other).__name__}"
+        raise TypeError(f"unsupported operand type for |: {type_names}")
+
+    def __ior__(self, other):
+        type_names = f"{type(self).__name__} and {type(other).__name__}"
+        raise TypeError(f"unsupported operand type for |: {type_names}")
+
+    def popitem(self):
+        return self._dict.popitem()
+
+    def items(self):
+        return self._dict.items()
+
+    def keys(self):
+        return self._dict.keys()
+
+    def values(self):
+        return self._dict.values()
+
+    def get(self, key, default=0.0):
+        """This provides dict.get method functionality with type checking"""
+        if key in self._dict:
+            return self._dict[key]
+        if isintlike(key) and self.ndim == 1:
+            key = (key,)
+        if self.ndim != len(key):
+            raise IndexError(f'Index {key} length needs to match self.shape')
+        try:
+            for i in key:
+                assert isintlike(i)
+        except (AssertionError, TypeError, ValueError) as e:
+            raise IndexError('Index must be or consist of integers.') from e
+        key = tuple(i + M if i < 0 else i for i, M in zip(key, self.shape))
+        if any(i < 0 or i >= M for i, M in zip(key, self.shape)):
+            raise IndexError('Index out of bounds.')
+        if self.ndim == 1:
+            key = key[0]
+        return self._dict.get(key, default)
+
+    # override IndexMixin.__getitem__ for 1d case until fully implemented
+    def __getitem__(self, key):
+        if self.ndim == 2:
+            return super().__getitem__(key)
+
+        if isinstance(key, tuple) and len(key) == 1:
+            key = key[0]
+        INT_TYPES = (int, np.integer)
+        if isinstance(key, INT_TYPES):
+            if key < 0:
+                key += self.shape[-1]
+            if key < 0 or key >= self.shape[-1]:
+                raise IndexError('index value out of bounds')
+            return self._get_int(key)
+        else:
+            raise IndexError('array/slice index for 1d dok_array not yet supported')
+
+    # 1D get methods
+    def _get_int(self, idx):
+        return self._dict.get(idx, self.dtype.type(0))
+
+    # 2D get methods
+    def _get_intXint(self, row, col):
+        return self._dict.get((row, col), self.dtype.type(0))
+
+    def _get_intXslice(self, row, col):
+        return self._get_sliceXslice(slice(row, row + 1), col)
+
+    def _get_sliceXint(self, row, col):
+        return self._get_sliceXslice(row, slice(col, col + 1))
+
+    def _get_sliceXslice(self, row, col):
+        row_start, row_stop, row_step = row.indices(self.shape[0])
+        col_start, col_stop, col_step = col.indices(self.shape[1])
+        row_range = range(row_start, row_stop, row_step)
+        col_range = range(col_start, col_stop, col_step)
+        shape = (len(row_range), len(col_range))
+        # Switch paths only when advantageous
+        # (count the iterations in the loops, adjust for complexity)
+        if len(self) >= 2 * shape[0] * shape[1]:
+            # O(nr*nc) path: loop over the requested <row, col> positions
+            return self._get_columnXarray(row_range,
col_range) + # O(nnz) path: loop over entries of self + newdok = self._dok_container(shape, dtype=self.dtype) + for key in self.keys(): + i, ri = divmod(int(key[0]) - row_start, row_step) + if ri != 0 or i < 0 or i >= shape[0]: + continue + j, rj = divmod(int(key[1]) - col_start, col_step) + if rj != 0 or j < 0 or j >= shape[1]: + continue + newdok._dict[i, j] = self._dict[key] + return newdok + + def _get_intXarray(self, row, col): + col = col.squeeze() + return self._get_columnXarray([row], col) + + def _get_arrayXint(self, row, col): + row = row.squeeze() + return self._get_columnXarray(row, [col]) + + def _get_sliceXarray(self, row, col): + row = list(range(*row.indices(self.shape[0]))) + return self._get_columnXarray(row, col) + + def _get_arrayXslice(self, row, col): + col = list(range(*col.indices(self.shape[1]))) + return self._get_columnXarray(row, col) + + def _get_columnXarray(self, row, col): + # outer indexing + newdok = self._dok_container((len(row), len(col)), dtype=self.dtype) + + for i, r in enumerate(row): + for j, c in enumerate(col): + v = self._dict.get((r, c), 0) + if v: + newdok._dict[i, j] = v + return newdok + + def _get_arrayXarray(self, row, col): + # inner indexing + i, j = map(np.atleast_2d, np.broadcast_arrays(row, col)) + newdok = self._dok_container(i.shape, dtype=self.dtype) + + for key in itertools.product(range(i.shape[0]), range(i.shape[1])): + v = self._dict.get((i[key], j[key]), 0) + if v: + newdok._dict[key] = v + return newdok + + # override IndexMixin.__setitem__ for 1d case until fully implemented + def __setitem__(self, key, value): + if self.ndim == 2: + return super().__setitem__(key, value) + + if isinstance(key, tuple) and len(key) == 1: + key = key[0] + INT_TYPES = (int, np.integer) + if isinstance(key, INT_TYPES): + if key < 0: + key += self.shape[-1] + if key < 0 or key >= self.shape[-1]: + raise IndexError('index value out of bounds') + return self._set_int(key, value) + else: + raise IndexError('array index for 1d dok_array not yet provided') + + # 1D set methods + def _set_int(self, idx, x): + if x: + self._dict[idx] = x + elif idx in self._dict: + del self._dict[idx] + + # 2D set methods + def _set_intXint(self, row, col, x): + key = (row, col) + if x: + self._dict[key] = x + elif key in self._dict: + del self._dict[key] + + def _set_arrayXarray(self, row, col, x): + row = list(map(int, row.ravel())) + col = list(map(int, col.ravel())) + x = x.ravel() + self._dict.update(zip(zip(row, col), x)) + + for i in np.nonzero(x == 0)[0]: + key = (row[i], col[i]) + if self._dict[key] == 0: + # may have been superseded by later update + del self._dict[key] + + def __add__(self, other): + if isscalarlike(other): + res_dtype = upcast_scalar(self.dtype, other) + new = self._dok_container(self.shape, dtype=res_dtype) + # Add this scalar to each element. 
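+            # (Note: with a nonzero scalar the result is effectively dense,
+            # because every implicit zero entry becomes ``other``; the loop
+            # below therefore visits every position of the array, not just
+            # the stored keys.)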
+            for key in itertools.product(*[range(d) for d in self.shape]):
+                aij = self._dict.get(key, 0) + other
+                if aij:
+                    new[key] = aij
+        elif issparse(other):
+            if other.shape != self.shape:
+                raise ValueError("Matrix dimensions are not equal.")
+            res_dtype = upcast(self.dtype, other.dtype)
+            new = self._dok_container(self.shape, dtype=res_dtype)
+            new._dict = self._dict.copy()
+            if other.format == "dok":
+                o_items = other.items()
+            else:
+                other = other.tocoo()
+                if self.ndim == 1:
+                    o_items = zip(other.coords[0], other.data)
+                else:
+                    o_items = zip(zip(*other.coords), other.data)
+            with np.errstate(over='ignore'):
+                new._dict.update((k, new[k] + v) for k, v in o_items)
+        elif isdense(other):
+            new = self.todense() + other
+        else:
+            return NotImplemented
+        return new
+
+    def __radd__(self, other):
+        return self + other  # addition is commutative
+
+    def __neg__(self):
+        if self.dtype.kind == 'b':
+            raise NotImplementedError(
+                'Negating a sparse boolean matrix is not supported.'
+            )
+        new = self._dok_container(self.shape, dtype=self.dtype)
+        new._dict.update((k, -v) for k, v in self.items())
+        return new
+
+    def _mul_scalar(self, other):
+        res_dtype = upcast_scalar(self.dtype, other)
+        # Multiply this scalar by every element.
+        new = self._dok_container(self.shape, dtype=res_dtype)
+        new._dict.update(((k, v * other) for k, v in self.items()))
+        return new
+
+    def _matmul_vector(self, other):
+        res_dtype = upcast(self.dtype, other.dtype)
+
+        # vector @ vector
+        if self.ndim == 1:
+            if issparse(other):
+                if other.format == "dok":
+                    keys = self.keys() & other.keys()
+                else:
+                    keys = self.keys() & other.tocoo().coords[0]
+                return res_dtype(sum(self._dict[k] * other._dict[k] for k in keys))
+            elif isdense(other):
+                return res_dtype(sum(other[k] * v for k, v in self.items()))
+            else:
+                return NotImplemented
+
+        # matrix @ vector
+        result = np.zeros(self.shape[0], dtype=res_dtype)
+        for (i, j), v in self.items():
+            result[i] += v * other[j]
+        return result
+
+    def _matmul_multivector(self, other):
+        result_dtype = upcast(self.dtype, other.dtype)
+        # vector @ multivector
+        if self.ndim == 1:
+            # works for other 1d or 2d
+            return sum(v * other[j] for j, v in self._dict.items())
+
+        # matrix @ multivector
+        M = self.shape[0]
+        new_shape = (M,) if other.ndim == 1 else (M, other.shape[1])
+        result = np.zeros(new_shape, dtype=result_dtype)
+        for (i, j), v in self.items():
+            result[i] += v * other[j]
+        return result
+
+    def __imul__(self, other):
+        if isscalarlike(other):
+            self._dict.update((k, v * other) for k, v in self.items())
+            return self
+        return NotImplemented
+
+    def __truediv__(self, other):
+        if isscalarlike(other):
+            res_dtype = upcast_scalar(self.dtype, other)
+            new = self._dok_container(self.shape, dtype=res_dtype)
+            new._dict.update(((k, v / other) for k, v in self.items()))
+            return new
+        return self.tocsr() / other
+
+    def __itruediv__(self, other):
+        if isscalarlike(other):
+            self._dict.update((k, v / other) for k, v in self.items())
+            return self
+        return NotImplemented
+
+    def __reduce__(self):
+        # This approach is necessary because __setstate__ is called after
+        # __setitem__ upon unpickling; since __init__ is never run, there is
+        # no shape attribute at that point, so default unpickling fails.
+        return dict.__reduce__(self)
+
+    def diagonal(self, k=0):
+        if self.ndim == 2:
+            return super().diagonal(k)
+        raise ValueError("diagonal requires two dimensions")
+
+    def transpose(self, axes=None, copy=False):
+        if self.ndim == 1:
+            return self.copy()
+
+        if axes is not None and axes != (1, 0):
+            raise ValueError(
+                "Sparse arrays/matrices do not support "
+                "an 'axes' parameter because swapping "
+                "dimensions is the only logical permutation."
+            )
+
+        M, N = self.shape
+        new = self._dok_container((N, M), dtype=self.dtype, copy=copy)
+        new._dict.update((((right, left), val) for (left, right), val in self.items()))
+        return new
+
+    transpose.__doc__ = _spbase.transpose.__doc__
+
+    def conjtransp(self):
+        """Return the conjugate transpose."""
+        if self.ndim == 1:
+            new = self.tocoo()
+            new.data = new.data.conjugate()
+            return new
+        M, N = self.shape
+        new = self._dok_container((N, M), dtype=self.dtype)
+        new._dict = {(right, left): np.conj(val) for (left, right), val in self.items()}
+        return new
+
+    def copy(self):
+        new = self._dok_container(self.shape, dtype=self.dtype)
+        new._dict.update(self._dict)
+        return new
+
+    copy.__doc__ = _spbase.copy.__doc__
+
+    @classmethod
+    def fromkeys(cls, iterable, value=1, /):
+        tmp = dict.fromkeys(iterable, value)
+        if isinstance(next(iter(tmp)), tuple):
+            shape = tuple(max(idx) + 1 for idx in zip(*tmp))
+        else:
+            shape = (max(tmp) + 1,)
+        result = cls(shape, dtype=type(value))
+        result._dict = tmp
+        return result
+
+    def tocoo(self, copy=False):
+        nnz = self.nnz
+        if nnz == 0:
+            return self._coo_container(self.shape, dtype=self.dtype)
+
+        idx_dtype = self._get_index_dtype(maxval=max(self.shape))
+        data = np.fromiter(self.values(), dtype=self.dtype, count=nnz)
+        # handle 1d keys specially because they are not tuples
+        inds = zip(*self.keys()) if self.ndim > 1 else (self.keys(),)
+        coords = tuple(np.fromiter(ix, dtype=idx_dtype, count=nnz) for ix in inds)
+        A = self._coo_container((data, coords), shape=self.shape, dtype=self.dtype)
+        A.has_canonical_format = True
+        return A
+
+    tocoo.__doc__ = _spbase.tocoo.__doc__
+
+    def todok(self, copy=False):
+        if copy:
+            return self.copy()
+        return self
+
+    todok.__doc__ = _spbase.todok.__doc__
+
+    def tocsc(self, copy=False):
+        if self.ndim == 1:
+            raise NotImplementedError("tocsc() not valid for 1d sparse array")
+        return self.tocoo(copy=False).tocsc(copy=copy)
+
+    tocsc.__doc__ = _spbase.tocsc.__doc__
+
+    def resize(self, *shape):
+        is_array = isinstance(self, sparray)
+        shape = check_shape(shape, allow_1d=is_array)
+        if len(shape) != len(self.shape):
+            # TODO implement resize across dimensions
+            raise NotImplementedError
+
+        if self.ndim == 1:
+            newN = shape[-1]
+            for i in list(self._dict):
+                if i >= newN:
+                    del self._dict[i]
+            self._shape = shape
+            return
+
+        newM, newN = shape
+        M, N = self.shape
+        if newM < M or newN < N:
+            # Remove all elements outside new dimensions
+            for i, j in list(self.keys()):
+                if i >= newM or j >= newN:
+                    del self._dict[i, j]
+        self._shape = shape
+
+    resize.__doc__ = _spbase.resize.__doc__
+
+    # Added for 1d to avoid `tocsr` from _base.py
+    def astype(self, dtype, casting='unsafe', copy=True):
+        dtype = np.dtype(dtype)
+        if self.dtype != dtype:
+            result = self._dok_container(self.shape, dtype=dtype)
+            data = np.array(list(self._dict.values()), dtype=dtype)
+            result._dict = dict(zip(self._dict, data))
+            return result
+        elif copy:
+            return self.copy()
+        return self
+
+
+def isspmatrix_dok(x):
+    """Is `x` of dok_matrix type?
+ + Parameters + ---------- + x + object to check for being a dok matrix + + Returns + ------- + bool + True if `x` is a dok matrix, False otherwise + + Examples + -------- + >>> from scipy.sparse import dok_array, dok_matrix, coo_matrix, isspmatrix_dok + >>> isspmatrix_dok(dok_matrix([[5]])) + True + >>> isspmatrix_dok(dok_array([[5]])) + False + >>> isspmatrix_dok(coo_matrix([[5]])) + False + """ + return isinstance(x, dok_matrix) + + +# This namespace class separates array from matrix with isinstance +class dok_array(_dok_base, sparray): + """ + Dictionary Of Keys based sparse array. + + This is an efficient structure for constructing sparse + arrays incrementally. + + This can be instantiated in several ways: + dok_array(D) + where D is a 2-D ndarray + + dok_array(S) + with another sparse array or matrix S (equivalent to S.todok()) + + dok_array((M,N), [dtype]) + create the array with initial shape (M,N) + dtype is optional, defaulting to dtype='d' + + Attributes + ---------- + dtype : dtype + Data type of the array + shape : 2-tuple + Shape of the array + ndim : int + Number of dimensions (this is always 2) + nnz + Number of nonzero elements + size + T + + Notes + ----- + + Sparse arrays can be used in arithmetic operations: they support + addition, subtraction, multiplication, division, and matrix power. + + - Allows for efficient O(1) access of individual elements. + - Duplicates are not allowed. + - Can be efficiently converted to a coo_array once constructed. + + Examples + -------- + >>> import numpy as np + >>> from scipy.sparse import dok_array + >>> S = dok_array((5, 5), dtype=np.float32) + >>> for i in range(5): + ... for j in range(5): + ... S[i, j] = i + j # Update element + + """ + + +class dok_matrix(spmatrix, _dok_base): + """ + Dictionary Of Keys based sparse matrix. + + This is an efficient structure for constructing sparse + matrices incrementally. + + This can be instantiated in several ways: + dok_matrix(D) + where D is a 2-D ndarray + + dok_matrix(S) + with another sparse array or matrix S (equivalent to S.todok()) + + dok_matrix((M,N), [dtype]) + create the matrix with initial shape (M,N) + dtype is optional, defaulting to dtype='d' + + Attributes + ---------- + dtype : dtype + Data type of the matrix + shape : 2-tuple + Shape of the matrix + ndim : int + Number of dimensions (this is always 2) + nnz + Number of nonzero elements + size + T + + Notes + ----- + + Sparse matrices can be used in arithmetic operations: they support + addition, subtraction, multiplication, division, and matrix power. + + - Allows for efficient O(1) access of individual elements. + - Duplicates are not allowed. + - Can be efficiently converted to a coo_matrix once constructed. + + Examples + -------- + >>> import numpy as np + >>> from scipy.sparse import dok_matrix + >>> S = dok_matrix((5, 5), dtype=np.float32) + >>> for i in range(5): + ... for j in range(5): + ... 
S[i, j] = i + j # Update element + + """ + + def set_shape(self, shape): + new_matrix = self.reshape(shape, copy=False).asformat(self.format) + self.__dict__ = new_matrix.__dict__ + + def get_shape(self): + """Get shape of a sparse matrix.""" + return self._shape + + shape = property(fget=get_shape, fset=set_shape) + + def __reversed__(self): + return self._dict.__reversed__() + + def __or__(self, other): + if isinstance(other, _dok_base): + return self._dict | other._dict + return self._dict | other + + def __ror__(self, other): + if isinstance(other, _dok_base): + return self._dict | other._dict + return self._dict | other + + def __ior__(self, other): + if isinstance(other, _dok_base): + self._dict |= other._dict + else: + self._dict |= other + return self diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_index.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_index.py new file mode 100644 index 0000000000000000000000000000000000000000..c0fc3d01b0ebd153703a76af431626d958b7de64 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_index.py @@ -0,0 +1,392 @@ +"""Indexing mixin for sparse array/matrix classes. +""" +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np +from ._sputils import isintlike + +if TYPE_CHECKING: + import numpy.typing as npt + +INT_TYPES = (int, np.integer) + + +def _broadcast_arrays(a, b): + """ + Same as np.broadcast_arrays(a, b) but old writeability rules. + + NumPy >= 1.17.0 transitions broadcast_arrays to return + read-only arrays. Set writeability explicitly to avoid warnings. + Retain the old writeability rules, as our Cython code assumes + the old behavior. + """ + x, y = np.broadcast_arrays(a, b) + x.flags.writeable = a.flags.writeable + y.flags.writeable = b.flags.writeable + return x, y + + +class IndexMixin: + """ + This class provides common dispatching and validation logic for indexing. + """ + def _raise_on_1d_array_slice(self): + """We do not currently support 1D sparse arrays. + + This function is called each time that a 1D array would + result, raising an error instead. + + Once 1D sparse arrays are implemented, it should be removed. + """ + from scipy.sparse import sparray + + if isinstance(self, sparray): + raise NotImplementedError( + 'We have not yet implemented 1D sparse slices; ' + 'please index using explicit indices, e.g. `x[:, [0]]`' + ) + + def __getitem__(self, key): + row, col = self._validate_indices(key) + + # Dispatch to specialized methods. 
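+        # (Each combination of index types (integer, slice, 1-D array,
+        # 2-D array) is routed to a dedicated ``_get_<rowtype>X<coltype>``
+        # method that the concrete format classes implement; combinations
+        # that would produce more than two dimensions raise IndexError.)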
+ if isinstance(row, INT_TYPES): + if isinstance(col, INT_TYPES): + return self._get_intXint(row, col) + elif isinstance(col, slice): + self._raise_on_1d_array_slice() + return self._get_intXslice(row, col) + elif col.ndim == 1: + self._raise_on_1d_array_slice() + return self._get_intXarray(row, col) + elif col.ndim == 2: + return self._get_intXarray(row, col) + raise IndexError('index results in >2 dimensions') + elif isinstance(row, slice): + if isinstance(col, INT_TYPES): + self._raise_on_1d_array_slice() + return self._get_sliceXint(row, col) + elif isinstance(col, slice): + if row == slice(None) and row == col: + return self.copy() + return self._get_sliceXslice(row, col) + elif col.ndim == 1: + return self._get_sliceXarray(row, col) + raise IndexError('index results in >2 dimensions') + elif row.ndim == 1: + if isinstance(col, INT_TYPES): + self._raise_on_1d_array_slice() + return self._get_arrayXint(row, col) + elif isinstance(col, slice): + return self._get_arrayXslice(row, col) + else: # row.ndim == 2 + if isinstance(col, INT_TYPES): + return self._get_arrayXint(row, col) + elif isinstance(col, slice): + raise IndexError('index results in >2 dimensions') + elif row.shape[1] == 1 and (col.ndim == 1 or col.shape[0] == 1): + # special case for outer indexing + return self._get_columnXarray(row[:,0], col.ravel()) + + # The only remaining case is inner (fancy) indexing + row, col = _broadcast_arrays(row, col) + if row.shape != col.shape: + raise IndexError('number of row and column indices differ') + if row.size == 0: + return self.__class__(np.atleast_2d(row).shape, dtype=self.dtype) + return self._get_arrayXarray(row, col) + + def __setitem__(self, key, x): + row, col = self._validate_indices(key) + + if isinstance(row, INT_TYPES) and isinstance(col, INT_TYPES): + x = np.asarray(x, dtype=self.dtype) + if x.size != 1: + raise ValueError('Trying to assign a sequence to an item') + self._set_intXint(row, col, x.flat[0]) + return + + if isinstance(row, slice): + row = np.arange(*row.indices(self.shape[0]))[:, None] + else: + row = np.atleast_1d(row) + + if isinstance(col, slice): + col = np.arange(*col.indices(self.shape[1]))[None, :] + if row.ndim == 1: + row = row[:, None] + else: + col = np.atleast_1d(col) + + i, j = _broadcast_arrays(row, col) + if i.shape != j.shape: + raise IndexError('number of row and column indices differ') + + from ._base import issparse + if issparse(x): + if i.ndim == 1: + # Inner indexing, so treat them like row vectors. + i = i[None] + j = j[None] + broadcast_row = x.shape[0] == 1 and i.shape[0] != 1 + broadcast_col = x.shape[1] == 1 and i.shape[1] != 1 + if not ((broadcast_row or x.shape[0] == i.shape[0]) and + (broadcast_col or x.shape[1] == i.shape[1])): + raise ValueError('shape mismatch in assignment') + if x.shape[0] == 0 or x.shape[1] == 0: + return + x = x.tocoo(copy=True) + x.sum_duplicates() + self._set_arrayXarray_sparse(i, j, x) + else: + # Make x and i into the same shape + x = np.asarray(x, dtype=self.dtype) + if x.squeeze().shape != i.squeeze().shape: + x = np.broadcast_to(x, i.shape) + if x.size == 0: + return + x = x.reshape(i.shape) + self._set_arrayXarray(i, j, x) + + def _validate_indices(self, key): + # First, check if indexing with single boolean matrix. 
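+        # (A 2-D boolean mask with the same shape as ``self``, as in
+        # ``A[A > 0]``, is reduced to the coordinates of its True entries,
+        # which then behave like a pair of fancy integer indices.)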
+ from ._base import _spbase + if (isinstance(key, (_spbase, np.ndarray)) and + key.ndim == 2 and key.dtype.kind == 'b'): + if key.shape != self.shape: + raise IndexError('boolean index shape does not match array shape') + row, col = key.nonzero() + else: + row, col = _unpack_index(key) + M, N = self.shape + + def _validate_bool_idx( + idx: npt.NDArray[np.bool_], + axis_size: int, + axis_name: str + ) -> npt.NDArray[np.int_]: + if len(idx) != axis_size: + raise IndexError( + f"boolean {axis_name} index has incorrect length: {len(idx)} " + f"instead of {axis_size}" + ) + return _boolean_index_to_array(idx) + + if isintlike(row): + row = int(row) + if row < -M or row >= M: + raise IndexError('row index (%d) out of range' % row) + if row < 0: + row += M + elif (bool_row := _compatible_boolean_index(row)) is not None: + row = _validate_bool_idx(bool_row, M, "row") + elif not isinstance(row, slice): + row = self._asindices(row, M) + + if isintlike(col): + col = int(col) + if col < -N or col >= N: + raise IndexError('column index (%d) out of range' % col) + if col < 0: + col += N + elif (bool_col := _compatible_boolean_index(col)) is not None: + col = _validate_bool_idx(bool_col, N, "column") + elif not isinstance(col, slice): + col = self._asindices(col, N) + + return row, col + + def _asindices(self, idx, length): + """Convert `idx` to a valid index for an axis with a given length. + + Subclasses that need special validation can override this method. + """ + try: + x = np.asarray(idx) + except (ValueError, TypeError, MemoryError) as e: + raise IndexError('invalid index') from e + + if x.ndim not in (1, 2): + raise IndexError('Index dimension must be 1 or 2') + + if x.size == 0: + return x + + # Check bounds + max_indx = x.max() + if max_indx >= length: + raise IndexError('index (%d) out of range' % max_indx) + + min_indx = x.min() + if min_indx < 0: + if min_indx < -length: + raise IndexError('index (%d) out of range' % min_indx) + if x is idx or not x.flags.owndata: + x = x.copy() + x[x < 0] += length + return x + + def _getrow(self, i): + """Return a copy of row i of the matrix, as a (1 x n) row vector. + """ + M, N = self.shape + i = int(i) + if i < -M or i >= M: + raise IndexError('index (%d) out of range' % i) + if i < 0: + i += M + return self._get_intXslice(i, slice(None)) + + def _getcol(self, i): + """Return a copy of column i of the matrix, as a (m x 1) column vector. 
+ """ + M, N = self.shape + i = int(i) + if i < -N or i >= N: + raise IndexError('index (%d) out of range' % i) + if i < 0: + i += N + return self._get_sliceXint(slice(None), i) + + def _get_intXint(self, row, col): + raise NotImplementedError() + + def _get_intXarray(self, row, col): + raise NotImplementedError() + + def _get_intXslice(self, row, col): + raise NotImplementedError() + + def _get_sliceXint(self, row, col): + raise NotImplementedError() + + def _get_sliceXslice(self, row, col): + raise NotImplementedError() + + def _get_sliceXarray(self, row, col): + raise NotImplementedError() + + def _get_arrayXint(self, row, col): + raise NotImplementedError() + + def _get_arrayXslice(self, row, col): + raise NotImplementedError() + + def _get_columnXarray(self, row, col): + raise NotImplementedError() + + def _get_arrayXarray(self, row, col): + raise NotImplementedError() + + def _set_intXint(self, row, col, x): + raise NotImplementedError() + + def _set_arrayXarray(self, row, col, x): + raise NotImplementedError() + + def _set_arrayXarray_sparse(self, row, col, x): + # Fall back to densifying x + x = np.asarray(x.toarray(), dtype=self.dtype) + x, _ = _broadcast_arrays(x, row) + self._set_arrayXarray(row, col, x) + + +def _unpack_index(index) -> tuple[ + int | slice | npt.NDArray[np.bool_ | np.int_], + int | slice | npt.NDArray[np.bool_ | np.int_] +]: + """ Parse index. Always return a tuple of the form (row, col). + Valid type for row/col is integer, slice, array of bool, or array of integers. + """ + # Parse any ellipses. + index = _check_ellipsis(index) + + # Next, parse the tuple or object + if isinstance(index, tuple): + if len(index) == 2: + row, col = index + elif len(index) == 1: + row, col = index[0], slice(None) + else: + raise IndexError('invalid number of indices') + else: + idx = _compatible_boolean_index(index) + if idx is None: + row, col = index, slice(None) + elif idx.ndim < 2: + return idx, slice(None) + elif idx.ndim == 2: + return idx.nonzero() + # Next, check for validity and transform the index as needed. + from ._base import issparse + if issparse(row) or issparse(col): + # Supporting sparse boolean indexing with both row and col does + # not work because spmatrix.ndim is always 2. + raise IndexError( + 'Indexing with sparse matrices is not supported ' + 'except boolean indexing where matrix and index ' + 'are equal shapes.') + return row, col + + +def _check_ellipsis(index): + """Process indices with Ellipsis. Returns modified index.""" + if index is Ellipsis: + return (slice(None), slice(None)) + + if not isinstance(index, tuple): + return index + + # Find any Ellipsis objects. + ellipsis_indices = [i for i, v in enumerate(index) if v is Ellipsis] + if not ellipsis_indices: + return index + if len(ellipsis_indices) > 1: + raise IndexError("an index can only have a single ellipsis ('...')") + + # Replace the Ellipsis object with 0, 1, or 2 null-slices as needed. + i, = ellipsis_indices + num_slices = max(0, 3 - len(index)) + return index[:i] + (slice(None),) * num_slices + index[i + 1:] + + +def _maybe_bool_ndarray(idx): + """Returns a compatible array if elements are boolean. + """ + idx = np.asanyarray(idx) + if idx.dtype.kind == 'b': + return idx + return None + + +def _first_element_bool(idx, max_dim=2): + """Returns True if first element of the incompatible + array type is boolean. 
+ """ + if max_dim < 1: + return None + try: + first = next(iter(idx), None) + except TypeError: + return None + if isinstance(first, bool): + return True + return _first_element_bool(first, max_dim-1) + + +def _compatible_boolean_index(idx): + """Returns a boolean index array that can be converted to + integer array. Returns None if no such array exists. + """ + # Presence of attribute `ndim` indicates a compatible array type. + if hasattr(idx, 'ndim') or _first_element_bool(idx): + return _maybe_bool_ndarray(idx) + return None + + +def _boolean_index_to_array(idx): + if idx.ndim > 1: + raise IndexError('invalid index shape') + return np.where(idx)[0] diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_matrix.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_matrix.py new file mode 100644 index 0000000000000000000000000000000000000000..1ab8749423833b78f7efc17feb6e1a8e6405408a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_matrix.py @@ -0,0 +1,113 @@ +class spmatrix: + """This class provides a base class for all sparse matrix classes. + + It cannot be instantiated. Most of the work is provided by subclasses. + """ + + @property + def _bsr_container(self): + from ._bsr import bsr_matrix + return bsr_matrix + + @property + def _coo_container(self): + from ._coo import coo_matrix + return coo_matrix + + @property + def _csc_container(self): + from ._csc import csc_matrix + return csc_matrix + + @property + def _csr_container(self): + from ._csr import csr_matrix + return csr_matrix + + @property + def _dia_container(self): + from ._dia import dia_matrix + return dia_matrix + + @property + def _dok_container(self): + from ._dok import dok_matrix + return dok_matrix + + @property + def _lil_container(self): + from ._lil import lil_matrix + return lil_matrix + + # Restore matrix multiplication + def __mul__(self, other): + return self._matmul_dispatch(other) + + def __rmul__(self, other): + return self._rmatmul_dispatch(other) + + # Restore matrix power + def __pow__(self, power): + from .linalg import matrix_power + + return matrix_power(self, power) + + ## Backward compatibility + + def set_shape(self, shape): + """Set the shape of the matrix in-place""" + # Make sure copy is False since this is in place + # Make sure format is unchanged because we are doing a __dict__ swap + new_self = self.reshape(shape, copy=False).asformat(self.format) + self.__dict__ = new_self.__dict__ + + def get_shape(self): + """Get the shape of the matrix""" + return self._shape + + shape = property(fget=get_shape, fset=set_shape, + doc="Shape of the matrix") + + def asfptype(self): + """Upcast matrix to a floating point format (if necessary)""" + return self._asfptype() + + def getmaxprint(self): + """Maximum number of elements to display when printed.""" + return self._getmaxprint() + + def getformat(self): + """Matrix storage format""" + return self.format + + def getnnz(self, axis=None): + """Number of stored values, including explicit zeros. + + Parameters + ---------- + axis : None, 0, or 1 + Select between the number of values across the whole array, in + each column, or in each row. + """ + return self._getnnz(axis=axis) + + def getH(self): + """Return the Hermitian transpose of this matrix. + + See Also + -------- + numpy.matrix.getH : NumPy's implementation of `getH` for matrices + """ + return self.conjugate().transpose() + + def getcol(self, j): + """Returns a copy of column j of the matrix, as an (m x 1) sparse + matrix (column vector). 
+ """ + return self._getcol(j) + + def getrow(self, i): + """Returns a copy of row i of the matrix, as a (1 x n) sparse + matrix (row vector). + """ + return self._getrow(i) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_matrix_io.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_matrix_io.py new file mode 100644 index 0000000000000000000000000000000000000000..e115260afb9f625a68f6b14d8750d3650603bd11 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_matrix_io.py @@ -0,0 +1,167 @@ +import numpy as np +import scipy as sp + +__all__ = ['save_npz', 'load_npz'] + + +# Make loading safe vs. malicious input +PICKLE_KWARGS = dict(allow_pickle=False) + + +def save_npz(file, matrix, compressed=True): + """ Save a sparse matrix or array to a file using ``.npz`` format. + + Parameters + ---------- + file : str or file-like object + Either the file name (string) or an open file (file-like object) + where the data will be saved. If file is a string, the ``.npz`` + extension will be appended to the file name if it is not already + there. + matrix: spmatrix or sparray + The sparse matrix or array to save. + Supported formats: ``csc``, ``csr``, ``bsr``, ``dia`` or ``coo``. + compressed : bool, optional + Allow compressing the file. Default: True + + See Also + -------- + scipy.sparse.load_npz: Load a sparse matrix from a file using ``.npz`` format. + numpy.savez: Save several arrays into a ``.npz`` archive. + numpy.savez_compressed : Save several arrays into a compressed ``.npz`` archive. + + Examples + -------- + Store sparse matrix to disk, and load it again: + + >>> import numpy as np + >>> import scipy as sp + >>> sparse_matrix = sp.sparse.csc_matrix([[0, 0, 3], [4, 0, 0]]) + >>> sparse_matrix + <2x3 sparse matrix of type '' + with 2 stored elements in Compressed Sparse Column format> + >>> sparse_matrix.toarray() + array([[0, 0, 3], + [4, 0, 0]], dtype=int64) + + >>> sp.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix) + >>> sparse_matrix = sp.sparse.load_npz('/tmp/sparse_matrix.npz') + + >>> sparse_matrix + <2x3 sparse matrix of type '' + with 2 stored elements in Compressed Sparse Column format> + >>> sparse_matrix.toarray() + array([[0, 0, 3], + [4, 0, 0]], dtype=int64) + """ + arrays_dict = {} + if matrix.format in ('csc', 'csr', 'bsr'): + arrays_dict.update(indices=matrix.indices, indptr=matrix.indptr) + elif matrix.format == 'dia': + arrays_dict.update(offsets=matrix.offsets) + elif matrix.format == 'coo': + arrays_dict.update(row=matrix.row, col=matrix.col) + else: + msg = f'Save is not implemented for sparse matrix of format {matrix.format}.' + raise NotImplementedError(msg) + arrays_dict.update( + format=matrix.format.encode('ascii'), + shape=matrix.shape, + data=matrix.data + ) + if isinstance(matrix, sp.sparse.sparray): + arrays_dict.update(_is_array=True) + if compressed: + np.savez_compressed(file, **arrays_dict) + else: + np.savez(file, **arrays_dict) + + +def load_npz(file): + """ Load a sparse array/matrix from a file using ``.npz`` format. + + Parameters + ---------- + file : str or file-like object + Either the file name (string) or an open file (file-like object) + where the data will be loaded. + + Returns + ------- + result : csc_array, csr_array, bsr_array, dia_array or coo_array + A sparse array/matrix containing the loaded data. + + Raises + ------ + OSError + If the input file does not exist or cannot be read. 
+
+    See Also
+    --------
+    scipy.sparse.save_npz: Save a sparse array/matrix to a file using ``.npz`` format.
+    numpy.load: Load several arrays from a ``.npz`` archive.
+
+    Examples
+    --------
+    Store sparse array/matrix to disk, and load it again:
+
+    >>> import numpy as np
+    >>> import scipy as sp
+    >>> sparse_array = sp.sparse.csc_array([[0, 0, 3], [4, 0, 0]])
+    >>> sparse_array
+    <2x3 sparse array of type '<class 'numpy.int64'>'
+        with 2 stored elements in Compressed Sparse Column format>
+    >>> sparse_array.toarray()
+    array([[0, 0, 3],
+           [4, 0, 0]], dtype=int64)
+
+    >>> sp.sparse.save_npz('/tmp/sparse_array.npz', sparse_array)
+    >>> sparse_array = sp.sparse.load_npz('/tmp/sparse_array.npz')
+
+    >>> sparse_array
+    <2x3 sparse array of type '<class 'numpy.int64'>'
+        with 2 stored elements in Compressed Sparse Column format>
+    >>> sparse_array.toarray()
+    array([[0, 0, 3],
+           [4, 0, 0]], dtype=int64)
+
+    In this example we force the result to be a csr_array converted from a
+    saved csc_matrix:
+
+    >>> sparse_matrix = sp.sparse.csc_matrix([[0, 0, 3], [4, 0, 0]])
+    >>> sp.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix)
+    >>> tmp = sp.sparse.load_npz('/tmp/sparse_matrix.npz')
+    >>> sparse_array = sp.sparse.csr_array(tmp)
+    """
+    with np.load(file, **PICKLE_KWARGS) as loaded:
+        sparse_format = loaded.get('format')
+        if sparse_format is None:
+            raise ValueError(f'The file {file} does not contain '
+                             f'a sparse array or matrix.')
+        sparse_format = sparse_format.item()
+
+        if not isinstance(sparse_format, str):
+            # Play safe with Python 2 vs 3 backward compatibility;
+            # files saved with SciPy < 1.0.0 may contain unicode or bytes.
+            sparse_format = sparse_format.decode('ascii')
+
+        if loaded.get('_is_array'):
+            sparse_type = sparse_format + '_array'
+        else:
+            sparse_type = sparse_format + '_matrix'
+
+        try:
+            cls = getattr(sp.sparse, f'{sparse_type}')
+        except AttributeError as e:
+            raise ValueError(f'Unknown format "{sparse_type}"') from e
+
+        if sparse_format in ('csc', 'csr', 'bsr'):
+            return cls((loaded['data'], loaded['indices'], loaded['indptr']),
+                       shape=loaded['shape'])
+        elif sparse_format == 'dia':
+            return cls((loaded['data'], loaded['offsets']),
+                       shape=loaded['shape'])
+        elif sparse_format == 'coo':
+            return cls((loaded['data'], (loaded['row'], loaded['col'])),
+                       shape=loaded['shape'])
+        else:
+            raise NotImplementedError(f'Load is not implemented for '
+                                      f'sparse matrix of format {sparse_format}.')
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_sputils.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_sputils.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa515606006d5084799cd6ac8578e1f88ed51bb9
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/_sputils.py
@@ -0,0 +1,451 @@
+""" Utility functions for sparse matrix module
+"""
+
+import sys
+from typing import Any, Literal, Optional, Union
+import operator
+import numpy as np
+from math import prod
+import scipy.sparse as sp
+from scipy._lib._util import np_long, np_ulong
+
+
+__all__ = ['upcast', 'getdtype', 'getdata', 'isscalarlike', 'isintlike',
+           'isshape', 'issequence', 'isdense', 'ismatrix', 'get_sum_dtype']
+
+supported_dtypes = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc,
+                    np.uintc, np_long, np_ulong, np.longlong, np.ulonglong,
+                    np.float32, np.float64, np.longdouble,
+                    np.complex64, np.complex128, np.clongdouble]
+
+_upcast_memo = {}
+
+
+def upcast(*args):
+    """Returns the nearest supported sparse dtype for the
+    combination of one or more types.
+
+    upcast(t0, t1, ..., tn) -> T  where T is a supported dtype
+
+    Examples
+    --------
+    >>> from scipy.sparse._sputils import upcast
+    >>> upcast('int32')
+    <class 'numpy.int32'>
+    >>> upcast('bool')
+    <class 'numpy.bool_'>
+    >>> upcast('int32','float32')
+    <class 'numpy.float64'>
+    >>> upcast('bool',complex,float)
+    <class 'numpy.complex128'>
+
+    """
+
+    t = _upcast_memo.get(hash(args))
+    if t is not None:
+        return t
+
+    upcast = np.result_type(*args)
+
+    for t in supported_dtypes:
+        if np.can_cast(upcast, t):
+            _upcast_memo[hash(args)] = t
+            return t
+
+    raise TypeError(f'no supported conversion for types: {args!r}')
+
+
+def upcast_char(*args):
+    """Same as `upcast` but taking dtype.char as input (faster)."""
+    t = _upcast_memo.get(args)
+    if t is not None:
+        return t
+    t = upcast(*map(np.dtype, args))
+    _upcast_memo[args] = t
+    return t
+
+
+def upcast_scalar(dtype, scalar):
+    """Determine data type for binary operation between an array of
+    type `dtype` and a scalar.
+    """
+    return (np.array([0], dtype=dtype) * scalar).dtype
+
+
+def downcast_intp_index(arr):
+    """
+    Down-cast index array to np.intp dtype if it is of a larger dtype.
+
+    Raise an error if the array contains a value that is too large for
+    intp.
+    """
+    if arr.dtype.itemsize > np.dtype(np.intp).itemsize:
+        if arr.size == 0:
+            return arr.astype(np.intp)
+        maxval = arr.max()
+        minval = arr.min()
+        if maxval > np.iinfo(np.intp).max or minval < np.iinfo(np.intp).min:
+            raise ValueError("Cannot deal with arrays with indices larger "
+                             "than the machine maximum address size "
+                             "(e.g. 64-bit indices on 32-bit machine).")
+        return arr.astype(np.intp)
+    return arr
+
+
+def to_native(A):
+    """
+    Ensure that the data type of the NumPy array `A` has native byte order.
+
+    `A` must be a NumPy array. If the data type of `A` does not have native
+    byte order, a copy of `A` with a native byte order is returned. Otherwise
+    `A` is returned.
+    """
+    dt = A.dtype
+    if dt.isnative:
+        # Don't call `asarray()` if A is already native, to avoid unnecessarily
+        # creating a view of the input array.
+        return A
+    return np.asarray(A, dtype=dt.newbyteorder('native'))
+
+
+def getdtype(dtype, a=None, default=None):
+    """Function used to simplify argument processing. If 'dtype' is not
+    specified (is None), returns a.dtype; otherwise returns a np.dtype
+    object created from the specified dtype argument. If 'dtype' and 'a'
+    are both None, construct a data type out of the 'default' parameter.
+    """
+    # TODO is this really what we want?
+    if dtype is None:
+        try:
+            newdtype = a.dtype
+        except AttributeError as e:
+            if default is not None:
+                newdtype = np.dtype(default)
+            else:
+                raise TypeError("could not interpret data type") from e
+    else:
+        newdtype = np.dtype(dtype)
+        if newdtype == np.object_:
+            raise ValueError(
+                "object dtype is not supported by sparse matrices"
+            )
+
+    return newdtype
+
+
+def getdata(obj, dtype=None, copy=False) -> np.ndarray:
+    """
+    This is a wrapper of `np.array(obj, dtype=dtype, copy=copy)`
+    that will raise an error if the result is an object array.
+    """
+    data = np.array(obj, dtype=dtype, copy=copy)
+    # Defer to getdtype for checking that the dtype is OK.
+    # This is called for the validation only; we don't need the return value.
+    getdtype(data.dtype)
+    return data
+
+
+def get_index_dtype(arrays=(), maxval=None, check_contents=False):
+    """
+    Based on the input (integer) arrays, determine a suitable index data
+    type that can hold the data in the arrays.
+ + Parameters + ---------- + arrays : tuple of array_like + Input arrays whose types/contents to check + maxval : float, optional + Maximum value needed + check_contents : bool, optional + Whether to check the values in the arrays and not just their types. + Default: False (check only the types) + + Returns + ------- + dtype : dtype + Suitable index data type (int32 or int64) + + """ + + int32min = np.int32(np.iinfo(np.int32).min) + int32max = np.int32(np.iinfo(np.int32).max) + + # not using intc directly due to misinteractions with pythran + dtype = np.int32 if np.intc().itemsize == 4 else np.int64 + if maxval is not None: + maxval = np.int64(maxval) + if maxval > int32max: + dtype = np.int64 + + if isinstance(arrays, np.ndarray): + arrays = (arrays,) + + for arr in arrays: + arr = np.asarray(arr) + if not np.can_cast(arr.dtype, np.int32): + if check_contents: + if arr.size == 0: + # a bigger type not needed + continue + elif np.issubdtype(arr.dtype, np.integer): + maxval = arr.max() + minval = arr.min() + if minval >= int32min and maxval <= int32max: + # a bigger type not needed + continue + + dtype = np.int64 + break + + return dtype + + +def get_sum_dtype(dtype: np.dtype) -> np.dtype: + """Mimic numpy's casting for np.sum""" + if dtype.kind == 'u' and np.can_cast(dtype, np.uint): + return np.uint + if np.can_cast(dtype, np.int_): + return np.int_ + return dtype + + +def isscalarlike(x) -> bool: + """Is x either a scalar, an array scalar, or a 0-dim array?""" + return np.isscalar(x) or (isdense(x) and x.ndim == 0) + + +def isintlike(x) -> bool: + """Is x appropriate as an index into a sparse matrix? Returns True + if it can be cast safely to a machine int. + """ + # Fast-path check to eliminate non-scalar values. operator.index would + # catch this case too, but the exception catching is slow. + if np.ndim(x) != 0: + return False + try: + operator.index(x) + except (TypeError, ValueError): + try: + loose_int = bool(int(x) == x) + except (TypeError, ValueError): + return False + if loose_int: + msg = "Inexact indices into sparse matrices are not allowed" + raise ValueError(msg) + return loose_int + return True + + +def isshape(x, nonneg=False, *, allow_1d=False) -> bool: + """Is x a valid tuple of dimensions? + + If nonneg, also checks that the dimensions are non-negative. + If allow_1d, shapes of length 1 or 2 are allowed. + """ + ndim = len(x) + if ndim != 2 and not (allow_1d and ndim == 1): + return False + for d in x: + if not isintlike(d): + return False + if nonneg and d < 0: + return False + return True + + +def issequence(t) -> bool: + return ((isinstance(t, (list, tuple)) and + (len(t) == 0 or np.isscalar(t[0]))) or + (isinstance(t, np.ndarray) and (t.ndim == 1))) + + +def ismatrix(t) -> bool: + return ((isinstance(t, (list, tuple)) and + len(t) > 0 and issequence(t[0])) or + (isinstance(t, np.ndarray) and t.ndim == 2)) + + +def isdense(x) -> bool: + return isinstance(x, np.ndarray) + + +def validateaxis(axis) -> None: + if axis is None: + return + axis_type = type(axis) + + # In NumPy, you can pass in tuples for 'axis', but they are + # not very useful for sparse matrices given their limited + # dimensions, so let's make it explicit that they are not + # allowed to be passed in + if axis_type == tuple: + raise TypeError("Tuples are not accepted for the 'axis' parameter. 
" + "Please pass in one of the following: " + "{-2, -1, 0, 1, None}.") + + # If not a tuple, check that the provided axis is actually + # an integer and raise a TypeError similar to NumPy's + if not np.issubdtype(np.dtype(axis_type), np.integer): + raise TypeError(f"axis must be an integer, not {axis_type.__name__}") + + if not (-2 <= axis <= 1): + raise ValueError("axis out of range") + + +def check_shape(args, current_shape=None, *, allow_1d=False) -> tuple[int, ...]: + """Imitate numpy.matrix handling of shape arguments + + Parameters + ---------- + args : array_like + Data structures providing information about the shape of the sparse array. + current_shape : tuple, optional + The current shape of the sparse array or matrix. + If None (default), the current shape will be inferred from args. + allow_1d : bool, optional + If True, then 1-D or 2-D arrays are accepted. + If False (default), then only 2-D arrays are accepted and an error is + raised otherwise. + + Returns + ------- + new_shape: tuple + The new shape after validation. + """ + if len(args) == 0: + raise TypeError("function missing 1 required positional argument: " + "'shape'") + if len(args) == 1: + try: + shape_iter = iter(args[0]) + except TypeError: + new_shape = (operator.index(args[0]), ) + else: + new_shape = tuple(operator.index(arg) for arg in shape_iter) + else: + new_shape = tuple(operator.index(arg) for arg in args) + + if current_shape is None: + if allow_1d: + if len(new_shape) not in (1, 2): + raise ValueError('shape must be a 1- or 2-tuple of positive ' + 'integers') + elif len(new_shape) != 2: + raise ValueError('shape must be a 2-tuple of positive integers') + if any(d < 0 for d in new_shape): + raise ValueError("'shape' elements cannot be negative") + else: + # Check the current size only if needed + current_size = prod(current_shape) + + # Check for negatives + negative_indexes = [i for i, x in enumerate(new_shape) if x < 0] + if not negative_indexes: + new_size = prod(new_shape) + if new_size != current_size: + raise ValueError('cannot reshape array of size {} into shape {}' + .format(current_size, new_shape)) + elif len(negative_indexes) == 1: + skip = negative_indexes[0] + specified = prod(new_shape[:skip] + new_shape[skip+1:]) + unspecified, remainder = divmod(current_size, specified) + if remainder != 0: + err_shape = tuple('newshape' if x < 0 else x for x in new_shape) + raise ValueError('cannot reshape array of size {} into shape {}' + ''.format(current_size, err_shape)) + new_shape = new_shape[:skip] + (unspecified,) + new_shape[skip+1:] + else: + raise ValueError('can only specify one unknown dimension') + + if len(new_shape) != 2 and not (allow_1d and len(new_shape) == 1): + raise ValueError('matrix shape must be two-dimensional') + + return new_shape + + +def check_reshape_kwargs(kwargs): + """Unpack keyword arguments for reshape function. + + This is useful because keyword arguments after star arguments are not + allowed in Python 2, but star keyword arguments are. This function unpacks + 'order' and 'copy' from the star keyword arguments (with defaults) and + throws an error for any remaining. + """ + + order = kwargs.pop('order', 'C') + copy = kwargs.pop('copy', False) + if kwargs: # Some unused kwargs remain + raise TypeError('reshape() got unexpected keywords arguments: {}' + .format(', '.join(kwargs.keys()))) + return order, copy + + +def is_pydata_spmatrix(m) -> bool: + """ + Check whether object is pydata/sparse matrix, avoiding importing the module. 
+ """ + base_cls = getattr(sys.modules.get('sparse'), 'SparseArray', None) + return base_cls is not None and isinstance(m, base_cls) + + +def convert_pydata_sparse_to_scipy( + arg: Any, target_format: Optional[Literal["csc", "csr"]] = None +) -> Union[Any, "sp.spmatrix"]: + """ + Convert a pydata/sparse array to scipy sparse matrix, + pass through anything else. + """ + if is_pydata_spmatrix(arg): + arg = arg.to_scipy_sparse() + if target_format is not None: + arg = arg.asformat(target_format) + elif arg.format not in ("csc", "csr"): + arg = arg.tocsc() + return arg + + +############################################################################### +# Wrappers for NumPy types that are deprecated + +# Numpy versions of these functions raise deprecation warnings, the +# ones below do not. + +def matrix(*args, **kwargs): + return np.array(*args, **kwargs).view(np.matrix) + + +def asmatrix(data, dtype=None): + if isinstance(data, np.matrix) and (dtype is None or data.dtype == dtype): + return data + return np.asarray(data, dtype=dtype).view(np.matrix) + +############################################################################### + + +def _todata(s) -> np.ndarray: + """Access nonzero values, possibly after summing duplicates. + + Parameters + ---------- + s : sparse array + Input sparse array. + + Returns + ------- + data: ndarray + Nonzero values of the array, with shape (s.nnz,) + + """ + if isinstance(s, sp._data._data_matrix): + return s._deduped_data() + + if isinstance(s, sp.dok_array): + return np.fromiter(s.values(), dtype=s.dtype, count=s.nnz) + + if isinstance(s, sp.lil_array): + data = np.empty(s.nnz, dtype=s.dtype) + sp._csparsetools.lil_flatten_to_array(s.data, data) + return data + + return s.tocoo()._deduped_data() diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/base.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/base.py new file mode 100644 index 0000000000000000000000000000000000000000..d0a427e4570e07cc71e9e45bf98c7cf61798125b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/base.py @@ -0,0 +1,33 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.sparse` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'MAXPRINT', + 'SparseEfficiencyWarning', + 'SparseFormatWarning', + 'SparseWarning', + 'asmatrix', + 'check_reshape_kwargs', + 'check_shape', + 'get_sum_dtype', + 'isdense', + 'isscalarlike', + 'issparse', + 'isspmatrix', + 'spmatrix', + 'validateaxis', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="sparse", module="base", + private_modules=["_base"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/bsr.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/bsr.py new file mode 100644 index 0000000000000000000000000000000000000000..c686301a78fc3e2221600eb06035a5cb12898cdb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/bsr.py @@ -0,0 +1,36 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.sparse` namespace for importing the functions +# included below. 
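+# (For example, ``from scipy.sparse import bsr_matrix`` replaces the
+# deprecated ``from scipy.sparse.bsr import bsr_matrix``.)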
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'bsr_matmat', + 'bsr_matrix', + 'bsr_matvec', + 'bsr_matvecs', + 'bsr_sort_indices', + 'bsr_tocsr', + 'bsr_transpose', + 'check_shape', + 'csr_matmat_maxnnz', + 'getdata', + 'getdtype', + 'isshape', + 'isspmatrix_bsr', + 'spmatrix', + 'to_native', + 'upcast', + 'warn', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="sparse", module="bsr", + private_modules=["_bsr"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/compressed.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/compressed.py new file mode 100644 index 0000000000000000000000000000000000000000..e6dc8a73e5ab527cfe0b73d558dae25047cfb98b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/compressed.py @@ -0,0 +1,43 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.sparse` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'IndexMixin', + 'SparseEfficiencyWarning', + 'check_shape', + 'csr_column_index1', + 'csr_column_index2', + 'csr_row_index', + 'csr_row_slice', + 'csr_sample_offsets', + 'csr_sample_values', + 'csr_todense', + 'downcast_intp_index', + 'get_csr_submatrix', + 'get_sum_dtype', + 'getdtype', + 'is_pydata_spmatrix', + 'isdense', + 'isintlike', + 'isscalarlike', + 'isshape', + 'operator', + 'to_native', + 'upcast', + 'upcast_char', + 'warn', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="sparse", module="compressed", + private_modules=["_compressed"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/coo.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/coo.py new file mode 100644 index 0000000000000000000000000000000000000000..bda2da3d09a676ab79739331a21ba26102bb90ae --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/coo.py @@ -0,0 +1,37 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.sparse` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'SparseEfficiencyWarning', + 'check_reshape_kwargs', + 'check_shape', + 'coo_matrix', + 'coo_matvec', + 'coo_tocsr', + 'coo_todense', + 'downcast_intp_index', + 'getdata', + 'getdtype', + 'isshape', + 'isspmatrix_coo', + 'operator', + 'spmatrix', + 'to_native', + 'upcast', + 'upcast_char', + 'warn', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="sparse", module="coo", + private_modules=["_coo"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csc.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csc.py new file mode 100644 index 0000000000000000000000000000000000000000..d140b841e0724155f8602a4215836e2c8a7fad72 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csc.py @@ -0,0 +1,25 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.sparse` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'csc_matrix', + 'csc_tocsr', + 'expandptr', + 'isspmatrix_csc', + 'spmatrix', + 'upcast', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="sparse", module="csc", + private_modules=["_csc"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/__init__.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..dd64891ea2c94c91823762ebb24d387fb54a3dd0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/__init__.py @@ -0,0 +1,208 @@ +r""" +Compressed sparse graph routines (:mod:`scipy.sparse.csgraph`) +============================================================== + +.. currentmodule:: scipy.sparse.csgraph + +Fast graph algorithms based on sparse matrix representations. + +Contents +-------- + +.. autosummary:: + :toctree: generated/ + + connected_components -- determine connected components of a graph + laplacian -- compute the laplacian of a graph + shortest_path -- compute the shortest path between points on a positive graph + dijkstra -- use Dijkstra's algorithm for shortest path + floyd_warshall -- use the Floyd-Warshall algorithm for shortest path + bellman_ford -- use the Bellman-Ford algorithm for shortest path + johnson -- use Johnson's algorithm for shortest path + breadth_first_order -- compute a breadth-first order of nodes + depth_first_order -- compute a depth-first order of nodes + breadth_first_tree -- construct the breadth-first tree from a given node + depth_first_tree -- construct a depth-first tree from a given node + minimum_spanning_tree -- construct the minimum spanning tree of a graph + reverse_cuthill_mckee -- compute permutation for reverse Cuthill-McKee ordering + maximum_flow -- solve the maximum flow problem for a graph + maximum_bipartite_matching -- compute a maximum matching of a bipartite graph + min_weight_full_bipartite_matching - compute a minimum weight full matching of a bipartite graph + structural_rank -- compute the structural rank of a graph + NegativeCycleError + +.. autosummary:: + :toctree: generated/ + + construct_dist_matrix + csgraph_from_dense + csgraph_from_masked + csgraph_masked_from_dense + csgraph_to_dense + csgraph_to_masked + reconstruct_path + +Graph Representations +--------------------- +This module uses graphs which are stored in a matrix format. A +graph with N nodes can be represented by an (N x N) adjacency matrix G. +If there is a connection from node i to node j, then G[i, j] = w, where +w is the weight of the connection. For nodes i and j which are +not connected, the value depends on the representation: + +- for dense array representations, non-edges are represented by + G[i, j] = 0, infinity, or NaN. + +- for dense masked representations (of type np.ma.MaskedArray), non-edges + are represented by masked values. This can be useful when graphs with + zero-weight edges are desired. + +- for sparse array representations, non-edges are represented by + non-entries in the matrix. This sort of sparse representation also + allows for edges with zero weights. 
+
+As a concrete example, imagine that you would like to represent the following
+undirected graph::
+
+             G
+
+            (0)
+           /   \
+          1     2
+         /       \
+       (2)       (1)
+
+This graph has three nodes, where nodes 0 and 1 are connected by an edge of
+weight 2, and nodes 0 and 2 are connected by an edge of weight 1.
+We can construct the dense, masked, and sparse representations as follows,
+keeping in mind that an undirected graph is represented by a symmetric matrix::
+
+    >>> import numpy as np
+    >>> G_dense = np.array([[0, 2, 1],
+    ...                     [2, 0, 0],
+    ...                     [1, 0, 0]])
+    >>> G_masked = np.ma.masked_values(G_dense, 0)
+    >>> from scipy.sparse import csr_matrix
+    >>> G_sparse = csr_matrix(G_dense)
+
+This becomes more difficult when zero edges are significant. For example,
+consider the situation when we slightly modify the above graph::
+
+             G2
+
+            (0)
+           /   \
+          0     2
+         /       \
+       (2)       (1)
+
+This is identical to the previous graph, except nodes 0 and 2 are connected
+by an edge of zero weight. In this case, the dense representation above
+leads to ambiguities: how can non-edges be represented if zero is a meaningful
+value? Here, either a masked or sparse representation must be used
+to eliminate the ambiguity::
+
+    >>> import numpy as np
+    >>> G2_data = np.array([[np.inf, 2, 0],
+    ...                     [2, np.inf, np.inf],
+    ...                     [0, np.inf, np.inf]])
+    >>> G2_masked = np.ma.masked_invalid(G2_data)
+    >>> from scipy.sparse.csgraph import csgraph_from_dense
+    >>> # G2_sparse = csr_matrix(G2_data) would give the wrong result
+    >>> G2_sparse = csgraph_from_dense(G2_data, null_value=np.inf)
+    >>> G2_sparse.data
+    array([ 2., 0., 2., 0.])
+
+Here we have used a utility routine from the csgraph submodule in order to
+convert the dense representation to a sparse representation which can be
+understood by the algorithms in the submodule. By viewing the data array, we
+can see that the zero values are explicitly encoded in the graph.
+
+Directed vs. undirected
+^^^^^^^^^^^^^^^^^^^^^^^
+Matrices may represent either directed or undirected graphs. This is
+specified throughout the csgraph module by a boolean keyword. Graphs are
+assumed to be directed by default. In a directed graph, traversal from node
+i to node j can be accomplished over the edge G[i, j], but not the edge
+G[j, i]. Consider the following dense graph::
+
+    >>> import numpy as np
+    >>> G_dense = np.array([[0, 1, 0],
+    ...                     [2, 0, 3],
+    ...                     [0, 4, 0]])
+
+When ``directed=True`` we get the graph::
+
+      ---1--> ---3-->
+    (0)     (1)     (2)
+      <--2--- <--4---
+
+In an undirected graph, traversal from node i to node j can be
+accomplished over either G[i, j] or G[j, i]. If both edges are not null,
+and the two have unequal weights, then the smaller of the two is used.
+
+So for the same graph, when ``directed=False`` we get the graph::
+
+    (0)--1--(1)--3--(2)
+
+Note that a symmetric matrix will represent an undirected graph, regardless
+of whether the 'directed' keyword is set to True or False. In this case,
+using ``directed=True`` generally leads to more efficient computation.
+
+The routines in this module accept as input either scipy.sparse representations
+(csr, csc, or lil format), masked representations, or dense representations
+with non-edges indicated by zeros, infinities, and NaN entries.
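+
+As a minimal illustration of the ``directed`` keyword (a sketch; the printed
+arrays assume NumPy's default float formatting), the shortest-path lengths
+for the dense graph above differ between the two interpretations::
+
+    >>> from scipy.sparse.csgraph import shortest_path
+    >>> shortest_path(G_dense, directed=True)
+    array([[0., 1., 4.],
+           [2., 0., 3.],
+           [6., 4., 0.]])
+    >>> shortest_path(G_dense, directed=False)
+    array([[0., 1., 4.],
+           [1., 0., 3.],
+           [4., 3., 0.]])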
+""" # noqa: E501 + +__docformat__ = "restructuredtext en" + +__all__ = ['connected_components', + 'laplacian', + 'shortest_path', + 'floyd_warshall', + 'dijkstra', + 'bellman_ford', + 'johnson', + 'breadth_first_order', + 'depth_first_order', + 'breadth_first_tree', + 'depth_first_tree', + 'minimum_spanning_tree', + 'reverse_cuthill_mckee', + 'maximum_flow', + 'maximum_bipartite_matching', + 'min_weight_full_bipartite_matching', + 'structural_rank', + 'construct_dist_matrix', + 'reconstruct_path', + 'csgraph_masked_from_dense', + 'csgraph_from_dense', + 'csgraph_from_masked', + 'csgraph_to_dense', + 'csgraph_to_masked', + 'NegativeCycleError'] + +from ._laplacian import laplacian +from ._shortest_path import ( + shortest_path, floyd_warshall, dijkstra, bellman_ford, johnson, + NegativeCycleError +) +from ._traversal import ( + breadth_first_order, depth_first_order, breadth_first_tree, + depth_first_tree, connected_components +) +from ._min_spanning_tree import minimum_spanning_tree +from ._flow import maximum_flow +from ._matching import ( + maximum_bipartite_matching, min_weight_full_bipartite_matching +) +from ._reordering import reverse_cuthill_mckee, structural_rank +from ._tools import ( + construct_dist_matrix, reconstruct_path, csgraph_from_dense, + csgraph_to_dense, csgraph_masked_from_dense, csgraph_from_masked, + csgraph_to_masked +) + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..254f748ede3c48c9e635244c426e39f2f71f4adf Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/__pycache__/_laplacian.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/__pycache__/_laplacian.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64979ca67d5a66bdac2910ac5935b1d15d3d08df Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/__pycache__/_laplacian.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/__pycache__/_validation.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/__pycache__/_validation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2cb478b396332e1287198f0a578ec5a512e86b5b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/__pycache__/_validation.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/_flow.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/_flow.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..eacb49387062ba5a71d148157159268be62ddf0a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/_flow.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/_laplacian.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/_laplacian.py new file mode 100644 index 
0000000000000000000000000000000000000000..9c3d1f7972d7e16b9d600bcbc40f57fb776b657c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/_laplacian.py
@@ -0,0 +1,562 @@
+"""
+Laplacian of a compressed-sparse graph
+"""
+
+import numpy as np
+from scipy.sparse import issparse
+from scipy.sparse.linalg import LinearOperator
+from scipy.sparse._sputils import convert_pydata_sparse_to_scipy, is_pydata_spmatrix
+
+
+###############################################################################
+# Graph laplacian
+def laplacian(
+    csgraph,
+    normed=False,
+    return_diag=False,
+    use_out_degree=False,
+    *,
+    copy=True,
+    form="array",
+    dtype=None,
+    symmetrized=False,
+):
+    """
+    Return the Laplacian of a directed graph.
+
+    Parameters
+    ----------
+    csgraph : array_like or sparse matrix, 2 dimensions
+        compressed-sparse graph, with shape (N, N).
+    normed : bool, optional
+        If True, then compute symmetrically normalized Laplacian.
+        Default: False.
+    return_diag : bool, optional
+        If True, then also return an array related to vertex degrees.
+        Default: False.
+    use_out_degree : bool, optional
+        If True, then use out-degree instead of in-degree.
+        This distinction matters only if the graph is asymmetric.
+        Default: False.
+    copy : bool, optional
+        If False, then change `csgraph` in place if possible,
+        avoiding doubling the memory use.
+        Default: True, for backward compatibility.
+    form : {'array', 'function', 'lo'}
+        Determines the format of the output Laplacian:
+
+        * 'array' is a numpy array;
+        * 'function' is a callable that evaluates the Laplacian-vector
+          or Laplacian-matrix product;
+        * 'lo' returns the result as a `LinearOperator`.
+
+        Choosing 'function' or 'lo' always avoids doubling
+        the memory use, ignoring `copy` value.
+        Default: 'array', for backward compatibility.
+    dtype : None or numeric numpy dtype, optional
+        The dtype of the output. If ``dtype=None``, the dtype of the
+        output matches the dtype of the input csgraph, except for
+        the case ``normed=True`` and integer-like csgraph, where
+        the output dtype is 'float', allowing accurate normalization
+        but dramatically increasing the memory use.
+        Default: None, for backward compatibility.
+    symmetrized : bool, optional
+        If True, then the output Laplacian is symmetric/Hermitian.
+        The symmetrization is done by ``csgraph + csgraph.T.conj()``
+        without dividing by 2 to preserve integer dtypes if possible
+        prior to the construction of the Laplacian.
+        The symmetrization will increase the memory footprint of
+        sparse matrices unless the sparsity pattern is symmetric or
+        `form` is 'function' or 'lo'.
+        Default: False, for backward compatibility.
+
+    Returns
+    -------
+    lap : ndarray, or sparse matrix, or `LinearOperator`
+        The N x N Laplacian of csgraph. It will be a NumPy array (dense)
+        if the input was dense, or a sparse matrix otherwise, or a
+        function or `LinearOperator` if `form` is 'function' or 'lo',
+        respectively.
+    diag : ndarray, optional
+        The length-N main diagonal of the Laplacian matrix.
+        For the normalized Laplacian, this is the array of square roots
+        of vertex degrees or 1 if the degree is zero.
+
+    Notes
+    -----
+    The Laplacian matrix of a graph is sometimes referred to as the
+    "Kirchhoff matrix" or just the "Laplacian", and is useful in many
+    parts of spectral graph theory.
+    In particular, the eigen-decomposition of the Laplacian can give
+    insight into many properties of the graph, and is commonly used,
+    e.g., for spectral data embedding and clustering.
+
+    The constructed Laplacian doubles the memory use if ``copy=True`` and
+    ``form="array"``, which are the defaults.
+    Choosing ``copy=False`` has no effect unless ``form="array"`` and the
+    matrix is either sparse in the ``coo`` format or a dense array; the
+    exception is integer input with ``normed=True``, which forces a float
+    output and therefore always copies.
+
+    Sparse input is reformatted into ``coo`` if ``form="array"``,
+    which is the default.
+
+    If the input adjacency matrix is not symmetric, the Laplacian is
+    also non-symmetric unless ``symmetrized=True`` is used.
+
+    Diagonal entries of the input adjacency matrix are ignored and
+    replaced with zeros for the purpose of normalization when ``normed=True``.
+    The normalization uses the inverse square roots of row-sums of the input
+    adjacency matrix, and thus may fail if the row-sums contain negative
+    values or complex values with a non-zero imaginary part.
+
+    The normalization is symmetric, making the normalized Laplacian also
+    symmetric if the input csgraph was symmetric.
+
+    References
+    ----------
+    .. [1] Laplacian matrix. https://en.wikipedia.org/wiki/Laplacian_matrix
+
+    Examples
+    --------
+    >>> import numpy as np
+    >>> from scipy.sparse import csgraph
+
+    Our first illustration is the symmetric graph
+
+    >>> G = np.arange(4) * np.arange(4)[:, np.newaxis]
+    >>> G
+    array([[0, 0, 0, 0],
+           [0, 1, 2, 3],
+           [0, 2, 4, 6],
+           [0, 3, 6, 9]])
+
+    and its symmetric Laplacian matrix
+
+    >>> csgraph.laplacian(G)
+    array([[ 0,  0,  0,  0],
+           [ 0,  5, -2, -3],
+           [ 0, -2,  8, -6],
+           [ 0, -3, -6,  9]])
+
+    The non-symmetric graph
+
+    >>> G = np.arange(9).reshape(3, 3)
+    >>> G
+    array([[0, 1, 2],
+           [3, 4, 5],
+           [6, 7, 8]])
+
+    has different row- and column sums, resulting in two varieties
+    of the Laplacian matrix, using an in-degree, which is the default
+
+    >>> L_in_degree = csgraph.laplacian(G)
+    >>> L_in_degree
+    array([[ 9, -1, -2],
+           [-3,  8, -5],
+           [-6, -7,  7]])
+
+    or alternatively an out-degree
+
+    >>> L_out_degree = csgraph.laplacian(G, use_out_degree=True)
+    >>> L_out_degree
+    array([[ 3, -1, -2],
+           [-3,  8, -5],
+           [-6, -7, 13]])
+
+    Constructing a symmetric Laplacian matrix, one can add the two as
+
+    >>> L_in_degree + L_out_degree.T
+    array([[ 12,  -4,  -8],
+           [ -4,  16, -12],
+           [ -8, -12,  20]])
+
+    or use the ``symmetrized=True`` option
+
+    >>> csgraph.laplacian(G, symmetrized=True)
+    array([[ 12,  -4,  -8],
+           [ -4,  16, -12],
+           [ -8, -12,  20]])
+
+    which is equivalent to symmetrizing the original graph
+
+    >>> csgraph.laplacian(G + G.T)
+    array([[ 12,  -4,  -8],
+           [ -4,  16, -12],
+           [ -8, -12,  20]])
+
+    The goal of normalization is to make the non-zero diagonal entries
+    of the Laplacian matrix all unit, also scaling off-diagonal
+    entries correspondingly. The normalization can be done manually, e.g.,
+
+    >>> G = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]])
+    >>> L, d = csgraph.laplacian(G, return_diag=True)
+    >>> L
+    array([[ 2, -1, -1],
+           [-1,  2, -1],
+           [-1, -1,  2]])
+    >>> d
+    array([2, 2, 2])
+    >>> scaling = np.sqrt(d)
+    >>> scaling
+    array([1.41421356, 1.41421356, 1.41421356])
+    >>> (1/scaling)*L*(1/scaling)
+    array([[ 1. , -0.5, -0.5],
+           [-0.5,  1. , -0.5],
+           [-0.5, -0.5,  1. ]])
+
+    Or using the ``normed=True`` option
+
+    >>> L, d = csgraph.laplacian(G, return_diag=True, normed=True)
+    >>> L
+    array([[ 1. , -0.5, -0.5],
+           [-0.5,  1. , -0.5],
+           [-0.5, -0.5,  1. ]])
+
+    which now, instead of the diagonal, returns the scaling coefficients
+
+    >>> d
+    array([1.41421356, 1.41421356, 1.41421356])
+
+    Zero scaling coefficients are substituted with 1s, in which case
+    the scaling has no effect, e.g.,
+
+    >>> G = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0]])
+    >>> G
+    array([[0, 0, 0],
+           [0, 0, 1],
+           [0, 1, 0]])
+    >>> L, d = csgraph.laplacian(G, return_diag=True, normed=True)
+    >>> L
+    array([[ 0., -0., -0.],
+           [-0.,  1., -1.],
+           [-0., -1.,  1.]])
+    >>> d
+    array([1., 1., 1.])
+
+    Only the symmetric normalization is implemented, resulting
+    in a symmetric Laplacian matrix if and only if its graph is symmetric
+    and has all non-negative degrees, like in the examples above.
+
+    The output Laplacian matrix is by default a dense array or a sparse
+    matrix inferring its shape, format, and dtype from the input graph
+    matrix:
+
+    >>> G = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]]).astype(np.float32)
+    >>> G
+    array([[0., 1., 1.],
+           [1., 0., 1.],
+           [1., 1., 0.]], dtype=float32)
+    >>> csgraph.laplacian(G)
+    array([[ 2., -1., -1.],
+           [-1.,  2., -1.],
+           [-1., -1.,  2.]], dtype=float32)
+
+    but can alternatively be generated matrix-free as a LinearOperator:
+
+    >>> L = csgraph.laplacian(G, form="lo")
+    >>> L
+    <3x3 _CustomLinearOperator with dtype=float32>
+    >>> L(np.eye(3))
+    array([[ 2., -1., -1.],
+           [-1.,  2., -1.],
+           [-1., -1.,  2.]])
+
+    or as a lambda-function:
+
+    >>> L = csgraph.laplacian(G, form="function")
+    >>> L
+    <function _laplace.<locals>.<lambda> at 0x0000012AE6F5A598>
+    >>> L(np.eye(3))
+    array([[ 2., -1., -1.],
+           [-1.,  2., -1.],
+           [-1., -1.,  2.]])
+
+    The Laplacian matrix is used for
+    spectral data clustering and embedding
+    as well as for spectral graph partitioning.
+    Our final example illustrates the latter
+    for a noisy directed linear graph.
+
+    >>> from scipy.sparse import diags, random
+    >>> from scipy.sparse.linalg import lobpcg
+
+    Create a directed linear graph with ``N=35`` vertices
+    using a sparse adjacency matrix ``G``:
+
+    >>> N = 35
+    >>> G = diags(np.ones(N - 1), 1, format="csr")
+
+    Set up a random number generator ``rng`` and add sparse random noise
+    to the graph ``G``:
+
+    >>> rng = np.random.default_rng()
+    >>> G += 1e-2 * random(N, N, density=0.1, random_state=rng)
+
+    Set initial approximations for eigenvectors:
+
+    >>> X = rng.random((N, 2))
+
+    The constant vector of ones is always a trivial eigenvector
+    of the non-normalized Laplacian to be filtered out:
+
+    >>> Y = np.ones((N, 1))
+
+    Alternating (1) the sign of the graph weights allows determining
+    labels for spectral max- and min-cuts in a single loop.
+    Since the graph is directed, the option ``symmetrized=True``
+    must be used in the construction of the Laplacian.
+    The option ``normed=True`` cannot be used in (2) for the negative
+    weights here, as the symmetric normalization evaluates square roots.
+    The option ``form="lo"`` in (2) is matrix-free, i.e., guarantees
+    a fixed memory footprint and read-only access to the graph.
+    Calling the eigenvalue solver ``lobpcg`` (3) computes the Fiedler vector
+    that determines the labels as the signs of its components in (5).
+    Since the sign in an eigenvector is not deterministic and can flip,
+    we fix the sign of the first component to be always +1 in (4).
+
+    >>> for cut in ["max", "min"]:
+    ...     G = -G  # 1.
+    ...     L = csgraph.laplacian(G, symmetrized=True, form="lo")  # 2.
+    ...     _, eves = lobpcg(L, X, Y=Y, largest=False, tol=1e-3)  # 3.
+    ...     eves *= np.sign(eves[0, 0])  # 4.
+    ...     print(cut + "-cut labels:\\n", 1 * (eves[:, 0]>0))  # 5.
+ max-cut labels: + [1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1] + min-cut labels: + [1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] + + As anticipated for a (slightly noisy) linear graph, + the max-cut strips all the edges of the graph coloring all + odd vertices into one color and all even vertices into another one, + while the balanced min-cut partitions the graph + in the middle by deleting a single edge. + Both determined partitions are optimal. + """ + is_pydata_sparse = is_pydata_spmatrix(csgraph) + if is_pydata_sparse: + pydata_sparse_cls = csgraph.__class__ + csgraph = convert_pydata_sparse_to_scipy(csgraph) + if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]: + raise ValueError('csgraph must be a square matrix or array') + + if normed and ( + np.issubdtype(csgraph.dtype, np.signedinteger) + or np.issubdtype(csgraph.dtype, np.uint) + ): + csgraph = csgraph.astype(np.float64) + + if form == "array": + create_lap = ( + _laplacian_sparse if issparse(csgraph) else _laplacian_dense + ) + else: + create_lap = ( + _laplacian_sparse_flo + if issparse(csgraph) + else _laplacian_dense_flo + ) + + degree_axis = 1 if use_out_degree else 0 + + lap, d = create_lap( + csgraph, + normed=normed, + axis=degree_axis, + copy=copy, + form=form, + dtype=dtype, + symmetrized=symmetrized, + ) + if is_pydata_sparse: + lap = pydata_sparse_cls.from_scipy_sparse(lap) + if return_diag: + return lap, d + return lap + + +def _setdiag_dense(m, d): + step = len(d) + 1 + m.flat[::step] = d + + +def _laplace(m, d): + return lambda v: v * d[:, np.newaxis] - m @ v + + +def _laplace_normed(m, d, nd): + laplace = _laplace(m, d) + return lambda v: nd[:, np.newaxis] * laplace(v * nd[:, np.newaxis]) + + +def _laplace_sym(m, d): + return ( + lambda v: v * d[:, np.newaxis] + - m @ v + - np.transpose(np.conjugate(np.transpose(np.conjugate(v)) @ m)) + ) + + +def _laplace_normed_sym(m, d, nd): + laplace_sym = _laplace_sym(m, d) + return lambda v: nd[:, np.newaxis] * laplace_sym(v * nd[:, np.newaxis]) + + +def _linearoperator(mv, shape, dtype): + return LinearOperator(matvec=mv, matmat=mv, shape=shape, dtype=dtype) + + +def _laplacian_sparse_flo(graph, normed, axis, copy, form, dtype, symmetrized): + # The keyword argument `copy` is unused and has no effect here. 
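+    # Sketch (comments only): the matrix-free forms constructed below apply
+    # the Laplacian as L @ v == v * degrees - A @ v without materializing L.
+    # Assuming `csr_array` from scipy.sparse, one could check on a toy graph:
+    #
+    #     >>> A = csr_array([[0, 2], [2, 0]])
+    #     >>> L = laplacian(A, form="function")
+    #     >>> L(np.eye(2))
+    #     array([[ 2., -2.],
+    #            [-2.,  2.]])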
+ del copy + + if dtype is None: + dtype = graph.dtype + + graph_sum = np.asarray(graph.sum(axis=axis)).ravel() + graph_diagonal = graph.diagonal() + diag = graph_sum - graph_diagonal + if symmetrized: + graph_sum += np.asarray(graph.sum(axis=1 - axis)).ravel() + diag = graph_sum - graph_diagonal - graph_diagonal + + if normed: + isolated_node_mask = diag == 0 + w = np.where(isolated_node_mask, 1, np.sqrt(diag)) + if symmetrized: + md = _laplace_normed_sym(graph, graph_sum, 1.0 / w) + else: + md = _laplace_normed(graph, graph_sum, 1.0 / w) + if form == "function": + return md, w.astype(dtype, copy=False) + elif form == "lo": + m = _linearoperator(md, shape=graph.shape, dtype=dtype) + return m, w.astype(dtype, copy=False) + else: + raise ValueError(f"Invalid form: {form!r}") + else: + if symmetrized: + md = _laplace_sym(graph, graph_sum) + else: + md = _laplace(graph, graph_sum) + if form == "function": + return md, diag.astype(dtype, copy=False) + elif form == "lo": + m = _linearoperator(md, shape=graph.shape, dtype=dtype) + return m, diag.astype(dtype, copy=False) + else: + raise ValueError(f"Invalid form: {form!r}") + + +def _laplacian_sparse(graph, normed, axis, copy, form, dtype, symmetrized): + # The keyword argument `form` is unused and has no effect here. + del form + + if dtype is None: + dtype = graph.dtype + + needs_copy = False + if graph.format in ('lil', 'dok'): + m = graph.tocoo() + else: + m = graph + if copy: + needs_copy = True + + if symmetrized: + m += m.T.conj() + + w = np.asarray(m.sum(axis=axis)).ravel() - m.diagonal() + if normed: + m = m.tocoo(copy=needs_copy) + isolated_node_mask = (w == 0) + w = np.where(isolated_node_mask, 1, np.sqrt(w)) + m.data /= w[m.row] + m.data /= w[m.col] + m.data *= -1 + m.setdiag(1 - isolated_node_mask) + else: + if m.format == 'dia': + m = m.copy() + else: + m = m.tocoo(copy=needs_copy) + m.data *= -1 + m.setdiag(w) + + return m.astype(dtype, copy=False), w.astype(dtype) + + +def _laplacian_dense_flo(graph, normed, axis, copy, form, dtype, symmetrized): + + if copy: + m = np.array(graph) + else: + m = np.asarray(graph) + + if dtype is None: + dtype = m.dtype + + graph_sum = m.sum(axis=axis) + graph_diagonal = m.diagonal() + diag = graph_sum - graph_diagonal + if symmetrized: + graph_sum += m.sum(axis=1 - axis) + diag = graph_sum - graph_diagonal - graph_diagonal + + if normed: + isolated_node_mask = diag == 0 + w = np.where(isolated_node_mask, 1, np.sqrt(diag)) + if symmetrized: + md = _laplace_normed_sym(m, graph_sum, 1.0 / w) + else: + md = _laplace_normed(m, graph_sum, 1.0 / w) + if form == "function": + return md, w.astype(dtype, copy=False) + elif form == "lo": + m = _linearoperator(md, shape=graph.shape, dtype=dtype) + return m, w.astype(dtype, copy=False) + else: + raise ValueError(f"Invalid form: {form!r}") + else: + if symmetrized: + md = _laplace_sym(m, graph_sum) + else: + md = _laplace(m, graph_sum) + if form == "function": + return md, diag.astype(dtype, copy=False) + elif form == "lo": + m = _linearoperator(md, shape=graph.shape, dtype=dtype) + return m, diag.astype(dtype, copy=False) + else: + raise ValueError(f"Invalid form: {form!r}") + + +def _laplacian_dense(graph, normed, axis, copy, form, dtype, symmetrized): + + if form != "array": + raise ValueError(f'{form!r} must be "array"') + + if dtype is None: + dtype = graph.dtype + + if copy: + m = np.array(graph) + else: + m = np.asarray(graph) + + if dtype is None: + dtype = m.dtype + + if symmetrized: + m += m.T.conj() + np.fill_diagonal(m, 0) + w = m.sum(axis=axis) + 
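+    # (comments only) In the `normed` branch below, w holds the square roots
+    # of the vertex degrees (with 1 substituted for isolated nodes), and the
+    # in-place scaling turns m into I - D^{-1/2} A D^{-1/2}: e.g., for
+    # A = [[0, 2], [2, 0]] one gets w = [sqrt(2), sqrt(2)] and the
+    # normalized Laplacian [[1, -1], [-1, 1]].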
if normed: + isolated_node_mask = (w == 0) + w = np.where(isolated_node_mask, 1, np.sqrt(w)) + m /= w + m /= w[:, np.newaxis] + m *= -1 + _setdiag_dense(m, 1 - isolated_node_mask) + else: + m *= -1 + _setdiag_dense(m, w) + + return m.astype(dtype, copy=False), w.astype(dtype, copy=False) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/_matching.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/_matching.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..d175f999f98f4ff0748a201ac4ecb5c30f5eb47e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/_matching.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/_min_spanning_tree.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/_min_spanning_tree.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..461819a54866a46ac46b61774419c21c60d71a8d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/_min_spanning_tree.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/_reordering.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/_reordering.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..8d458b9c7f35f1cb8dd1205edf0bd9cf2345fa69 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/_reordering.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/_shortest_path.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/_shortest_path.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..2df1628ea7b560e8a6b86373bbbfa0b5b2b64d5a Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/_shortest_path.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/_tools.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/_tools.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..015f19ff1d65ce52672983984f79d8a5885d600d Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/_tools.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/_traversal.cpython-310-x86_64-linux-gnu.so b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/_traversal.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..82d21813e63453580ebaa136927bcbb3653a41de Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/_traversal.cpython-310-x86_64-linux-gnu.so differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/_validation.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/_validation.py new file mode 100644 index 0000000000000000000000000000000000000000..e160cf5e7b0e9fd94772e5e150e32c8b0d0be7c6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/_validation.py @@ -0,0 
+1,61 @@ +import numpy as np +from scipy.sparse import csr_matrix, issparse +from scipy.sparse._sputils import convert_pydata_sparse_to_scipy +from scipy.sparse.csgraph._tools import ( + csgraph_to_dense, csgraph_from_dense, + csgraph_masked_from_dense, csgraph_from_masked +) + +DTYPE = np.float64 + + +def validate_graph(csgraph, directed, dtype=DTYPE, + csr_output=True, dense_output=True, + copy_if_dense=False, copy_if_sparse=False, + null_value_in=0, null_value_out=np.inf, + infinity_null=True, nan_null=True): + """Routine for validation and conversion of csgraph inputs""" + if not (csr_output or dense_output): + raise ValueError("Internal: dense or csr output must be true") + + csgraph = convert_pydata_sparse_to_scipy(csgraph) + + # if undirected and csc storage, then transposing in-place + # is quicker than later converting to csr. + if (not directed) and issparse(csgraph) and csgraph.format == "csc": + csgraph = csgraph.T + + if issparse(csgraph): + if csr_output: + csgraph = csr_matrix(csgraph, dtype=DTYPE, copy=copy_if_sparse) + else: + csgraph = csgraph_to_dense(csgraph, null_value=null_value_out) + elif np.ma.isMaskedArray(csgraph): + if dense_output: + mask = csgraph.mask + csgraph = np.array(csgraph.data, dtype=DTYPE, copy=copy_if_dense) + csgraph[mask] = null_value_out + else: + csgraph = csgraph_from_masked(csgraph) + else: + if dense_output: + csgraph = csgraph_masked_from_dense(csgraph, + copy=copy_if_dense, + null_value=null_value_in, + nan_null=nan_null, + infinity_null=infinity_null) + mask = csgraph.mask + csgraph = np.asarray(csgraph.data, dtype=DTYPE) + csgraph[mask] = null_value_out + else: + csgraph = csgraph_from_dense(csgraph, null_value=null_value_in, + infinity_null=infinity_null, + nan_null=nan_null) + + if csgraph.ndim != 2: + raise ValueError("compressed-sparse graph must be 2-D") + + if csgraph.shape[0] != csgraph.shape[1]: + raise ValueError("compressed-sparse graph must be shape (N, N)") + + return csgraph diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__init__.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8c6d6733d5c34033b0313d5e71e086bd5d7ad5c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_spanning_tree.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_spanning_tree.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f960216d5e733ebbca488457d0aaf5b39e691a16 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_spanning_tree.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_connected_components.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_connected_components.py new file mode 100644 index 
0000000000000000000000000000000000000000..0b190a24deb9f2818893a120f8ea376fbfb8d6fe --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_connected_components.py @@ -0,0 +1,119 @@ +import numpy as np +from numpy.testing import assert_equal, assert_array_almost_equal +from scipy.sparse import csgraph, csr_array + + +def test_weak_connections(): + Xde = np.array([[0, 1, 0], + [0, 0, 0], + [0, 0, 0]]) + + Xsp = csgraph.csgraph_from_dense(Xde, null_value=0) + + for X in Xsp, Xde: + n_components, labels =\ + csgraph.connected_components(X, directed=True, + connection='weak') + + assert_equal(n_components, 2) + assert_array_almost_equal(labels, [0, 0, 1]) + + +def test_strong_connections(): + X1de = np.array([[0, 1, 0], + [0, 0, 0], + [0, 0, 0]]) + X2de = X1de + X1de.T + + X1sp = csgraph.csgraph_from_dense(X1de, null_value=0) + X2sp = csgraph.csgraph_from_dense(X2de, null_value=0) + + for X in X1sp, X1de: + n_components, labels =\ + csgraph.connected_components(X, directed=True, + connection='strong') + + assert_equal(n_components, 3) + labels.sort() + assert_array_almost_equal(labels, [0, 1, 2]) + + for X in X2sp, X2de: + n_components, labels =\ + csgraph.connected_components(X, directed=True, + connection='strong') + + assert_equal(n_components, 2) + labels.sort() + assert_array_almost_equal(labels, [0, 0, 1]) + + +def test_strong_connections2(): + X = np.array([[0, 0, 0, 0, 0, 0], + [1, 0, 1, 0, 0, 0], + [0, 0, 0, 1, 0, 0], + [0, 0, 1, 0, 1, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0]]) + n_components, labels =\ + csgraph.connected_components(X, directed=True, + connection='strong') + assert_equal(n_components, 5) + labels.sort() + assert_array_almost_equal(labels, [0, 1, 2, 2, 3, 4]) + + +def test_weak_connections2(): + X = np.array([[0, 0, 0, 0, 0, 0], + [1, 0, 0, 0, 0, 0], + [0, 0, 0, 1, 0, 0], + [0, 0, 1, 0, 1, 0], + [0, 0, 0, 0, 0, 0], + [0, 0, 0, 0, 1, 0]]) + n_components, labels =\ + csgraph.connected_components(X, directed=True, + connection='weak') + assert_equal(n_components, 2) + labels.sort() + assert_array_almost_equal(labels, [0, 0, 1, 1, 1, 1]) + + +def test_ticket1876(): + # Regression test: this failed in the original implementation + # There should be two strongly-connected components; previously gave one + g = np.array([[0, 1, 1, 0], + [1, 0, 0, 1], + [0, 0, 0, 1], + [0, 0, 1, 0]]) + n_components, labels = csgraph.connected_components(g, connection='strong') + + assert_equal(n_components, 2) + assert_equal(labels[0], labels[1]) + assert_equal(labels[2], labels[3]) + + +def test_fully_connected_graph(): + # Fully connected dense matrices raised an exception. 
+ # https://github.com/scipy/scipy/issues/3818 + g = np.ones((4, 4)) + n_components, labels = csgraph.connected_components(g) + assert_equal(n_components, 1) + + +def test_int64_indices_undirected(): + # See https://github.com/scipy/scipy/issues/18716 + g = csr_array(([1], np.array([[0], [1]], dtype=np.int64)), shape=(2, 2)) + assert g.indices.dtype == np.int64 + n, labels = csgraph.connected_components(g, directed=False) + assert n == 1 + assert_array_almost_equal(labels, [0, 0]) + + +def test_int64_indices_directed(): + # See https://github.com/scipy/scipy/issues/18716 + g = csr_array(([1], np.array([[0], [1]], dtype=np.int64)), shape=(2, 2)) + assert g.indices.dtype == np.int64 + n, labels = csgraph.connected_components(g, directed=True, + connection='strong') + assert n == 2 + assert_array_almost_equal(labels, [1, 0]) + diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_conversions.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_conversions.py new file mode 100644 index 0000000000000000000000000000000000000000..e7900d67b543187e6a34b76ee5c9511cfcccae9e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_conversions.py @@ -0,0 +1,61 @@ +import numpy as np +from numpy.testing import assert_array_almost_equal +from scipy.sparse import csr_matrix +from scipy.sparse.csgraph import csgraph_from_dense, csgraph_to_dense + + +def test_csgraph_from_dense(): + np.random.seed(1234) + G = np.random.random((10, 10)) + some_nulls = (G < 0.4) + all_nulls = (G < 0.8) + + for null_value in [0, np.nan, np.inf]: + G[all_nulls] = null_value + with np.errstate(invalid="ignore"): + G_csr = csgraph_from_dense(G, null_value=0) + + G[all_nulls] = 0 + assert_array_almost_equal(G, G_csr.toarray()) + + for null_value in [np.nan, np.inf]: + G[all_nulls] = 0 + G[some_nulls] = null_value + with np.errstate(invalid="ignore"): + G_csr = csgraph_from_dense(G, null_value=0) + + G[all_nulls] = 0 + assert_array_almost_equal(G, G_csr.toarray()) + + +def test_csgraph_to_dense(): + np.random.seed(1234) + G = np.random.random((10, 10)) + nulls = (G < 0.8) + G[nulls] = np.inf + + G_csr = csgraph_from_dense(G) + + for null_value in [0, 10, -np.inf, np.inf]: + G[nulls] = null_value + assert_array_almost_equal(G, csgraph_to_dense(G_csr, null_value)) + + +def test_multiple_edges(): + # create a random square matrix with an even number of elements + np.random.seed(1234) + X = np.random.random((10, 10)) + Xcsr = csr_matrix(X) + + # now double-up every other column + Xcsr.indices[::2] = Xcsr.indices[1::2] + + # normal sparse toarray() will sum the duplicated edges + Xdense = Xcsr.toarray() + assert_array_almost_equal(Xdense[:, 1::2], + X[:, ::2] + X[:, 1::2]) + + # csgraph_to_dense chooses the minimum of each duplicated edge + Xdense = csgraph_to_dense(Xcsr) + assert_array_almost_equal(Xdense[:, 1::2], + np.minimum(X[:, ::2], X[:, 1::2])) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_flow.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_flow.py new file mode 100644 index 0000000000000000000000000000000000000000..8bb129a572dd3abedb2afd896d04fa53e8c096bc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_flow.py @@ -0,0 +1,201 @@ +import numpy as np +from numpy.testing import assert_array_equal +import pytest + +from scipy.sparse import csr_matrix, csc_matrix +from scipy.sparse.csgraph import maximum_flow +from 
scipy.sparse.csgraph._flow import ( + _add_reverse_edges, _make_edge_pointers, _make_tails +) + +methods = ['edmonds_karp', 'dinic'] + +def test_raises_on_dense_input(): + with pytest.raises(TypeError): + graph = np.array([[0, 1], [0, 0]]) + maximum_flow(graph, 0, 1) + maximum_flow(graph, 0, 1, method='edmonds_karp') + + +def test_raises_on_csc_input(): + with pytest.raises(TypeError): + graph = csc_matrix([[0, 1], [0, 0]]) + maximum_flow(graph, 0, 1) + maximum_flow(graph, 0, 1, method='edmonds_karp') + + +def test_raises_on_floating_point_input(): + with pytest.raises(ValueError): + graph = csr_matrix([[0, 1.5], [0, 0]], dtype=np.float64) + maximum_flow(graph, 0, 1) + maximum_flow(graph, 0, 1, method='edmonds_karp') + + +def test_raises_on_non_square_input(): + with pytest.raises(ValueError): + graph = csr_matrix([[0, 1, 2], [2, 1, 0]]) + maximum_flow(graph, 0, 1) + + +def test_raises_when_source_is_sink(): + with pytest.raises(ValueError): + graph = csr_matrix([[0, 1], [0, 0]]) + maximum_flow(graph, 0, 0) + maximum_flow(graph, 0, 0, method='edmonds_karp') + + +@pytest.mark.parametrize('method', methods) +@pytest.mark.parametrize('source', [-1, 2, 3]) +def test_raises_when_source_is_out_of_bounds(source, method): + with pytest.raises(ValueError): + graph = csr_matrix([[0, 1], [0, 0]]) + maximum_flow(graph, source, 1, method=method) + + +@pytest.mark.parametrize('method', methods) +@pytest.mark.parametrize('sink', [-1, 2, 3]) +def test_raises_when_sink_is_out_of_bounds(sink, method): + with pytest.raises(ValueError): + graph = csr_matrix([[0, 1], [0, 0]]) + maximum_flow(graph, 0, sink, method=method) + + +@pytest.mark.parametrize('method', methods) +def test_simple_graph(method): + # This graph looks as follows: + # (0) --5--> (1) + graph = csr_matrix([[0, 5], [0, 0]]) + res = maximum_flow(graph, 0, 1, method=method) + assert res.flow_value == 5 + expected_flow = np.array([[0, 5], [-5, 0]]) + assert_array_equal(res.flow.toarray(), expected_flow) + + +@pytest.mark.parametrize('method', methods) +def test_bottle_neck_graph(method): + # This graph cannot use the full capacity between 0 and 1: + # (0) --5--> (1) --3--> (2) + graph = csr_matrix([[0, 5, 0], [0, 0, 3], [0, 0, 0]]) + res = maximum_flow(graph, 0, 2, method=method) + assert res.flow_value == 3 + expected_flow = np.array([[0, 3, 0], [-3, 0, 3], [0, -3, 0]]) + assert_array_equal(res.flow.toarray(), expected_flow) + + +@pytest.mark.parametrize('method', methods) +def test_backwards_flow(method): + # This example causes backwards flow between vertices 3 and 4, + # and so this test ensures that we handle that accordingly. See + # https://stackoverflow.com/q/38843963/5085211 + # for more information. 
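+    # (comments only) Concretely: if an algorithm first pushes 10 units along
+    # 0 -> 4 -> 3 -> 7, the edge 3 -> 7 saturates, and a second augmenting
+    # path 0 -> 1 -> 2 -> 3 -> (residual) 4 -> 5 -> 6 -> 7 must cancel the
+    # 10 units previously sent along 4 -> 3; the final flow below therefore
+    # carries nothing across 4 -> 3.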
+ graph = csr_matrix([[0, 10, 0, 0, 10, 0, 0, 0], + [0, 0, 10, 0, 0, 0, 0, 0], + [0, 0, 0, 10, 0, 0, 0, 0], + [0, 0, 0, 0, 0, 0, 0, 10], + [0, 0, 0, 10, 0, 10, 0, 0], + [0, 0, 0, 0, 0, 0, 10, 0], + [0, 0, 0, 0, 0, 0, 0, 10], + [0, 0, 0, 0, 0, 0, 0, 0]]) + res = maximum_flow(graph, 0, 7, method=method) + assert res.flow_value == 20 + expected_flow = np.array([[0, 10, 0, 0, 10, 0, 0, 0], + [-10, 0, 10, 0, 0, 0, 0, 0], + [0, -10, 0, 10, 0, 0, 0, 0], + [0, 0, -10, 0, 0, 0, 0, 10], + [-10, 0, 0, 0, 0, 10, 0, 0], + [0, 0, 0, 0, -10, 0, 10, 0], + [0, 0, 0, 0, 0, -10, 0, 10], + [0, 0, 0, -10, 0, 0, -10, 0]]) + assert_array_equal(res.flow.toarray(), expected_flow) + + +@pytest.mark.parametrize('method', methods) +def test_example_from_clrs_chapter_26_1(method): + # See page 659 in CLRS second edition, but note that the maximum flow + # we find is slightly different than the one in CLRS; we push a flow of + # 12 to v_1 instead of v_2. + graph = csr_matrix([[0, 16, 13, 0, 0, 0], + [0, 0, 10, 12, 0, 0], + [0, 4, 0, 0, 14, 0], + [0, 0, 9, 0, 0, 20], + [0, 0, 0, 7, 0, 4], + [0, 0, 0, 0, 0, 0]]) + res = maximum_flow(graph, 0, 5, method=method) + assert res.flow_value == 23 + expected_flow = np.array([[0, 12, 11, 0, 0, 0], + [-12, 0, 0, 12, 0, 0], + [-11, 0, 0, 0, 11, 0], + [0, -12, 0, 0, -7, 19], + [0, 0, -11, 7, 0, 4], + [0, 0, 0, -19, -4, 0]]) + assert_array_equal(res.flow.toarray(), expected_flow) + + +@pytest.mark.parametrize('method', methods) +def test_disconnected_graph(method): + # This tests the following disconnected graph: + # (0) --5--> (1) (2) --3--> (3) + graph = csr_matrix([[0, 5, 0, 0], + [0, 0, 0, 0], + [0, 0, 9, 3], + [0, 0, 0, 0]]) + res = maximum_flow(graph, 0, 3, method=method) + assert res.flow_value == 0 + expected_flow = np.zeros((4, 4), dtype=np.int32) + assert_array_equal(res.flow.toarray(), expected_flow) + + +@pytest.mark.parametrize('method', methods) +def test_add_reverse_edges_large_graph(method): + # Regression test for https://github.com/scipy/scipy/issues/14385 + n = 100_000 + indices = np.arange(1, n) + indptr = np.array(list(range(n)) + [n - 1]) + data = np.ones(n - 1, dtype=np.int32) + graph = csr_matrix((data, indices, indptr), shape=(n, n)) + res = maximum_flow(graph, 0, n - 1, method=method) + assert res.flow_value == 1 + expected_flow = graph - graph.transpose() + assert_array_equal(res.flow.data, expected_flow.data) + assert_array_equal(res.flow.indices, expected_flow.indices) + assert_array_equal(res.flow.indptr, expected_flow.indptr) + + +@pytest.mark.parametrize("a,b_data_expected", [ + ([[]], []), + ([[0], [0]], []), + ([[1, 0, 2], [0, 0, 0], [0, 3, 0]], [1, 2, 0, 0, 3]), + ([[9, 8, 7], [4, 5, 6], [0, 0, 0]], [9, 8, 7, 4, 5, 6, 0, 0])]) +def test_add_reverse_edges(a, b_data_expected): + """Test that the reversal of the edges of the input graph works + as expected. 
+ """ + a = csr_matrix(a, dtype=np.int32, shape=(len(a), len(a))) + b = _add_reverse_edges(a) + assert_array_equal(b.data, b_data_expected) + + +@pytest.mark.parametrize("a,expected", [ + ([[]], []), + ([[0]], []), + ([[1]], [0]), + ([[0, 1], [10, 0]], [1, 0]), + ([[1, 0, 2], [0, 0, 3], [4, 5, 0]], [0, 3, 4, 1, 2]) +]) +def test_make_edge_pointers(a, expected): + a = csr_matrix(a, dtype=np.int32) + rev_edge_ptr = _make_edge_pointers(a) + assert_array_equal(rev_edge_ptr, expected) + + +@pytest.mark.parametrize("a,expected", [ + ([[]], []), + ([[0]], []), + ([[1]], [0]), + ([[0, 1], [10, 0]], [0, 1]), + ([[1, 0, 2], [0, 0, 3], [4, 5, 0]], [0, 0, 1, 2, 2]) +]) +def test_make_tails(a, expected): + a = csr_matrix(a, dtype=np.int32) + tails = _make_tails(a) + assert_array_equal(tails, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_graph_laplacian.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_graph_laplacian.py new file mode 100644 index 0000000000000000000000000000000000000000..4a4213dcbec63530466078437d01ca7765494514 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_graph_laplacian.py @@ -0,0 +1,369 @@ +import pytest +import numpy as np +from numpy.testing import assert_allclose +from pytest import raises as assert_raises +from scipy import sparse + +from scipy.sparse import csgraph +from scipy._lib._util import np_long, np_ulong + + +def check_int_type(mat): + return np.issubdtype(mat.dtype, np.signedinteger) or np.issubdtype( + mat.dtype, np_ulong + ) + + +def test_laplacian_value_error(): + for t in int, float, complex: + for m in ([1, 1], + [[[1]]], + [[1, 2, 3], [4, 5, 6]], + [[1, 2], [3, 4], [5, 5]]): + A = np.array(m, dtype=t) + assert_raises(ValueError, csgraph.laplacian, A) + + +def _explicit_laplacian(x, normed=False): + if sparse.issparse(x): + x = x.toarray() + x = np.asarray(x) + y = -1.0 * x + for j in range(y.shape[0]): + y[j,j] = x[j,j+1:].sum() + x[j,:j].sum() + if normed: + d = np.diag(y).copy() + d[d == 0] = 1.0 + y /= d[:,None]**.5 + y /= d[None,:]**.5 + return y + + +def _check_symmetric_graph_laplacian(mat, normed, copy=True): + if not hasattr(mat, 'shape'): + mat = eval(mat, dict(np=np, sparse=sparse)) + + if sparse.issparse(mat): + sp_mat = mat + mat = sp_mat.toarray() + else: + sp_mat = sparse.csr_matrix(mat) + + mat_copy = np.copy(mat) + sp_mat_copy = sparse.csr_matrix(sp_mat, copy=True) + + n_nodes = mat.shape[0] + explicit_laplacian = _explicit_laplacian(mat, normed=normed) + laplacian = csgraph.laplacian(mat, normed=normed, copy=copy) + sp_laplacian = csgraph.laplacian(sp_mat, normed=normed, + copy=copy) + + if copy: + assert_allclose(mat, mat_copy) + _assert_allclose_sparse(sp_mat, sp_mat_copy) + else: + if not (normed and check_int_type(mat)): + assert_allclose(laplacian, mat) + if sp_mat.format == 'coo': + _assert_allclose_sparse(sp_laplacian, sp_mat) + + assert_allclose(laplacian, sp_laplacian.toarray()) + + for tested in [laplacian, sp_laplacian.toarray()]: + if not normed: + assert_allclose(tested.sum(axis=0), np.zeros(n_nodes)) + assert_allclose(tested.T, tested) + assert_allclose(tested, explicit_laplacian) + + +def test_symmetric_graph_laplacian(): + symmetric_mats = ( + 'np.arange(10) * np.arange(10)[:, np.newaxis]', + 'np.ones((7, 7))', + 'np.eye(19)', + 'sparse.diags([1, 1], [-1, 1], shape=(4, 4))', + 'sparse.diags([1, 1], [-1, 1], shape=(4, 4)).toarray()', + 'sparse.diags([1, 1], [-1, 1], shape=(4, 4)).todense()', + 
'np.vander(np.arange(4)) + np.vander(np.arange(4)).T' + ) + for mat in symmetric_mats: + for normed in True, False: + for copy in True, False: + _check_symmetric_graph_laplacian(mat, normed, copy) + + +def _assert_allclose_sparse(a, b, **kwargs): + # helper function that can deal with sparse matrices + if sparse.issparse(a): + a = a.toarray() + if sparse.issparse(b): + b = b.toarray() + assert_allclose(a, b, **kwargs) + + +def _check_laplacian_dtype_none( + A, desired_L, desired_d, normed, use_out_degree, copy, dtype, arr_type +): + mat = arr_type(A, dtype=dtype) + L, d = csgraph.laplacian( + mat, + normed=normed, + return_diag=True, + use_out_degree=use_out_degree, + copy=copy, + dtype=None, + ) + if normed and check_int_type(mat): + assert L.dtype == np.float64 + assert d.dtype == np.float64 + _assert_allclose_sparse(L, desired_L, atol=1e-12) + _assert_allclose_sparse(d, desired_d, atol=1e-12) + else: + assert L.dtype == dtype + assert d.dtype == dtype + desired_L = np.asarray(desired_L).astype(dtype) + desired_d = np.asarray(desired_d).astype(dtype) + _assert_allclose_sparse(L, desired_L, atol=1e-12) + _assert_allclose_sparse(d, desired_d, atol=1e-12) + + if not copy: + if not (normed and check_int_type(mat)): + if type(mat) is np.ndarray: + assert_allclose(L, mat) + elif mat.format == "coo": + _assert_allclose_sparse(L, mat) + + +def _check_laplacian_dtype( + A, desired_L, desired_d, normed, use_out_degree, copy, dtype, arr_type +): + mat = arr_type(A, dtype=dtype) + L, d = csgraph.laplacian( + mat, + normed=normed, + return_diag=True, + use_out_degree=use_out_degree, + copy=copy, + dtype=dtype, + ) + assert L.dtype == dtype + assert d.dtype == dtype + desired_L = np.asarray(desired_L).astype(dtype) + desired_d = np.asarray(desired_d).astype(dtype) + _assert_allclose_sparse(L, desired_L, atol=1e-12) + _assert_allclose_sparse(d, desired_d, atol=1e-12) + + if not copy: + if not (normed and check_int_type(mat)): + if type(mat) is np.ndarray: + assert_allclose(L, mat) + elif mat.format == 'coo': + _assert_allclose_sparse(L, mat) + + +INT_DTYPES = {np.intc, np_long, np.longlong} +REAL_DTYPES = {np.float32, np.float64, np.longdouble} +COMPLEX_DTYPES = {np.complex64, np.complex128, np.clongdouble} +# use sorted list to ensure fixed order of tests +DTYPES = sorted(INT_DTYPES ^ REAL_DTYPES ^ COMPLEX_DTYPES, key=str) + + +@pytest.mark.parametrize("dtype", DTYPES) +@pytest.mark.parametrize("arr_type", [np.array, + sparse.csr_matrix, + sparse.coo_matrix, + sparse.csr_array, + sparse.coo_array]) +@pytest.mark.parametrize("copy", [True, False]) +@pytest.mark.parametrize("normed", [True, False]) +@pytest.mark.parametrize("use_out_degree", [True, False]) +def test_asymmetric_laplacian(use_out_degree, normed, + copy, dtype, arr_type): + # adjacency matrix + A = [[0, 1, 0], + [4, 2, 0], + [0, 0, 0]] + A = arr_type(np.array(A), dtype=dtype) + A_copy = A.copy() + + if not normed and use_out_degree: + # Laplacian matrix using out-degree + L = [[1, -1, 0], + [-4, 4, 0], + [0, 0, 0]] + d = [1, 4, 0] + + if normed and use_out_degree: + # normalized Laplacian matrix using out-degree + L = [[1, -0.5, 0], + [-2, 1, 0], + [0, 0, 0]] + d = [1, 2, 1] + + if not normed and not use_out_degree: + # Laplacian matrix using in-degree + L = [[4, -1, 0], + [-4, 1, 0], + [0, 0, 0]] + d = [4, 1, 0] + + if normed and not use_out_degree: + # normalized Laplacian matrix using in-degree + L = [[1, -0.5, 0], + [-2, 1, 0], + [0, 0, 0]] + d = [2, 1, 1] + + _check_laplacian_dtype_none( + A, + L, + d, + normed=normed, + 
use_out_degree=use_out_degree, + copy=copy, + dtype=dtype, + arr_type=arr_type, + ) + + _check_laplacian_dtype( + A_copy, + L, + d, + normed=normed, + use_out_degree=use_out_degree, + copy=copy, + dtype=dtype, + arr_type=arr_type, + ) + + +@pytest.mark.parametrize("fmt", ['csr', 'csc', 'coo', 'lil', + 'dok', 'dia', 'bsr']) +@pytest.mark.parametrize("normed", [True, False]) +@pytest.mark.parametrize("copy", [True, False]) +def test_sparse_formats(fmt, normed, copy): + mat = sparse.diags([1, 1], [-1, 1], shape=(4, 4), format=fmt) + _check_symmetric_graph_laplacian(mat, normed, copy) + + +@pytest.mark.parametrize( + "arr_type", [np.asarray, + sparse.csr_matrix, + sparse.coo_matrix, + sparse.csr_array, + sparse.coo_array] +) +@pytest.mark.parametrize("form", ["array", "function", "lo"]) +def test_laplacian_symmetrized(arr_type, form): + # adjacency matrix + n = 3 + mat = arr_type(np.arange(n * n).reshape(n, n)) + L_in, d_in = csgraph.laplacian( + mat, + return_diag=True, + form=form, + ) + L_out, d_out = csgraph.laplacian( + mat, + return_diag=True, + use_out_degree=True, + form=form, + ) + Ls, ds = csgraph.laplacian( + mat, + return_diag=True, + symmetrized=True, + form=form, + ) + Ls_normed, ds_normed = csgraph.laplacian( + mat, + return_diag=True, + symmetrized=True, + normed=True, + form=form, + ) + mat += mat.T + Lss, dss = csgraph.laplacian(mat, return_diag=True, form=form) + Lss_normed, dss_normed = csgraph.laplacian( + mat, + return_diag=True, + normed=True, + form=form, + ) + + assert_allclose(ds, d_in + d_out) + assert_allclose(ds, dss) + assert_allclose(ds_normed, dss_normed) + + d = {} + for L in ["L_in", "L_out", "Ls", "Ls_normed", "Lss", "Lss_normed"]: + if form == "array": + d[L] = eval(L) + else: + d[L] = eval(L)(np.eye(n, dtype=mat.dtype)) + + _assert_allclose_sparse(d["Ls"], d["L_in"] + d["L_out"].T) + _assert_allclose_sparse(d["Ls"], d["Lss"]) + _assert_allclose_sparse(d["Ls_normed"], d["Lss_normed"]) + + +@pytest.mark.parametrize( + "arr_type", [np.asarray, + sparse.csr_matrix, + sparse.coo_matrix, + sparse.csr_array, + sparse.coo_array] +) +@pytest.mark.parametrize("dtype", DTYPES) +@pytest.mark.parametrize("normed", [True, False]) +@pytest.mark.parametrize("symmetrized", [True, False]) +@pytest.mark.parametrize("use_out_degree", [True, False]) +@pytest.mark.parametrize("form", ["function", "lo"]) +def test_format(dtype, arr_type, normed, symmetrized, use_out_degree, form): + n = 3 + mat = [[0, 1, 0], [4, 2, 0], [0, 0, 0]] + mat = arr_type(np.array(mat), dtype=dtype) + Lo, do = csgraph.laplacian( + mat, + return_diag=True, + normed=normed, + symmetrized=symmetrized, + use_out_degree=use_out_degree, + dtype=dtype, + ) + La, da = csgraph.laplacian( + mat, + return_diag=True, + normed=normed, + symmetrized=symmetrized, + use_out_degree=use_out_degree, + dtype=dtype, + form="array", + ) + assert_allclose(do, da) + _assert_allclose_sparse(Lo, La) + + L, d = csgraph.laplacian( + mat, + return_diag=True, + normed=normed, + symmetrized=symmetrized, + use_out_degree=use_out_degree, + dtype=dtype, + form=form, + ) + assert_allclose(d, do) + assert d.dtype == dtype + Lm = L(np.eye(n, dtype=mat.dtype)).astype(dtype) + _assert_allclose_sparse(Lm, Lo, rtol=2e-7, atol=2e-7) + x = np.arange(6).reshape(3, 2) + if not (normed and dtype in INT_DTYPES): + assert_allclose(L(x), Lo @ x) + else: + # Normalized Lo is casted to integer, but L() is not + pass + + +def test_format_error_message(): + with pytest.raises(ValueError, match="Invalid form: 'toto'"): + _ = csgraph.laplacian(np.eye(1), 
form='toto') diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_matching.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_matching.py new file mode 100644 index 0000000000000000000000000000000000000000..87e2920fe971d22a16b473d543e2ad26ac8e777d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_matching.py @@ -0,0 +1,294 @@ +from itertools import product + +import numpy as np +from numpy.testing import assert_array_equal, assert_equal +import pytest + +from scipy.sparse import csr_matrix, coo_matrix, diags +from scipy.sparse.csgraph import ( + maximum_bipartite_matching, min_weight_full_bipartite_matching +) + + +def test_maximum_bipartite_matching_raises_on_dense_input(): + with pytest.raises(TypeError): + graph = np.array([[0, 1], [0, 0]]) + maximum_bipartite_matching(graph) + + +def test_maximum_bipartite_matching_empty_graph(): + graph = csr_matrix((0, 0)) + x = maximum_bipartite_matching(graph, perm_type='row') + y = maximum_bipartite_matching(graph, perm_type='column') + expected_matching = np.array([]) + assert_array_equal(expected_matching, x) + assert_array_equal(expected_matching, y) + + +def test_maximum_bipartite_matching_empty_left_partition(): + graph = csr_matrix((2, 0)) + x = maximum_bipartite_matching(graph, perm_type='row') + y = maximum_bipartite_matching(graph, perm_type='column') + assert_array_equal(np.array([]), x) + assert_array_equal(np.array([-1, -1]), y) + + +def test_maximum_bipartite_matching_empty_right_partition(): + graph = csr_matrix((0, 3)) + x = maximum_bipartite_matching(graph, perm_type='row') + y = maximum_bipartite_matching(graph, perm_type='column') + assert_array_equal(np.array([-1, -1, -1]), x) + assert_array_equal(np.array([]), y) + + +def test_maximum_bipartite_matching_graph_with_no_edges(): + graph = csr_matrix((2, 2)) + x = maximum_bipartite_matching(graph, perm_type='row') + y = maximum_bipartite_matching(graph, perm_type='column') + assert_array_equal(np.array([-1, -1]), x) + assert_array_equal(np.array([-1, -1]), y) + + +def test_maximum_bipartite_matching_graph_that_causes_augmentation(): + # In this graph, column 1 is initially assigned to row 1, but it should be + # reassigned to make room for row 2. 
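+    # A short walk-through of the augmentation (comments only, 0-based
+    # indices): a greedy pass pairs row 0 with column 0; row 1 then has no
+    # free neighbour, so the augmenting path row 1 -> column 0 -> row 0 ->
+    # column 1 flips the assignment, giving row 0 -> column 1 and
+    # row 1 -> column 0, i.e. the permutation [1, 0] asserted below.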
+ graph = csr_matrix([[1, 1], [1, 0]]) + x = maximum_bipartite_matching(graph, perm_type='column') + y = maximum_bipartite_matching(graph, perm_type='row') + expected_matching = np.array([1, 0]) + assert_array_equal(expected_matching, x) + assert_array_equal(expected_matching, y) + + +def test_maximum_bipartite_matching_graph_with_more_rows_than_columns(): + graph = csr_matrix([[1, 1], [1, 0], [0, 1]]) + x = maximum_bipartite_matching(graph, perm_type='column') + y = maximum_bipartite_matching(graph, perm_type='row') + assert_array_equal(np.array([0, -1, 1]), x) + assert_array_equal(np.array([0, 2]), y) + + +def test_maximum_bipartite_matching_graph_with_more_columns_than_rows(): + graph = csr_matrix([[1, 1, 0], [0, 0, 1]]) + x = maximum_bipartite_matching(graph, perm_type='column') + y = maximum_bipartite_matching(graph, perm_type='row') + assert_array_equal(np.array([0, 2]), x) + assert_array_equal(np.array([0, -1, 1]), y) + + +def test_maximum_bipartite_matching_explicit_zeros_count_as_edges(): + data = [0, 0] + indices = [1, 0] + indptr = [0, 1, 2] + graph = csr_matrix((data, indices, indptr), shape=(2, 2)) + x = maximum_bipartite_matching(graph, perm_type='row') + y = maximum_bipartite_matching(graph, perm_type='column') + expected_matching = np.array([1, 0]) + assert_array_equal(expected_matching, x) + assert_array_equal(expected_matching, y) + + +def test_maximum_bipartite_matching_feasibility_of_result(): + # This is a regression test for GitHub issue #11458 + data = np.ones(50, dtype=int) + indices = [11, 12, 19, 22, 23, 5, 22, 3, 8, 10, 5, 6, 11, 12, 13, 5, 13, + 14, 20, 22, 3, 15, 3, 13, 14, 11, 12, 19, 22, 23, 5, 22, 3, 8, + 10, 5, 6, 11, 12, 13, 5, 13, 14, 20, 22, 3, 15, 3, 13, 14] + indptr = [0, 5, 7, 10, 10, 15, 20, 22, 22, 23, 25, 30, 32, 35, 35, 40, 45, + 47, 47, 48, 50] + graph = csr_matrix((data, indices, indptr), shape=(20, 25)) + x = maximum_bipartite_matching(graph, perm_type='row') + y = maximum_bipartite_matching(graph, perm_type='column') + assert (x != -1).sum() == 13 + assert (y != -1).sum() == 13 + # Ensure that each element of the matching is in fact an edge in the graph. 
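+    # Here y (perm_type='column') maps each of the 20 rows to its matched
+    # column, and x (perm_type='row') maps each of the 25 columns to its
+    # matched row; -1 marks an unmatched vertex, so only matched pairs are
+    # checked against the graph below.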
+ for u, v in zip(range(graph.shape[0]), y): + if v != -1: + assert graph[u, v] + for u, v in zip(x, range(graph.shape[1])): + if u != -1: + assert graph[u, v] + + +def test_matching_large_random_graph_with_one_edge_incident_to_each_vertex(): + np.random.seed(42) + A = diags(np.ones(25), offsets=0, format='csr') + rand_perm = np.random.permutation(25) + rand_perm2 = np.random.permutation(25) + + Rrow = np.arange(25) + Rcol = rand_perm + Rdata = np.ones(25, dtype=int) + Rmat = coo_matrix((Rdata, (Rrow, Rcol))).tocsr() + + Crow = rand_perm2 + Ccol = np.arange(25) + Cdata = np.ones(25, dtype=int) + Cmat = coo_matrix((Cdata, (Crow, Ccol))).tocsr() + # Randomly permute identity matrix + B = Rmat * A * Cmat + + # Row permute + perm = maximum_bipartite_matching(B, perm_type='row') + Rrow = np.arange(25) + Rcol = perm + Rdata = np.ones(25, dtype=int) + Rmat = coo_matrix((Rdata, (Rrow, Rcol))).tocsr() + C1 = Rmat * B + + # Column permute + perm2 = maximum_bipartite_matching(B, perm_type='column') + Crow = perm2 + Ccol = np.arange(25) + Cdata = np.ones(25, dtype=int) + Cmat = coo_matrix((Cdata, (Crow, Ccol))).tocsr() + C2 = B * Cmat + + # Should get identity matrix back + assert_equal(any(C1.diagonal() == 0), False) + assert_equal(any(C2.diagonal() == 0), False) + + +@pytest.mark.parametrize('num_rows,num_cols', [(0, 0), (2, 0), (0, 3)]) +def test_min_weight_full_matching_trivial_graph(num_rows, num_cols): + biadjacency_matrix = csr_matrix((num_cols, num_rows)) + row_ind, col_ind = min_weight_full_bipartite_matching(biadjacency_matrix) + assert len(row_ind) == 0 + assert len(col_ind) == 0 + + +@pytest.mark.parametrize('biadjacency_matrix', + [ + [[1, 1, 1], [1, 0, 0], [1, 0, 0]], + [[1, 1, 1], [0, 0, 1], [0, 0, 1]], + [[1, 0, 0, 1], [1, 1, 0, 1], [0, 0, 0, 0]], + [[1, 0, 0], [2, 0, 0]], + [[0, 1, 0], [0, 2, 0]], + [[1, 0], [2, 0], [5, 0]] + ]) +def test_min_weight_full_matching_infeasible_problems(biadjacency_matrix): + with pytest.raises(ValueError): + min_weight_full_bipartite_matching(csr_matrix(biadjacency_matrix)) + + +def test_min_weight_full_matching_large_infeasible(): + # Regression test for GitHub issue #17269 + a = np.asarray([ + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.001, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.001, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.001, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.001, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.001, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.001, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.001, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.001, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.001], + [0.0, 0.11687445, 0.0, 0.0, 0.01319788, 0.07509257, 0.0, + 0.0, 0.0, 0.74228317, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.81087935, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.8408466, 0.0, 0.0, 0.0, 0.0, 
0.01194389, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.82994211, 0.0, 0.0, 0.0, 0.11468516, 0.0, 0.0, 0.0, + 0.11173505, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.0, 0.0], + [0.18796507, 0.0, 0.04002318, 0.0, 0.0, 0.0, 0.0, 0.0, 0.75883335, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.71545464, 0.0, 0.0, 0.0, 0.0, 0.0, 0.02748488, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.78470564, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.14829198, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.10870609, 0.0, 0.0, 0.0, 0.8918677, 0.0, 0.0, 0.0, 0.06306644, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, + 0.63844085, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.7442354, 0.0, 0.0, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.09850549, 0.0, 0.0, 0.18638258, + 0.2769244, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.73182464, 0.0, 0.0, 0.46443561, + 0.38589284, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + [0.29510278, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.09666032, 0.0, + 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] + ]) + with pytest.raises(ValueError, match='no full matching exists'): + min_weight_full_bipartite_matching(csr_matrix(a)) + + +def test_explicit_zero_causes_warning(): + with pytest.warns(UserWarning): + biadjacency_matrix = csr_matrix(((2, 0, 3), (0, 1, 1), (0, 2, 3))) + min_weight_full_bipartite_matching(biadjacency_matrix) + + +# General test for linear sum assignment solvers to make it possible to rely +# on the same tests for scipy.optimize.linear_sum_assignment. 
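+# Each test case below is a (cost_matrix, expected_cost) pair; `sign` flips
+# the costs so the same data exercises both minimization (sign=1) and
+# maximization (sign=-1, passed as maximize=True). The helper checks the
+# matched costs on the matrix and again on its transpose.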
+def linear_sum_assignment_assertions(
+    solver, array_type, sign, test_case
+):
+    cost_matrix, expected_cost = test_case
+    maximize = sign == -1
+    cost_matrix = sign * array_type(cost_matrix)
+    expected_cost = sign * np.array(expected_cost)
+
+    row_ind, col_ind = solver(cost_matrix, maximize=maximize)
+    assert_array_equal(row_ind, np.sort(row_ind))
+    assert_array_equal(expected_cost,
+                       np.array(cost_matrix[row_ind, col_ind]).flatten())
+
+    cost_matrix = cost_matrix.T
+    row_ind, col_ind = solver(cost_matrix, maximize=maximize)
+    assert_array_equal(row_ind, np.sort(row_ind))
+    assert_array_equal(np.sort(expected_cost),
+                       np.sort(np.array(
+                           cost_matrix[row_ind, col_ind])).flatten())
+
+
+linear_sum_assignment_test_cases = product(
+    [-1, 1],
+    [
+        # Square
+        ([[400, 150, 400],
+          [400, 450, 600],
+          [300, 225, 300]],
+         [150, 400, 300]),
+
+        # Rectangular variant
+        ([[400, 150, 400, 1],
+          [400, 450, 600, 2],
+          [300, 225, 300, 3]],
+         [150, 2, 300]),
+
+        # Square
+        ([[10, 10, 8],
+          [9, 8, 1],
+          [9, 7, 4]],
+         [10, 1, 7]),
+
+        # Rectangular variant
+        ([[10, 10, 8, 11],
+          [9, 8, 1, 1],
+          [9, 7, 4, 10]],
+         [10, 1, 4]),
+
+        # Square variant with positive infinities
+        ([[10, float("inf"), float("inf")],
+          [float("inf"), float("inf"), 1],
+          [float("inf"), 7, float("inf")]],
+         [10, 1, 7])
+    ])
+
+
+@pytest.mark.parametrize('sign,test_case', linear_sum_assignment_test_cases)
+def test_min_weight_full_matching_small_inputs(sign, test_case):
+    linear_sum_assignment_assertions(
+        min_weight_full_bipartite_matching, csr_matrix, sign, test_case)
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_pydata_sparse.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_pydata_sparse.py
new file mode 100644
index 0000000000000000000000000000000000000000..63ed5f61a430e4291c40284f1bbfff3165421013
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_pydata_sparse.py
@@ -0,0 +1,149 @@
+import pytest
+
+import numpy as np
+import scipy.sparse as sp
+import scipy.sparse.csgraph as spgraph
+
+from numpy.testing import assert_equal
+
+try:
+    import sparse
+except Exception:
+    sparse = None
+
+pytestmark = pytest.mark.skipif(sparse is None,
+                                reason="pydata/sparse not installed")
+
+
+msg = "pydata/sparse (0.15.1) does not implement necessary operations"
+
+
+sparse_params = (pytest.param("COO"),
+                 pytest.param("DOK", marks=[pytest.mark.xfail(reason=msg)]))
+
+
+@pytest.fixture(params=sparse_params)
+def sparse_cls(request):
+    return getattr(sparse, request.param)
+
+
+@pytest.fixture
+def graphs(sparse_cls):
+    graph = [
+        [0, 1, 1, 0, 0],
+        [0, 0, 1, 0, 0],
+        [0, 0, 0, 0, 0],
+        [0, 0, 0, 0, 1],
+        [0, 0, 0, 0, 0],
+    ]
+    A_dense = np.array(graph)
+    A_sparse = sparse_cls(A_dense)
+    return A_dense, A_sparse
+
+
+@pytest.mark.parametrize(
+    "func",
+    [
+        spgraph.shortest_path,
+        spgraph.dijkstra,
+        spgraph.floyd_warshall,
+        spgraph.bellman_ford,
+        spgraph.johnson,
+        spgraph.reverse_cuthill_mckee,
+        spgraph.maximum_bipartite_matching,
+        spgraph.structural_rank,
+    ]
+)
+def test_csgraph_equiv(func, graphs):
+    A_dense, A_sparse = graphs
+    actual = func(A_sparse)
+    desired = func(sp.csc_matrix(A_dense))
+    assert_equal(actual, desired)
+
+
+def test_connected_components(graphs):
+    A_dense, A_sparse = graphs
+    func = spgraph.connected_components
+
+    actual_comp, actual_labels = func(A_sparse)
+    desired_comp, desired_labels = func(sp.csc_matrix(A_dense))
+
+    assert actual_comp == desired_comp
+    assert_equal(actual_labels, desired_labels)
+
+
+def test_laplacian(graphs):
+    A_dense,
A_sparse = graphs + sparse_cls = type(A_sparse) + func = spgraph.laplacian + + actual = func(A_sparse) + desired = func(sp.csc_matrix(A_dense)) + + assert isinstance(actual, sparse_cls) + + assert_equal(actual.todense(), desired.todense()) + + +@pytest.mark.parametrize( + "func", [spgraph.breadth_first_order, spgraph.depth_first_order] +) +def test_order_search(graphs, func): + A_dense, A_sparse = graphs + + actual = func(A_sparse, 0) + desired = func(sp.csc_matrix(A_dense), 0) + + assert_equal(actual, desired) + + +@pytest.mark.parametrize( + "func", [spgraph.breadth_first_tree, spgraph.depth_first_tree] +) +def test_tree_search(graphs, func): + A_dense, A_sparse = graphs + sparse_cls = type(A_sparse) + + actual = func(A_sparse, 0) + desired = func(sp.csc_matrix(A_dense), 0) + + assert isinstance(actual, sparse_cls) + + assert_equal(actual.todense(), desired.todense()) + + +def test_minimum_spanning_tree(graphs): + A_dense, A_sparse = graphs + sparse_cls = type(A_sparse) + func = spgraph.minimum_spanning_tree + + actual = func(A_sparse) + desired = func(sp.csc_matrix(A_dense)) + + assert isinstance(actual, sparse_cls) + + assert_equal(actual.todense(), desired.todense()) + + +def test_maximum_flow(graphs): + A_dense, A_sparse = graphs + sparse_cls = type(A_sparse) + func = spgraph.maximum_flow + + actual = func(A_sparse, 0, 2) + desired = func(sp.csr_matrix(A_dense), 0, 2) + + assert actual.flow_value == desired.flow_value + assert isinstance(actual.flow, sparse_cls) + + assert_equal(actual.flow.todense(), desired.flow.todense()) + + +def test_min_weight_full_bipartite_matching(graphs): + A_dense, A_sparse = graphs + func = spgraph.min_weight_full_bipartite_matching + + actual = func(A_sparse[0:2, 1:3]) + desired = func(sp.csc_matrix(A_dense)[0:2, 1:3]) + + assert_equal(actual, desired) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_reordering.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_reordering.py new file mode 100644 index 0000000000000000000000000000000000000000..cb4c002fa303e7196278367afd316d47b3473cbb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_reordering.py @@ -0,0 +1,70 @@ +import numpy as np +from numpy.testing import assert_equal +from scipy.sparse.csgraph import reverse_cuthill_mckee, structural_rank +from scipy.sparse import csc_matrix, csr_matrix, coo_matrix + + +def test_graph_reverse_cuthill_mckee(): + A = np.array([[1, 0, 0, 0, 1, 0, 0, 0], + [0, 1, 1, 0, 0, 1, 0, 1], + [0, 1, 1, 0, 1, 0, 0, 0], + [0, 0, 0, 1, 0, 0, 1, 0], + [1, 0, 1, 0, 1, 0, 0, 0], + [0, 1, 0, 0, 0, 1, 0, 1], + [0, 0, 0, 1, 0, 0, 1, 0], + [0, 1, 0, 0, 0, 1, 0, 1]], dtype=int) + + graph = csr_matrix(A) + perm = reverse_cuthill_mckee(graph) + correct_perm = np.array([6, 3, 7, 5, 1, 2, 4, 0]) + assert_equal(perm, correct_perm) + + # Test int64 indices input + graph.indices = graph.indices.astype('int64') + graph.indptr = graph.indptr.astype('int64') + perm = reverse_cuthill_mckee(graph, True) + assert_equal(perm, correct_perm) + + +def test_graph_reverse_cuthill_mckee_ordering(): + data = np.ones(63,dtype=int) + rows = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, + 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, + 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, + 9, 10, 10, 10, 10, 10, 11, 11, 11, 11, + 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, + 14, 15, 15, 15, 15, 15]) + cols = np.array([0, 2, 5, 8, 10, 1, 3, 9, 11, 0, 2, + 7, 10, 1, 3, 11, 4, 6, 12, 14, 0, 7, 13, + 15, 4, 6, 14, 2, 5, 7, 15, 0, 8, 10, 13, 
+ 1, 9, 11, 0, 2, 8, 10, 15, 1, 3, 9, 11, + 4, 12, 14, 5, 8, 13, 15, 4, 6, 12, 14, + 5, 7, 10, 13, 15]) + graph = coo_matrix((data, (rows,cols))).tocsr() + perm = reverse_cuthill_mckee(graph) + correct_perm = np.array([12, 14, 4, 6, 10, 8, 2, 15, + 0, 13, 7, 5, 9, 11, 1, 3]) + assert_equal(perm, correct_perm) + + +def test_graph_structural_rank(): + # Test square matrix #1 + A = csc_matrix([[1, 1, 0], + [1, 0, 1], + [0, 1, 0]]) + assert_equal(structural_rank(A), 3) + + # Test square matrix #2 + rows = np.array([0,0,0,0,0,1,1,2,2,3,3,3,3,3,3,4,4,5,5,6,6,7,7]) + cols = np.array([0,1,2,3,4,2,5,2,6,0,1,3,5,6,7,4,5,5,6,2,6,2,4]) + data = np.ones_like(rows) + B = coo_matrix((data,(rows,cols)), shape=(8,8)) + assert_equal(structural_rank(B), 6) + + #Test non-square matrix + C = csc_matrix([[1, 0, 2, 0], + [2, 0, 4, 0]]) + assert_equal(structural_rank(C), 2) + + #Test tall matrix + assert_equal(structural_rank(C.T), 2) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_shortest_path.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_shortest_path.py new file mode 100644 index 0000000000000000000000000000000000000000..f745e0fbba31f12e7ae17d5de05942578762d0af --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_shortest_path.py @@ -0,0 +1,395 @@ +from io import StringIO +import warnings +import numpy as np +from numpy.testing import assert_array_almost_equal, assert_array_equal, assert_allclose +from pytest import raises as assert_raises +from scipy.sparse.csgraph import (shortest_path, dijkstra, johnson, + bellman_ford, construct_dist_matrix, + NegativeCycleError) +import scipy.sparse +from scipy.io import mmread +import pytest + +directed_G = np.array([[0, 3, 3, 0, 0], + [0, 0, 0, 2, 4], + [0, 0, 0, 0, 0], + [1, 0, 0, 0, 0], + [2, 0, 0, 2, 0]], dtype=float) + +undirected_G = np.array([[0, 3, 3, 1, 2], + [3, 0, 0, 2, 4], + [3, 0, 0, 0, 0], + [1, 2, 0, 0, 2], + [2, 4, 0, 2, 0]], dtype=float) + +unweighted_G = (directed_G > 0).astype(float) + +directed_SP = [[0, 3, 3, 5, 7], + [3, 0, 6, 2, 4], + [np.inf, np.inf, 0, np.inf, np.inf], + [1, 4, 4, 0, 8], + [2, 5, 5, 2, 0]] + +directed_sparse_zero_G = scipy.sparse.csr_matrix(([0, 1, 2, 3, 1], + ([0, 1, 2, 3, 4], + [1, 2, 0, 4, 3])), + shape = (5, 5)) + +directed_sparse_zero_SP = [[0, 0, 1, np.inf, np.inf], + [3, 0, 1, np.inf, np.inf], + [2, 2, 0, np.inf, np.inf], + [np.inf, np.inf, np.inf, 0, 3], + [np.inf, np.inf, np.inf, 1, 0]] + +undirected_sparse_zero_G = scipy.sparse.csr_matrix(([0, 0, 1, 1, 2, 2, 1, 1], + ([0, 1, 1, 2, 2, 0, 3, 4], + [1, 0, 2, 1, 0, 2, 4, 3])), + shape = (5, 5)) + +undirected_sparse_zero_SP = [[0, 0, 1, np.inf, np.inf], + [0, 0, 1, np.inf, np.inf], + [1, 1, 0, np.inf, np.inf], + [np.inf, np.inf, np.inf, 0, 1], + [np.inf, np.inf, np.inf, 1, 0]] + +directed_pred = np.array([[-9999, 0, 0, 1, 1], + [3, -9999, 0, 1, 1], + [-9999, -9999, -9999, -9999, -9999], + [3, 0, 0, -9999, 1], + [4, 0, 0, 4, -9999]], dtype=float) + +undirected_SP = np.array([[0, 3, 3, 1, 2], + [3, 0, 6, 2, 4], + [3, 6, 0, 4, 5], + [1, 2, 4, 0, 2], + [2, 4, 5, 2, 0]], dtype=float) + +undirected_SP_limit_2 = np.array([[0, np.inf, np.inf, 1, 2], + [np.inf, 0, np.inf, 2, np.inf], + [np.inf, np.inf, 0, np.inf, np.inf], + [1, 2, np.inf, 0, 2], + [2, np.inf, np.inf, 2, 0]], dtype=float) + +undirected_SP_limit_0 = np.ones((5, 5), dtype=float) - np.eye(5) +undirected_SP_limit_0[undirected_SP_limit_0 > 0] = np.inf + +undirected_pred = np.array([[-9999, 0, 0, 0, 0], + [1, 
-9999, 0, 1, 1], + [2, 0, -9999, 0, 0], + [3, 3, 0, -9999, 3], + [4, 4, 0, 4, -9999]], dtype=float) + +directed_negative_weighted_G = np.array([[0, 0, 0], + [-1, 0, 0], + [0, -1, 0]], dtype=float) + +directed_negative_weighted_SP = np.array([[0, np.inf, np.inf], + [-1, 0, np.inf], + [-2, -1, 0]], dtype=float) + +methods = ['auto', 'FW', 'D', 'BF', 'J'] + + +def test_dijkstra_limit(): + limits = [0, 2, np.inf] + results = [undirected_SP_limit_0, + undirected_SP_limit_2, + undirected_SP] + + def check(limit, result): + SP = dijkstra(undirected_G, directed=False, limit=limit) + assert_array_almost_equal(SP, result) + + for limit, result in zip(limits, results): + check(limit, result) + + +def test_directed(): + def check(method): + SP = shortest_path(directed_G, method=method, directed=True, + overwrite=False) + assert_array_almost_equal(SP, directed_SP) + + for method in methods: + check(method) + + +def test_undirected(): + def check(method, directed_in): + if directed_in: + SP1 = shortest_path(directed_G, method=method, directed=False, + overwrite=False) + assert_array_almost_equal(SP1, undirected_SP) + else: + SP2 = shortest_path(undirected_G, method=method, directed=True, + overwrite=False) + assert_array_almost_equal(SP2, undirected_SP) + + for method in methods: + for directed_in in (True, False): + check(method, directed_in) + + +def test_directed_sparse_zero(): + # test directed sparse graph with zero-weight edge and two connected components + def check(method): + SP = shortest_path(directed_sparse_zero_G, method=method, directed=True, + overwrite=False) + assert_array_almost_equal(SP, directed_sparse_zero_SP) + + for method in methods: + check(method) + + +def test_undirected_sparse_zero(): + def check(method, directed_in): + if directed_in: + SP1 = shortest_path(directed_sparse_zero_G, method=method, directed=False, + overwrite=False) + assert_array_almost_equal(SP1, undirected_sparse_zero_SP) + else: + SP2 = shortest_path(undirected_sparse_zero_G, method=method, directed=True, + overwrite=False) + assert_array_almost_equal(SP2, undirected_sparse_zero_SP) + + for method in methods: + for directed_in in (True, False): + check(method, directed_in) + + +@pytest.mark.parametrize('directed, SP_ans', + ((True, directed_SP), + (False, undirected_SP))) +@pytest.mark.parametrize('indices', ([0, 2, 4], [0, 4], [3, 4], [0, 0])) +def test_dijkstra_indices_min_only(directed, SP_ans, indices): + SP_ans = np.array(SP_ans) + indices = np.array(indices, dtype=np.int64) + min_ind_ans = indices[np.argmin(SP_ans[indices, :], axis=0)] + min_d_ans = np.zeros(SP_ans.shape[0], SP_ans.dtype) + for k in range(SP_ans.shape[0]): + min_d_ans[k] = SP_ans[min_ind_ans[k], k] + min_ind_ans[np.isinf(min_d_ans)] = -9999 + + SP, pred, sources = dijkstra(directed_G, + directed=directed, + indices=indices, + min_only=True, + return_predecessors=True) + assert_array_almost_equal(SP, min_d_ans) + assert_array_equal(min_ind_ans, sources) + SP = dijkstra(directed_G, + directed=directed, + indices=indices, + min_only=True, + return_predecessors=False) + assert_array_almost_equal(SP, min_d_ans) + + +@pytest.mark.parametrize('n', (10, 100, 1000)) +def test_dijkstra_min_only_random(n): + np.random.seed(1234) + data = scipy.sparse.rand(n, n, density=0.5, format='lil', + random_state=42, dtype=np.float64) + data.setdiag(np.zeros(n, dtype=np.bool_)) + # choose some random vertices + v = np.arange(n) + np.random.shuffle(v) + indices = v[:int(n*.1)] + ds, pred, sources = dijkstra(data, + directed=True, + indices=indices, + 
min_only=True, + return_predecessors=True) + for k in range(n): + p = pred[k] + s = sources[k] + while p != -9999: + assert sources[p] == s + p = pred[p] + + +def test_dijkstra_random(): + # reproduces the hang observed in gh-17782 + n = 10 + indices = [0, 4, 4, 5, 7, 9, 0, 6, 2, 3, 7, 9, 1, 2, 9, 2, 5, 6] + indptr = [0, 0, 2, 5, 6, 7, 8, 12, 15, 18, 18] + data = [0.33629, 0.40458, 0.47493, 0.42757, 0.11497, 0.91653, 0.69084, + 0.64979, 0.62555, 0.743, 0.01724, 0.99945, 0.31095, 0.15557, + 0.02439, 0.65814, 0.23478, 0.24072] + graph = scipy.sparse.csr_matrix((data, indices, indptr), shape=(n, n)) + dijkstra(graph, directed=True, return_predecessors=True) + + +def test_gh_17782_segfault(): + text = """%%MatrixMarket matrix coordinate real general + 84 84 22 + 2 1 4.699999809265137e+00 + 6 14 1.199999973177910e-01 + 9 6 1.199999973177910e-01 + 10 16 2.012000083923340e+01 + 11 10 1.422000026702881e+01 + 12 1 9.645999908447266e+01 + 13 18 2.012000083923340e+01 + 14 13 4.679999828338623e+00 + 15 11 1.199999973177910e-01 + 16 12 1.199999973177910e-01 + 18 15 1.199999973177910e-01 + 32 2 2.299999952316284e+00 + 33 20 6.000000000000000e+00 + 33 32 5.000000000000000e+00 + 36 9 3.720000028610229e+00 + 36 37 3.720000028610229e+00 + 36 38 3.720000028610229e+00 + 37 44 8.159999847412109e+00 + 38 32 7.903999328613281e+01 + 43 20 2.400000000000000e+01 + 43 33 4.000000000000000e+00 + 44 43 6.028000259399414e+01 + """ + data = mmread(StringIO(text)) + dijkstra(data, directed=True, return_predecessors=True) + + +def test_shortest_path_indices(): + indices = np.arange(4) + + def check(func, indshape): + outshape = indshape + (5,) + SP = func(directed_G, directed=False, + indices=indices.reshape(indshape)) + assert_array_almost_equal(SP, undirected_SP[indices].reshape(outshape)) + + for indshape in [(4,), (4, 1), (2, 2)]: + for func in (dijkstra, bellman_ford, johnson, shortest_path): + check(func, indshape) + + assert_raises(ValueError, shortest_path, directed_G, method='FW', + indices=indices) + + +def test_predecessors(): + SP_res = {True: directed_SP, + False: undirected_SP} + pred_res = {True: directed_pred, + False: undirected_pred} + + def check(method, directed): + SP, pred = shortest_path(directed_G, method, directed=directed, + overwrite=False, + return_predecessors=True) + assert_array_almost_equal(SP, SP_res[directed]) + assert_array_almost_equal(pred, pred_res[directed]) + + for method in methods: + for directed in (True, False): + check(method, directed) + + +def test_construct_shortest_path(): + def check(method, directed): + SP1, pred = shortest_path(directed_G, + directed=directed, + overwrite=False, + return_predecessors=True) + SP2 = construct_dist_matrix(directed_G, pred, directed=directed) + assert_array_almost_equal(SP1, SP2) + + for method in methods: + for directed in (True, False): + check(method, directed) + + +def test_unweighted_path(): + def check(method, directed): + SP1 = shortest_path(directed_G, + directed=directed, + overwrite=False, + unweighted=True) + SP2 = shortest_path(unweighted_G, + directed=directed, + overwrite=False, + unweighted=False) + assert_array_almost_equal(SP1, SP2) + + for method in methods: + for directed in (True, False): + check(method, directed) + + +def test_negative_cycles(): + # create a small graph with a negative cycle + graph = np.ones([5, 5]) + graph.flat[::6] = 0 + graph[1, 2] = -2 + + def check(method, directed): + assert_raises(NegativeCycleError, shortest_path, graph, method, + directed) + + for method in ['FW', 'J', 'BF']: + for directed in 
(True, False):
+            check(method, directed)
+
+
+@pytest.mark.parametrize("method", ['FW', 'J', 'BF'])
+def test_negative_weights(method):
+    SP = shortest_path(directed_negative_weighted_G, method, directed=True)
+    assert_allclose(SP, directed_negative_weighted_SP, atol=1e-10)
+
+
+def test_masked_input():
+    # Masked entries mark non-edges, so masking the zeros of directed_G
+    # must reproduce the same shortest paths as the dense input.
+    G_masked = np.ma.masked_equal(directed_G, 0)
+
+    def check(method):
+        SP = shortest_path(G_masked, method=method, directed=True,
+                           overwrite=False)
+        assert_array_almost_equal(SP, directed_SP)
+
+    for method in methods:
+        check(method)
+
+
+def test_overwrite():
+    G = np.array([[0, 3, 3, 1, 2],
+                  [3, 0, 0, 2, 4],
+                  [3, 0, 0, 0, 0],
+                  [1, 2, 0, 0, 2],
+                  [2, 4, 0, 2, 0]], dtype=float)
+    foo = G.copy()
+    shortest_path(foo, overwrite=False)
+    assert_array_equal(foo, G)
+
+
+@pytest.mark.parametrize('method', methods)
+def test_buffer(method):
+    # Smoke test that sparse matrices with read-only buffers (e.g., those from
+    # joblib workers) do not cause::
+    #
+    #     ValueError: buffer source array is read-only
+    #
+    G = scipy.sparse.csr_matrix([[1.]])
+    G.data.flags['WRITEABLE'] = False
+    shortest_path(G, method=method)
+
+
+def test_NaN_warnings():
+    with warnings.catch_warnings(record=True) as record:
+        shortest_path(np.array([[0, 1], [np.nan, 0]]))
+    for r in record:
+        assert r.category is not RuntimeWarning
+
+
+def test_sparse_matrices():
+    # Test that lil, csr and csc sparse matrices do not cause errors.
+    G_dense = np.array([[0, 3, 0, 0, 0],
+                        [0, 0, -1, 0, 0],
+                        [0, 0, 0, 2, 0],
+                        [0, 0, 0, 0, 4],
+                        [0, 0, 0, 0, 0]], dtype=float)
+    SP = shortest_path(G_dense)
+    G_csr = scipy.sparse.csr_matrix(G_dense)
+    G_csc = scipy.sparse.csc_matrix(G_dense)
+    G_lil = scipy.sparse.lil_matrix(G_dense)
+    assert_array_almost_equal(SP, shortest_path(G_csr))
+    assert_array_almost_equal(SP, shortest_path(G_csc))
+    assert_array_almost_equal(SP, shortest_path(G_lil))
diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_spanning_tree.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_spanning_tree.py
new file mode 100644
index 0000000000000000000000000000000000000000..90ef6d1b1ba86170b0264f76e6a63e21749acdc8
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_spanning_tree.py
@@ -0,0 +1,66 @@
+"""Test the minimum spanning tree function"""
+import numpy as np
+from numpy.testing import assert_
+import numpy.testing as npt
+from scipy.sparse import csr_matrix
+from scipy.sparse.csgraph import minimum_spanning_tree
+
+
+def test_minimum_spanning_tree():
+
+    # Create a graph with two connected components.
+    graph = [[0, 1, 0, 0, 0],
+             [1, 0, 0, 0, 0],
+             [0, 0, 0, 8, 5],
+             [0, 0, 8, 0, 1],
+             [0, 0, 5, 1, 0]]
+    graph = np.asarray(graph)
+
+    # Create the expected spanning tree.
+    expected = [[0, 1, 0, 0, 0],
+                [0, 0, 0, 0, 0],
+                [0, 0, 0, 0, 5],
+                [0, 0, 0, 0, 1],
+                [0, 0, 0, 0, 0]]
+    expected = np.asarray(expected)
+
+    # Ensure minimum spanning tree code gives this expected output.
+    csgraph = csr_matrix(graph)
+    mintree = minimum_spanning_tree(csgraph)
+    mintree_array = mintree.toarray()
+    npt.assert_array_equal(mintree_array, expected,
+                           'Incorrect spanning tree found.')
+
+    # Ensure that the original graph was not modified.
+    npt.assert_array_equal(csgraph.toarray(), graph,
+                           'Original graph was modified.')
+
+    # Now let the algorithm modify the csgraph in place.
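+    # With overwrite=True, the routine is allowed to overwrite parts of the
+    # input graph for efficiency, so csgraph should not be relied upon
+    # afterwards.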
+ mintree = minimum_spanning_tree(csgraph, overwrite=True) + npt.assert_array_equal(mintree.toarray(), expected, + 'Graph was not properly modified to contain MST.') + + np.random.seed(1234) + for N in (5, 10, 15, 20): + + # Create a random graph. + graph = 3 + np.random.random((N, N)) + csgraph = csr_matrix(graph) + + # The spanning tree has at most N - 1 edges. + mintree = minimum_spanning_tree(csgraph) + assert_(mintree.nnz < N) + + # Set the sub diagonal to 1 to create a known spanning tree. + idx = np.arange(N-1) + graph[idx,idx+1] = 1 + csgraph = csr_matrix(graph) + mintree = minimum_spanning_tree(csgraph) + + # We expect to see this pattern in the spanning tree and otherwise + # have this zero. + expected = np.zeros((N, N)) + expected[idx, idx+1] = 1 + + npt.assert_array_equal(mintree.toarray(), expected, + 'Incorrect spanning tree found.') diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_traversal.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_traversal.py new file mode 100644 index 0000000000000000000000000000000000000000..414e2d14864da8613eaf85f41a0b391ce1ae916d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/test_traversal.py @@ -0,0 +1,81 @@ +import numpy as np +import pytest +from numpy.testing import assert_array_almost_equal +from scipy.sparse import csr_array +from scipy.sparse.csgraph import (breadth_first_tree, depth_first_tree, + csgraph_to_dense, csgraph_from_dense) + + +def test_graph_breadth_first(): + csgraph = np.array([[0, 1, 2, 0, 0], + [1, 0, 0, 0, 3], + [2, 0, 0, 7, 0], + [0, 0, 7, 0, 1], + [0, 3, 0, 1, 0]]) + csgraph = csgraph_from_dense(csgraph, null_value=0) + + bfirst = np.array([[0, 1, 2, 0, 0], + [0, 0, 0, 0, 3], + [0, 0, 0, 7, 0], + [0, 0, 0, 0, 0], + [0, 0, 0, 0, 0]]) + + for directed in [True, False]: + bfirst_test = breadth_first_tree(csgraph, 0, directed) + assert_array_almost_equal(csgraph_to_dense(bfirst_test), + bfirst) + + +def test_graph_depth_first(): + csgraph = np.array([[0, 1, 2, 0, 0], + [1, 0, 0, 0, 3], + [2, 0, 0, 7, 0], + [0, 0, 7, 0, 1], + [0, 3, 0, 1, 0]]) + csgraph = csgraph_from_dense(csgraph, null_value=0) + + dfirst = np.array([[0, 1, 0, 0, 0], + [0, 0, 0, 0, 3], + [0, 0, 0, 0, 0], + [0, 0, 7, 0, 0], + [0, 0, 0, 1, 0]]) + + for directed in [True, False]: + dfirst_test = depth_first_tree(csgraph, 0, directed) + assert_array_almost_equal(csgraph_to_dense(dfirst_test), + dfirst) + + +def test_graph_breadth_first_trivial_graph(): + csgraph = np.array([[0]]) + csgraph = csgraph_from_dense(csgraph, null_value=0) + + bfirst = np.array([[0]]) + + for directed in [True, False]: + bfirst_test = breadth_first_tree(csgraph, 0, directed) + assert_array_almost_equal(csgraph_to_dense(bfirst_test), + bfirst) + + +def test_graph_depth_first_trivial_graph(): + csgraph = np.array([[0]]) + csgraph = csgraph_from_dense(csgraph, null_value=0) + + bfirst = np.array([[0]]) + + for directed in [True, False]: + bfirst_test = depth_first_tree(csgraph, 0, directed) + assert_array_almost_equal(csgraph_to_dense(bfirst_test), + bfirst) + + +@pytest.mark.parametrize('directed', [True, False]) +@pytest.mark.parametrize('tree_func', [breadth_first_tree, depth_first_tree]) +def test_int64_indices(tree_func, directed): + # See https://github.com/scipy/scipy/issues/18716 + g = csr_array(([1], np.array([[0], [1]], dtype=np.int64)), shape=(2, 2)) + assert g.indices.dtype == np.int64 + tree = tree_func(g, 0, directed=directed) + 
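+    # With the single edge 0 -> 1, both traversals from node 0 should
+    # recover exactly that edge, i.e. the dense tree checked below.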
assert_array_almost_equal(csgraph_to_dense(tree), [[0, 1], [0, 0]]) + diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csr.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csr.py new file mode 100644 index 0000000000000000000000000000000000000000..86bb1e072ebe4480e9dcb01f2d36f7387872b898 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/csr.py @@ -0,0 +1,27 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.sparse` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'csr_count_blocks', + 'csr_matrix', + 'csr_tobsr', + 'csr_tocsc', + 'get_csr_submatrix', + 'isspmatrix_csr', + 'spmatrix', + 'upcast', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="sparse", module="csr", + private_modules=["_csr"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/data.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/data.py new file mode 100644 index 0000000000000000000000000000000000000000..a9958bcda6dd35ac0779514d79b7f1c494c1b01a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/data.py @@ -0,0 +1,23 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.sparse` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'isscalarlike', + 'name', + 'npfunc', + 'validateaxis', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="sparse", module="data", + private_modules=["_data"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/dia.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/dia.py new file mode 100644 index 0000000000000000000000000000000000000000..f79abd39f114b23df8ceb6eafb7fcc1c07218dcb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/dia.py @@ -0,0 +1,29 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.sparse` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'check_shape', + 'dia_matrix', + 'dia_matvec', + 'get_sum_dtype', + 'getdtype', + 'isshape', + 'isspmatrix_dia', + 'spmatrix', + 'upcast_char', + 'validateaxis', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="sparse", module="dia", + private_modules=["_dia"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/dok.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/dok.py new file mode 100644 index 0000000000000000000000000000000000000000..847824456eaa3145d5ecb078e30251875168775b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/dok.py @@ -0,0 +1,32 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.sparse` namespace for importing the functions +# included below. 
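+# Looking up any name in __all__ goes through __getattr__ below, which emits
+# a deprecation warning and forwards the access to the private
+# scipy.sparse._dok module.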
+ +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'IndexMixin', + 'check_shape', + 'dok_matrix', + 'getdtype', + 'isdense', + 'isintlike', + 'isscalarlike', + 'isshape', + 'isspmatrix_dok', + 'itertools', + 'spmatrix', + 'upcast', + 'upcast_scalar', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="sparse", module="dok", + private_modules=["_dok"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/extract.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/extract.py new file mode 100644 index 0000000000000000000000000000000000000000..be5e161b6f99e57e2b2a6b3d4f1ef6427c07658d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/extract.py @@ -0,0 +1,23 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.sparse` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'coo_matrix', + 'find', + 'tril', + 'triu', +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="sparse", module="extract", + private_modules=["_extract"], all=__all__, + attribute=name) diff --git a/env-llmeval/lib/python3.10/site-packages/scipy/sparse/spfuncs.py b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/spfuncs.py new file mode 100644 index 0000000000000000000000000000000000000000..b005a9b7c56b82f0c902c26664607b237d808f68 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/scipy/sparse/spfuncs.py @@ -0,0 +1,22 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.sparse` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'csr_count_blocks', + 'estimate_blocksize', + 'count_blocks' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="sparse", module="spfuncs", + private_modules=["_spfuncs"], all=__all__, + attribute=name)