applied-ai-018 commited on
Commit
10f0083
·
verified ·
1 Parent(s): fe9f72d

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/__init__.cpython-310.pyc +0 -0
  2. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_measurements.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_morphology.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_ni_docstrings.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_ni_support.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/filters.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/fourier.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/interpolation.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/measurements.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/morphology.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__init__.py +324 -0
  12. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/__init__.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_base.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_bsr.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_compressed.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_construct.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_coo.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_csc.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_csr.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_data.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_dia.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_dok.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_extract.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_index.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_lil.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_matrix.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_matrix_io.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_spfuncs.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_sputils.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/base.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/bsr.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/compressed.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/construct.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/coo.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/csc.cpython-310.pyc +0 -0
  36. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/csr.cpython-310.pyc +0 -0
  37. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/data.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/dia.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/dok.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/extract.cpython-310.pyc +0 -0
  41. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/lil.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/sparsetools.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/spfuncs.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/sputils.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/scipy/sparse/_base.py +1568 -0
  46. llmeval-env/lib/python3.10/site-packages/scipy/sparse/_bsr.py +855 -0
  47. llmeval-env/lib/python3.10/site-packages/scipy/sparse/_compressed.py +1367 -0
  48. llmeval-env/lib/python3.10/site-packages/scipy/sparse/_construct.py +1401 -0
  49. llmeval-env/lib/python3.10/site-packages/scipy/sparse/_coo.py +858 -0
  50. llmeval-env/lib/python3.10/site-packages/scipy/sparse/_csparsetools.cpython-310-x86_64-linux-gnu.so +0 -0
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.83 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_measurements.cpython-310.pyc ADDED
Binary file (47.6 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_morphology.cpython-310.pyc ADDED
Binary file (83.2 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_ni_docstrings.cpython-310.pyc ADDED
Binary file (8.33 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/_ni_support.cpython-310.pyc ADDED
Binary file (2.85 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/filters.cpython-310.pyc ADDED
Binary file (984 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/fourier.cpython-310.pyc ADDED
Binary file (671 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/interpolation.cpython-310.pyc ADDED
Binary file (740 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/measurements.cpython-310.pyc ADDED
Binary file (827 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/ndimage/__pycache__/morphology.cpython-310.pyc ADDED
Binary file (984 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__init__.py ADDED
@@ -0,0 +1,324 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ =====================================
3
+ Sparse matrices (:mod:`scipy.sparse`)
4
+ =====================================
5
+
6
+ .. currentmodule:: scipy.sparse
7
+
8
+ .. toctree::
9
+ :hidden:
10
+
11
+ sparse.csgraph
12
+ sparse.linalg
13
+
14
+ SciPy 2-D sparse array package for numeric data.
15
+
16
+ .. note::
17
+
18
+ This package is switching to an array interface, compatible with
19
+ NumPy arrays, from the older matrix interface. We recommend that
20
+ you use the array objects (`bsr_array`, `coo_array`, etc.) for
21
+ all new work.
22
+
23
+ When using the array interface, please note that:
24
+
25
+ - ``x * y`` no longer performs matrix multiplication, but
26
+ element-wise multiplication (just like with NumPy arrays). To
27
+ make code work with both arrays and matrices, use ``x @ y`` for
28
+ matrix multiplication.
29
+ - Operations such as `sum`, that used to produce dense matrices, now
30
+ produce arrays, whose multiplication behavior differs similarly.
31
+ - Sparse arrays currently must be two-dimensional. This also means
32
+ that all *slicing* operations on these objects must produce
33
+ two-dimensional results, or they will result in an error. This
34
+ will be addressed in a future version.
35
+
36
+ The construction utilities (`eye`, `kron`, `random`, `diags`, etc.)
37
+ have not yet been ported, but their results can be wrapped into arrays::
38
+
39
+ A = csr_array(eye(3))
40
+
41
+ Contents
42
+ ========
43
+
44
+ Sparse array classes
45
+ --------------------
46
+
47
+ .. autosummary::
48
+ :toctree: generated/
49
+
50
+ bsr_array - Block Sparse Row array
51
+ coo_array - A sparse array in COOrdinate format
52
+ csc_array - Compressed Sparse Column array
53
+ csr_array - Compressed Sparse Row array
54
+ dia_array - Sparse array with DIAgonal storage
55
+ dok_array - Dictionary Of Keys based sparse array
56
+ lil_array - Row-based list of lists sparse array
57
+ sparray - Sparse array base class
58
+
59
+ Sparse matrix classes
60
+ ---------------------
61
+
62
+ .. autosummary::
63
+ :toctree: generated/
64
+
65
+ bsr_matrix - Block Sparse Row matrix
66
+ coo_matrix - A sparse matrix in COOrdinate format
67
+ csc_matrix - Compressed Sparse Column matrix
68
+ csr_matrix - Compressed Sparse Row matrix
69
+ dia_matrix - Sparse matrix with DIAgonal storage
70
+ dok_matrix - Dictionary Of Keys based sparse matrix
71
+ lil_matrix - Row-based list of lists sparse matrix
72
+ spmatrix - Sparse matrix base class
73
+
74
+ Functions
75
+ ---------
76
+
77
+ Building sparse arrays:
78
+
79
+ .. autosummary::
80
+ :toctree: generated/
81
+
82
+ diags_array - Return a sparse array from diagonals
83
+ eye_array - Sparse MxN array whose k-th diagonal is all ones
84
+ random_array - Random values in a given shape array
85
+ block_array - Build a sparse array from sub-blocks
86
+
87
+ Building sparse matrices:
88
+
89
+ .. autosummary::
90
+ :toctree: generated/
91
+
92
+ eye - Sparse MxN matrix whose k-th diagonal is all ones
93
+ identity - Identity matrix in sparse matrix format
94
+ diags - Return a sparse matrix from diagonals
95
+ spdiags - Return a sparse matrix from diagonals
96
+ bmat - Build a sparse matrix from sparse sub-blocks
97
+ random - Random values in a given shape matrix
98
+ rand - Random values in a given shape matrix (old interface)
99
+
100
+ Building larger structures from smaller (array or matrix)
101
+
102
+ .. autosummary::
103
+ :toctree: generated/
104
+
105
+ kron - kronecker product of two sparse matrices
106
+ kronsum - kronecker sum of sparse matrices
107
+ block_diag - Build a block diagonal sparse matrix
108
+ tril - Lower triangular portion of a matrix in sparse format
109
+ triu - Upper triangular portion of a matrix in sparse format
110
+ hstack - Stack sparse matrices horizontally (column wise)
111
+ vstack - Stack sparse matrices vertically (row wise)
112
+
113
+ Save and load sparse matrices:
114
+
115
+ .. autosummary::
116
+ :toctree: generated/
117
+
118
+ save_npz - Save a sparse matrix/array to a file using ``.npz`` format.
119
+ load_npz - Load a sparse matrix/array from a file using ``.npz`` format.
120
+
121
+ Sparse tools:
122
+
123
+ .. autosummary::
124
+ :toctree: generated/
125
+
126
+ find
127
+
128
+ Identifying sparse arrays:
129
+
130
+ - use `isinstance(A, sp.sparse.sparray)` to check whether an array or matrix.
131
+ - use `A.format == 'csr'` to check the sparse format
132
+
133
+ Identifying sparse matrices:
134
+
135
+ .. autosummary::
136
+ :toctree: generated/
137
+
138
+ issparse
139
+ isspmatrix
140
+ isspmatrix_csc
141
+ isspmatrix_csr
142
+ isspmatrix_bsr
143
+ isspmatrix_lil
144
+ isspmatrix_dok
145
+ isspmatrix_coo
146
+ isspmatrix_dia
147
+
148
+ Submodules
149
+ ----------
150
+
151
+ .. autosummary::
152
+
153
+ csgraph - Compressed sparse graph routines
154
+ linalg - sparse linear algebra routines
155
+
156
+ Exceptions
157
+ ----------
158
+
159
+ .. autosummary::
160
+ :toctree: generated/
161
+
162
+ SparseEfficiencyWarning
163
+ SparseWarning
164
+
165
+
166
+ Usage information
167
+ =================
168
+
169
+ There are seven available sparse array types:
170
+
171
+ 1. `csc_array`: Compressed Sparse Column format
172
+ 2. `csr_array`: Compressed Sparse Row format
173
+ 3. `bsr_array`: Block Sparse Row format
174
+ 4. `lil_array`: List of Lists format
175
+ 5. `dok_array`: Dictionary of Keys format
176
+ 6. `coo_array`: COOrdinate format (aka IJV, triplet format)
177
+ 7. `dia_array`: DIAgonal format
178
+
179
+ To construct an array efficiently, use either `dok_array` or `lil_array`.
180
+ The `lil_array` class supports basic slicing and fancy indexing with a
181
+ similar syntax to NumPy arrays. As illustrated below, the COO format
182
+ may also be used to efficiently construct arrays. Despite their
183
+ similarity to NumPy arrays, it is **strongly discouraged** to use NumPy
184
+ functions directly on these arrays because NumPy may not properly convert
185
+ them for computations, leading to unexpected (and incorrect) results. If you
186
+ do want to apply a NumPy function to these arrays, first check if SciPy has
187
+ its own implementation for the given sparse array class, or **convert the
188
+ sparse array to a NumPy array** (e.g., using the ``toarray`` method of the
189
+ class) first before applying the method.
190
+
191
+ To perform manipulations such as multiplication or inversion, first
192
+ convert the array to either CSC or CSR format. The `lil_array` format is
193
+ row-based, so conversion to CSR is efficient, whereas conversion to CSC
194
+ is less so.
195
+
196
+ All conversions among the CSR, CSC, and COO formats are efficient,
197
+ linear-time operations.
198
+
199
+ Matrix vector product
200
+ ---------------------
201
+ To do a vector product between a sparse array and a vector simply use
202
+ the array ``dot`` method, as described in its docstring:
203
+
204
+ >>> import numpy as np
205
+ >>> from scipy.sparse import csr_array
206
+ >>> A = csr_array([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
207
+ >>> v = np.array([1, 0, -1])
208
+ >>> A.dot(v)
209
+ array([ 1, -3, -1], dtype=int64)
210
+
211
+ .. warning:: As of NumPy 1.7, ``np.dot`` is not aware of sparse arrays,
212
+ therefore using it will result on unexpected results or errors.
213
+ The corresponding dense array should be obtained first instead:
214
+
215
+ >>> np.dot(A.toarray(), v)
216
+ array([ 1, -3, -1], dtype=int64)
217
+
218
+ but then all the performance advantages would be lost.
219
+
220
+ The CSR format is especially suitable for fast matrix vector products.
221
+
222
+ Example 1
223
+ ---------
224
+ Construct a 1000x1000 `lil_array` and add some values to it:
225
+
226
+ >>> from scipy.sparse import lil_array
227
+ >>> from scipy.sparse.linalg import spsolve
228
+ >>> from numpy.linalg import solve, norm
229
+ >>> from numpy.random import rand
230
+
231
+ >>> A = lil_array((1000, 1000))
232
+ >>> A[0, :100] = rand(100)
233
+ >>> A[1, 100:200] = A[0, :100]
234
+ >>> A.setdiag(rand(1000))
235
+
236
+ Now convert it to CSR format and solve A x = b for x:
237
+
238
+ >>> A = A.tocsr()
239
+ >>> b = rand(1000)
240
+ >>> x = spsolve(A, b)
241
+
242
+ Convert it to a dense array and solve, and check that the result
243
+ is the same:
244
+
245
+ >>> x_ = solve(A.toarray(), b)
246
+
247
+ Now we can compute norm of the error with:
248
+
249
+ >>> err = norm(x-x_)
250
+ >>> err < 1e-10
251
+ True
252
+
253
+ It should be small :)
254
+
255
+
256
+ Example 2
257
+ ---------
258
+
259
+ Construct an array in COO format:
260
+
261
+ >>> from scipy import sparse
262
+ >>> from numpy import array
263
+ >>> I = array([0,3,1,0])
264
+ >>> J = array([0,3,1,2])
265
+ >>> V = array([4,5,7,9])
266
+ >>> A = sparse.coo_array((V,(I,J)),shape=(4,4))
267
+
268
+ Notice that the indices do not need to be sorted.
269
+
270
+ Duplicate (i,j) entries are summed when converting to CSR or CSC.
271
+
272
+ >>> I = array([0,0,1,3,1,0,0])
273
+ >>> J = array([0,2,1,3,1,0,0])
274
+ >>> V = array([1,1,1,1,1,1,1])
275
+ >>> B = sparse.coo_array((V,(I,J)),shape=(4,4)).tocsr()
276
+
277
+ This is useful for constructing finite-element stiffness and mass matrices.
278
+
279
+ Further details
280
+ ---------------
281
+
282
+ CSR column indices are not necessarily sorted. Likewise for CSC row
283
+ indices. Use the ``.sorted_indices()`` and ``.sort_indices()`` methods when
284
+ sorted indices are required (e.g., when passing data to other libraries).
285
+
286
+ """
287
+
288
+ # Original code by Travis Oliphant.
289
+ # Modified and extended by Ed Schofield, Robert Cimrman,
290
+ # Nathan Bell, and Jake Vanderplas.
291
+
292
+ import warnings as _warnings
293
+
294
+ from ._base import *
295
+ from ._csr import *
296
+ from ._csc import *
297
+ from ._lil import *
298
+ from ._dok import *
299
+ from ._coo import *
300
+ from ._dia import *
301
+ from ._bsr import *
302
+ from ._construct import *
303
+ from ._extract import *
304
+ from ._matrix import spmatrix
305
+ from ._matrix_io import *
306
+
307
+ # For backward compatibility with v0.19.
308
+ from . import csgraph
309
+
310
+ # Deprecated namespaces, to be removed in v2.0.0
311
+ from . import (
312
+ base, bsr, compressed, construct, coo, csc, csr, data, dia, dok, extract,
313
+ lil, sparsetools, sputils
314
+ )
315
+
316
+ __all__ = [s for s in dir() if not s.startswith('_')]
317
+
318
+ # Filter PendingDeprecationWarning for np.matrix introduced with numpy 1.15
319
+ msg = 'the matrix subclass is not the recommended way'
320
+ _warnings.filterwarnings('ignore', message=msg)
321
+
322
+ from scipy._lib._testutils import PytestTester
323
+ test = PytestTester(__name__)
324
+ del PytestTester
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (9.38 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_base.cpython-310.pyc ADDED
Binary file (46.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_bsr.cpython-310.pyc ADDED
Binary file (22.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_compressed.cpython-310.pyc ADDED
Binary file (33.4 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_construct.cpython-310.pyc ADDED
Binary file (42.6 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_coo.cpython-310.pyc ADDED
Binary file (27 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_csc.cpython-310.pyc ADDED
Binary file (11.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_csr.cpython-310.pyc ADDED
Binary file (14.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_data.cpython-310.pyc ADDED
Binary file (15.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_dia.cpython-310.pyc ADDED
Binary file (14.5 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_dok.cpython-310.pyc ADDED
Binary file (22.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_extract.cpython-310.pyc ADDED
Binary file (5.02 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_index.cpython-310.pyc ADDED
Binary file (11 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_lil.cpython-310.pyc ADDED
Binary file (18.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_matrix.cpython-310.pyc ADDED
Binary file (4.23 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_matrix_io.cpython-310.pyc ADDED
Binary file (5.37 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_spfuncs.cpython-310.pyc ADDED
Binary file (1.83 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/_sputils.cpython-310.pyc ADDED
Binary file (12.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/base.cpython-310.pyc ADDED
Binary file (791 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/bsr.cpython-310.pyc ADDED
Binary file (792 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/compressed.cpython-310.pyc ADDED
Binary file (955 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/construct.cpython-310.pyc ADDED
Binary file (864 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/coo.cpython-310.pyc ADDED
Binary file (819 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/csc.cpython-310.pyc ADDED
Binary file (656 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/csr.cpython-310.pyc ADDED
Binary file (693 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/data.cpython-310.pyc ADDED
Binary file (632 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/dia.cpython-310.pyc ADDED
Binary file (712 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/dok.cpython-310.pyc ADDED
Binary file (738 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/extract.cpython-310.pyc ADDED
Binary file (630 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/lil.cpython-310.pyc ADDED
Binary file (745 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/sparsetools.cpython-310.pyc ADDED
Binary file (1.79 kB). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/spfuncs.cpython-310.pyc ADDED
Binary file (652 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/__pycache__/sputils.cpython-310.pyc ADDED
Binary file (910 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/scipy/sparse/_base.py ADDED
@@ -0,0 +1,1568 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Base class for sparse matrices"""
2
+ from warnings import warn
3
+
4
+ import numpy as np
5
+ from scipy._lib._util import VisibleDeprecationWarning
6
+
7
+ from ._sputils import (asmatrix, check_reshape_kwargs, check_shape,
8
+ get_sum_dtype, isdense, isscalarlike,
9
+ matrix, validateaxis,)
10
+
11
+ from ._matrix import spmatrix
12
+
13
+ __all__ = ['isspmatrix', 'issparse', 'sparray',
14
+ 'SparseWarning', 'SparseEfficiencyWarning']
15
+
16
+
17
+ class SparseWarning(Warning):
18
+ pass
19
+
20
+
21
+ class SparseFormatWarning(SparseWarning):
22
+ pass
23
+
24
+
25
+ class SparseEfficiencyWarning(SparseWarning):
26
+ pass
27
+
28
+
29
+ # The formats that we might potentially understand.
30
+ _formats = {'csc': [0, "Compressed Sparse Column"],
31
+ 'csr': [1, "Compressed Sparse Row"],
32
+ 'dok': [2, "Dictionary Of Keys"],
33
+ 'lil': [3, "List of Lists"],
34
+ 'dod': [4, "Dictionary of Dictionaries"],
35
+ 'sss': [5, "Symmetric Sparse Skyline"],
36
+ 'coo': [6, "COOrdinate"],
37
+ 'lba': [7, "Linpack BAnded"],
38
+ 'egd': [8, "Ellpack-itpack Generalized Diagonal"],
39
+ 'dia': [9, "DIAgonal"],
40
+ 'bsr': [10, "Block Sparse Row"],
41
+ 'msr': [11, "Modified compressed Sparse Row"],
42
+ 'bsc': [12, "Block Sparse Column"],
43
+ 'msc': [13, "Modified compressed Sparse Column"],
44
+ 'ssk': [14, "Symmetric SKyline"],
45
+ 'nsk': [15, "Nonsymmetric SKyline"],
46
+ 'jad': [16, "JAgged Diagonal"],
47
+ 'uss': [17, "Unsymmetric Sparse Skyline"],
48
+ 'vbr': [18, "Variable Block Row"],
49
+ 'und': [19, "Undefined"]
50
+ }
51
+
52
+
53
+ # These univariate ufuncs preserve zeros.
54
+ _ufuncs_with_fixed_point_at_zero = frozenset([
55
+ np.sin, np.tan, np.arcsin, np.arctan, np.sinh, np.tanh, np.arcsinh,
56
+ np.arctanh, np.rint, np.sign, np.expm1, np.log1p, np.deg2rad,
57
+ np.rad2deg, np.floor, np.ceil, np.trunc, np.sqrt])
58
+
59
+
60
+ MAXPRINT = 50
61
+
62
+
63
+ class _spbase:
64
+ """ This class provides a base class for all sparse arrays. It
65
+ cannot be instantiated. Most of the work is provided by subclasses.
66
+ """
67
+
68
+ __array_priority__ = 10.1
69
+ _format = 'und' # undefined
70
+
71
+ @property
72
+ def ndim(self) -> int:
73
+ return len(self._shape)
74
+
75
+ @property
76
+ def _shape_as_2d(self):
77
+ s = self._shape
78
+ return (1, s[-1]) if len(s) == 1 else s
79
+
80
+ @property
81
+ def _bsr_container(self):
82
+ from ._bsr import bsr_array
83
+ return bsr_array
84
+
85
+ @property
86
+ def _coo_container(self):
87
+ from ._coo import coo_array
88
+ return coo_array
89
+
90
+ @property
91
+ def _csc_container(self):
92
+ from ._csc import csc_array
93
+ return csc_array
94
+
95
+ @property
96
+ def _csr_container(self):
97
+ from ._csr import csr_array
98
+ return csr_array
99
+
100
+ @property
101
+ def _dia_container(self):
102
+ from ._dia import dia_array
103
+ return dia_array
104
+
105
+ @property
106
+ def _dok_container(self):
107
+ from ._dok import dok_array
108
+ return dok_array
109
+
110
+ @property
111
+ def _lil_container(self):
112
+ from ._lil import lil_array
113
+ return lil_array
114
+
115
+ def __init__(self, maxprint=MAXPRINT):
116
+ self._shape = None
117
+ if self.__class__.__name__ == '_spbase':
118
+ raise ValueError("This class is not intended"
119
+ " to be instantiated directly.")
120
+ self.maxprint = maxprint
121
+
122
+ # Use this in 1.14.0 and later:
123
+ #
124
+ # @property
125
+ # def shape(self):
126
+ # return self._shape
127
+
128
+ def reshape(self, *args, **kwargs):
129
+ """reshape(self, shape, order='C', copy=False)
130
+
131
+ Gives a new shape to a sparse array/matrix without changing its data.
132
+
133
+ Parameters
134
+ ----------
135
+ shape : length-2 tuple of ints
136
+ The new shape should be compatible with the original shape.
137
+ order : {'C', 'F'}, optional
138
+ Read the elements using this index order. 'C' means to read and
139
+ write the elements using C-like index order; e.g., read entire first
140
+ row, then second row, etc. 'F' means to read and write the elements
141
+ using Fortran-like index order; e.g., read entire first column, then
142
+ second column, etc.
143
+ copy : bool, optional
144
+ Indicates whether or not attributes of self should be copied
145
+ whenever possible. The degree to which attributes are copied varies
146
+ depending on the type of sparse array being used.
147
+
148
+ Returns
149
+ -------
150
+ reshaped : sparse array/matrix
151
+ A sparse array/matrix with the given `shape`, not necessarily of the same
152
+ format as the current object.
153
+
154
+ See Also
155
+ --------
156
+ numpy.reshape : NumPy's implementation of 'reshape' for ndarrays
157
+ """
158
+ # If the shape already matches, don't bother doing an actual reshape
159
+ # Otherwise, the default is to convert to COO and use its reshape
160
+ is_array = isinstance(self, sparray)
161
+ shape = check_shape(args, self.shape, allow_1d=is_array)
162
+ order, copy = check_reshape_kwargs(kwargs)
163
+ if shape == self.shape:
164
+ if copy:
165
+ return self.copy()
166
+ else:
167
+ return self
168
+
169
+ return self.tocoo(copy=copy).reshape(shape, order=order, copy=False)
170
+
171
+ def resize(self, shape):
172
+ """Resize the array/matrix in-place to dimensions given by ``shape``
173
+
174
+ Any elements that lie within the new shape will remain at the same
175
+ indices, while non-zero elements lying outside the new shape are
176
+ removed.
177
+
178
+ Parameters
179
+ ----------
180
+ shape : (int, int)
181
+ number of rows and columns in the new array/matrix
182
+
183
+ Notes
184
+ -----
185
+ The semantics are not identical to `numpy.ndarray.resize` or
186
+ `numpy.resize`. Here, the same data will be maintained at each index
187
+ before and after reshape, if that index is within the new bounds. In
188
+ numpy, resizing maintains contiguity of the array, moving elements
189
+ around in the logical array but not within a flattened representation.
190
+
191
+ We give no guarantees about whether the underlying data attributes
192
+ (arrays, etc.) will be modified in place or replaced with new objects.
193
+ """
194
+ # As an inplace operation, this requires implementation in each format.
195
+ raise NotImplementedError(
196
+ f'{type(self).__name__}.resize is not implemented')
197
+
198
+ def astype(self, dtype, casting='unsafe', copy=True):
199
+ """Cast the array/matrix elements to a specified type.
200
+
201
+ Parameters
202
+ ----------
203
+ dtype : string or numpy dtype
204
+ Typecode or data-type to which to cast the data.
205
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
206
+ Controls what kind of data casting may occur.
207
+ Defaults to 'unsafe' for backwards compatibility.
208
+ 'no' means the data types should not be cast at all.
209
+ 'equiv' means only byte-order changes are allowed.
210
+ 'safe' means only casts which can preserve values are allowed.
211
+ 'same_kind' means only safe casts or casts within a kind,
212
+ like float64 to float32, are allowed.
213
+ 'unsafe' means any data conversions may be done.
214
+ copy : bool, optional
215
+ If `copy` is `False`, the result might share some memory with this
216
+ array/matrix. If `copy` is `True`, it is guaranteed that the result and
217
+ this array/matrix do not share any memory.
218
+ """
219
+
220
+ dtype = np.dtype(dtype)
221
+ if self.dtype != dtype:
222
+ return self.tocsr().astype(
223
+ dtype, casting=casting, copy=copy).asformat(self.format)
224
+ elif copy:
225
+ return self.copy()
226
+ else:
227
+ return self
228
+
229
+ @classmethod
230
+ def _ascontainer(cls, X, **kwargs):
231
+ if issubclass(cls, sparray):
232
+ return np.asarray(X, **kwargs)
233
+ else:
234
+ return asmatrix(X, **kwargs)
235
+
236
+ @classmethod
237
+ def _container(cls, X, **kwargs):
238
+ if issubclass(cls, sparray):
239
+ return np.array(X, **kwargs)
240
+ else:
241
+ return matrix(X, **kwargs)
242
+
243
+ def _asfptype(self):
244
+ """Upcast array to a floating point format (if necessary)"""
245
+
246
+ fp_types = ['f', 'd', 'F', 'D']
247
+
248
+ if self.dtype.char in fp_types:
249
+ return self
250
+ else:
251
+ for fp_type in fp_types:
252
+ if self.dtype <= np.dtype(fp_type):
253
+ return self.astype(fp_type)
254
+
255
+ raise TypeError('cannot upcast [%s] to a floating '
256
+ 'point format' % self.dtype.name)
257
+
258
+ def __iter__(self):
259
+ for r in range(self.shape[0]):
260
+ yield self[r]
261
+
262
+ def _getmaxprint(self):
263
+ """Maximum number of elements to display when printed."""
264
+ return self.maxprint
265
+
266
+ def count_nonzero(self):
267
+ """Number of non-zero entries, equivalent to
268
+
269
+ np.count_nonzero(a.toarray())
270
+
271
+ Unlike the nnz property, which return the number of stored
272
+ entries (the length of the data attribute), this method counts the
273
+ actual number of non-zero entries in data.
274
+ """
275
+ raise NotImplementedError("count_nonzero not implemented for %s." %
276
+ self.__class__.__name__)
277
+
278
+ def _getnnz(self, axis=None):
279
+ """Number of stored values, including explicit zeros.
280
+
281
+ Parameters
282
+ ----------
283
+ axis : None, 0, or 1
284
+ Select between the number of values across the whole array, in
285
+ each column, or in each row.
286
+
287
+ See also
288
+ --------
289
+ count_nonzero : Number of non-zero entries
290
+ """
291
+ raise NotImplementedError("getnnz not implemented for %s." %
292
+ self.__class__.__name__)
293
+
294
+ @property
295
+ def nnz(self) -> int:
296
+ """Number of stored values, including explicit zeros.
297
+
298
+ See also
299
+ --------
300
+ count_nonzero : Number of non-zero entries
301
+ """
302
+ return self._getnnz()
303
+
304
+ @property
305
+ def size(self) -> int:
306
+ """Number of stored values.
307
+
308
+ See also
309
+ --------
310
+ count_nonzero : Number of non-zero values.
311
+ """
312
+ return self._getnnz()
313
+
314
+ @property
315
+ def format(self) -> str:
316
+ """Format string for matrix."""
317
+ return self._format
318
+
319
+ @property
320
+ def A(self) -> np.ndarray:
321
+ """DEPRECATED: Return a dense array.
322
+
323
+ .. deprecated:: 1.11.0
324
+
325
+ `.A` is deprecated and will be removed in v1.14.0.
326
+ Use `.toarray()` instead.
327
+ """
328
+ if isinstance(self, sparray):
329
+ message = ("`.A` is deprecated and will be removed in v1.14.0. "
330
+ "Use `.toarray()` instead.")
331
+ warn(VisibleDeprecationWarning(message), stacklevel=2)
332
+ return self.toarray()
333
+
334
+ @property
335
+ def T(self):
336
+ """Transpose."""
337
+ return self.transpose()
338
+
339
+ @property
340
+ def H(self):
341
+ """DEPRECATED: Returns the (complex) conjugate transpose.
342
+
343
+ .. deprecated:: 1.11.0
344
+
345
+ `.H` is deprecated and will be removed in v1.14.0.
346
+ Please use `.T.conjugate()` instead.
347
+ """
348
+ if isinstance(self, sparray):
349
+ message = ("`.H` is deprecated and will be removed in v1.14.0. "
350
+ "Please use `.T.conjugate()` instead.")
351
+ warn(VisibleDeprecationWarning(message), stacklevel=2)
352
+ return self.T.conjugate()
353
+
354
+ @property
355
+ def real(self):
356
+ return self._real()
357
+
358
+ @property
359
+ def imag(self):
360
+ return self._imag()
361
+
362
+ def __repr__(self):
363
+ _, format_name = _formats[self.format]
364
+ sparse_cls = 'array' if isinstance(self, sparray) else 'matrix'
365
+ shape_str = 'x'.join(str(x) for x in self.shape)
366
+ return (
367
+ f"<{shape_str} sparse {sparse_cls} of type '{self.dtype.type}'\n"
368
+ f"\twith {self.nnz} stored elements in {format_name} format>"
369
+ )
370
+
371
+ def __str__(self):
372
+ maxprint = self._getmaxprint()
373
+
374
+ A = self.tocoo()
375
+
376
+ # helper function, outputs "(i,j) v"
377
+ def tostr(row, col, data):
378
+ triples = zip(list(zip(row, col)), data)
379
+ return '\n'.join([(' {}\t{}'.format(*t)) for t in triples])
380
+
381
+ if self.nnz > maxprint:
382
+ half = maxprint // 2
383
+ out = tostr(A.row[:half], A.col[:half], A.data[:half])
384
+ out += "\n :\t:\n"
385
+ half = maxprint - maxprint//2
386
+ out += tostr(A.row[-half:], A.col[-half:], A.data[-half:])
387
+ else:
388
+ out = tostr(A.row, A.col, A.data)
389
+
390
+ return out
391
+
392
+ def __bool__(self): # Simple -- other ideas?
393
+ if self.shape == (1, 1):
394
+ return self.nnz != 0
395
+ else:
396
+ raise ValueError("The truth value of an array with more than one "
397
+ "element is ambiguous. Use a.any() or a.all().")
398
+ __nonzero__ = __bool__
399
+
400
+ # What should len(sparse) return? For consistency with dense matrices,
401
+ # perhaps it should be the number of rows? But for some uses the number of
402
+ # non-zeros is more important. For now, raise an exception!
403
+ def __len__(self):
404
+ raise TypeError("sparse array length is ambiguous; use getnnz()"
405
+ " or shape[0]")
406
+
407
+ def asformat(self, format, copy=False):
408
+ """Return this array/matrix in the passed format.
409
+
410
+ Parameters
411
+ ----------
412
+ format : {str, None}
413
+ The desired sparse format ("csr", "csc", "lil", "dok", "array", ...)
414
+ or None for no conversion.
415
+ copy : bool, optional
416
+ If True, the result is guaranteed to not share data with self.
417
+
418
+ Returns
419
+ -------
420
+ A : This array/matrix in the passed format.
421
+ """
422
+ if format is None or format == self.format:
423
+ if copy:
424
+ return self.copy()
425
+ else:
426
+ return self
427
+ else:
428
+ try:
429
+ convert_method = getattr(self, 'to' + format)
430
+ except AttributeError as e:
431
+ raise ValueError(f'Format {format} is unknown.') from e
432
+
433
+ # Forward the copy kwarg, if it's accepted.
434
+ try:
435
+ return convert_method(copy=copy)
436
+ except TypeError:
437
+ return convert_method()
438
+
439
+ ###################################################################
440
+ # NOTE: All arithmetic operations use csr_matrix by default.
441
+ # Therefore a new sparse array format just needs to define a
442
+ # .tocsr() method to provide arithmetic support. Any of these
443
+ # methods can be overridden for efficiency.
444
+ ####################################################################
445
+
446
+ def multiply(self, other):
447
+ """Point-wise multiplication by another array/matrix."""
448
+ return self.tocsr().multiply(other)
449
+
450
+ def maximum(self, other):
451
+ """Element-wise maximum between this and another array/matrix."""
452
+ return self.tocsr().maximum(other)
453
+
454
+ def minimum(self, other):
455
+ """Element-wise minimum between this and another array/matrix."""
456
+ return self.tocsr().minimum(other)
457
+
458
+ def dot(self, other):
459
+ """Ordinary dot product
460
+
461
+ Examples
462
+ --------
463
+ >>> import numpy as np
464
+ >>> from scipy.sparse import csr_array
465
+ >>> A = csr_array([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
466
+ >>> v = np.array([1, 0, -1])
467
+ >>> A.dot(v)
468
+ array([ 1, -3, -1], dtype=int64)
469
+
470
+ """
471
+ if np.isscalar(other):
472
+ return self * other
473
+ else:
474
+ return self @ other
475
+
476
+ def power(self, n, dtype=None):
477
+ """Element-wise power."""
478
+ return self.tocsr().power(n, dtype=dtype)
479
+
480
+ def __eq__(self, other):
481
+ return self.tocsr().__eq__(other)
482
+
483
+ def __ne__(self, other):
484
+ return self.tocsr().__ne__(other)
485
+
486
+ def __lt__(self, other):
487
+ return self.tocsr().__lt__(other)
488
+
489
+ def __gt__(self, other):
490
+ return self.tocsr().__gt__(other)
491
+
492
+ def __le__(self, other):
493
+ return self.tocsr().__le__(other)
494
+
495
+ def __ge__(self, other):
496
+ return self.tocsr().__ge__(other)
497
+
498
+ def __abs__(self):
499
+ return abs(self.tocsr())
500
+
501
+ def __round__(self, ndigits=0):
502
+ return round(self.tocsr(), ndigits=ndigits)
503
+
504
+ def _add_sparse(self, other):
505
+ return self.tocsr()._add_sparse(other)
506
+
507
+ def _add_dense(self, other):
508
+ return self.tocoo()._add_dense(other)
509
+
510
+ def _sub_sparse(self, other):
511
+ return self.tocsr()._sub_sparse(other)
512
+
513
+ def _sub_dense(self, other):
514
+ return self.todense() - other
515
+
516
+ def _rsub_dense(self, other):
517
+ # note: this can't be replaced by other + (-self) for unsigned types
518
+ return other - self.todense()
519
+
520
+ def __add__(self, other): # self + other
521
+ if isscalarlike(other):
522
+ if other == 0:
523
+ return self.copy()
524
+ # Now we would add this scalar to every element.
525
+ raise NotImplementedError('adding a nonzero scalar to a '
526
+ 'sparse array is not supported')
527
+ elif issparse(other):
528
+ if other.shape != self.shape:
529
+ raise ValueError("inconsistent shapes")
530
+ return self._add_sparse(other)
531
+ elif isdense(other):
532
+ other = np.broadcast_to(other, self.shape)
533
+ return self._add_dense(other)
534
+ else:
535
+ return NotImplemented
536
+
537
+ def __radd__(self,other): # other + self
538
+ return self.__add__(other)
539
+
540
+ def __sub__(self, other): # self - other
541
+ if isscalarlike(other):
542
+ if other == 0:
543
+ return self.copy()
544
+ raise NotImplementedError('subtracting a nonzero scalar from a '
545
+ 'sparse array is not supported')
546
+ elif issparse(other):
547
+ if other.shape != self.shape:
548
+ raise ValueError("inconsistent shapes")
549
+ return self._sub_sparse(other)
550
+ elif isdense(other):
551
+ other = np.broadcast_to(other, self.shape)
552
+ return self._sub_dense(other)
553
+ else:
554
+ return NotImplemented
555
+
556
+ def __rsub__(self,other): # other - self
557
+ if isscalarlike(other):
558
+ if other == 0:
559
+ return -self.copy()
560
+ raise NotImplementedError('subtracting a sparse array from a '
561
+ 'nonzero scalar is not supported')
562
+ elif isdense(other):
563
+ other = np.broadcast_to(other, self.shape)
564
+ return self._rsub_dense(other)
565
+ else:
566
+ return NotImplemented
567
+
568
+ def _matmul_dispatch(self, other):
569
+ """np.array-like matmul & `np.matrix`-like mul, i.e. `dot` or `NotImplemented`
570
+
571
+ interpret other and call one of the following
572
+ self._mul_scalar()
573
+ self._matmul_vector()
574
+ self._matmul_multivector()
575
+ self._matmul_sparse()
576
+ """
577
+ # This method has to be different from `__matmul__` because it is also
578
+ # called by sparse matrix classes.
579
+
580
+ # Currently matrix multiplication is only supported
581
+ # for 2D arrays. Hence we unpacked and use only the
582
+ # two last axes' lengths.
583
+ M, N = self._shape_as_2d
584
+
585
+ if other.__class__ is np.ndarray:
586
+ # Fast path for the most common case
587
+ if other.shape == (N,):
588
+ return self._matmul_vector(other)
589
+ elif other.shape == (N, 1):
590
+ result = self._matmul_vector(other.ravel())
591
+ if self.ndim == 1:
592
+ return result
593
+ return result.reshape(M, 1)
594
+ elif other.ndim == 2 and other.shape[0] == N:
595
+ return self._matmul_multivector(other)
596
+
597
+ if isscalarlike(other):
598
+ # scalar value
599
+ return self._mul_scalar(other)
600
+
601
+ if issparse(other):
602
+ if self.shape[-1] != other.shape[0]:
603
+ raise ValueError('dimension mismatch')
604
+ if other.ndim == 1:
605
+ raise ValueError('Cannot yet multiply a 1d sparse array')
606
+ return self._matmul_sparse(other)
607
+
608
+ # If it's a list or whatever, treat it like an array
609
+ other_a = np.asanyarray(other)
610
+
611
+ if other_a.ndim == 0 and other_a.dtype == np.object_:
612
+ # Not interpretable as an array; return NotImplemented so that
613
+ # other's __rmatmul__ can kick in if that's implemented.
614
+ return NotImplemented
615
+
616
+ try:
617
+ other.shape
618
+ except AttributeError:
619
+ other = other_a
620
+
621
+ if other.ndim == 1 or other.ndim == 2 and other.shape[1] == 1:
622
+ # dense row or column vector
623
+ if other.shape != (N,) and other.shape != (N, 1):
624
+ raise ValueError('dimension mismatch')
625
+
626
+ result = self._matmul_vector(np.ravel(other))
627
+
628
+ if isinstance(other, np.matrix):
629
+ result = self._ascontainer(result)
630
+
631
+ if other.ndim == 2 and other.shape[1] == 1:
632
+ # If 'other' was an (nx1) column vector, reshape the result
633
+ result = result.reshape(-1, 1)
634
+
635
+ return result
636
+
637
+ elif other.ndim == 2:
638
+ ##
639
+ # dense 2D array or matrix ("multivector")
640
+
641
+ if other.shape[0] != N:
642
+ raise ValueError('dimension mismatch')
643
+
644
+ result = self._matmul_multivector(np.asarray(other))
645
+
646
+ if isinstance(other, np.matrix):
647
+ result = self._ascontainer(result)
648
+
649
+ return result
650
+
651
+ else:
652
+ raise ValueError('could not interpret dimensions')
653
+
654
+ def __mul__(self, *args, **kwargs):
655
+ return self.multiply(*args, **kwargs)
656
+
657
+ def __rmul__(self, *args, **kwargs): # other * self
658
+ return self.multiply(*args, **kwargs)
659
+
660
+ # by default, use CSR for __mul__ handlers
661
+ def _mul_scalar(self, other):
662
+ return self.tocsr()._mul_scalar(other)
663
+
664
+ def _matmul_vector(self, other):
665
+ return self.tocsr()._matmul_vector(other)
666
+
667
+ def _matmul_multivector(self, other):
668
+ return self.tocsr()._matmul_multivector(other)
669
+
670
+ def _matmul_sparse(self, other):
671
+ return self.tocsr()._matmul_sparse(other)
672
+
673
+ def _rmatmul_dispatch(self, other):
674
+ if isscalarlike(other):
675
+ return self._mul_scalar(other)
676
+ else:
677
+ # Don't use asarray unless we have to
678
+ try:
679
+ tr = other.transpose()
680
+ except AttributeError:
681
+ tr = np.asarray(other).transpose()
682
+ ret = self.transpose()._matmul_dispatch(tr)
683
+ if ret is NotImplemented:
684
+ return NotImplemented
685
+ return ret.transpose()
686
+
687
+ #######################
688
+ # matmul (@) operator #
689
+ #######################
690
+
691
+ def __matmul__(self, other):
692
+ if isscalarlike(other):
693
+ raise ValueError("Scalar operands are not allowed, "
694
+ "use '*' instead")
695
+ return self._matmul_dispatch(other)
696
+
697
+ def __rmatmul__(self, other):
698
+ if isscalarlike(other):
699
+ raise ValueError("Scalar operands are not allowed, "
700
+ "use '*' instead")
701
+ return self._rmatmul_dispatch(other)
702
+
703
+ ####################
704
+ # Other Arithmetic #
705
+ ####################
706
+
707
+ def _divide(self, other, true_divide=False, rdivide=False):
708
+ if isscalarlike(other):
709
+ if rdivide:
710
+ if true_divide:
711
+ return np.true_divide(other, self.todense())
712
+ else:
713
+ return np.divide(other, self.todense())
714
+
715
+ if true_divide and np.can_cast(self.dtype, np.float64):
716
+ return self.astype(np.float64)._mul_scalar(1./other)
717
+ else:
718
+ r = self._mul_scalar(1./other)
719
+
720
+ scalar_dtype = np.asarray(other).dtype
721
+ if (np.issubdtype(self.dtype, np.integer) and
722
+ np.issubdtype(scalar_dtype, np.integer)):
723
+ return r.astype(self.dtype)
724
+ else:
725
+ return r
726
+
727
+ elif isdense(other):
728
+ if not rdivide:
729
+ if true_divide:
730
+ recip = np.true_divide(1., other)
731
+ else:
732
+ recip = np.divide(1., other)
733
+ return self.multiply(recip)
734
+ else:
735
+ if true_divide:
736
+ return np.true_divide(other, self.todense())
737
+ else:
738
+ return np.divide(other, self.todense())
739
+ elif issparse(other):
740
+ if rdivide:
741
+ return other._divide(self, true_divide, rdivide=False)
742
+
743
+ self_csr = self.tocsr()
744
+ if true_divide and np.can_cast(self.dtype, np.float64):
745
+ return self_csr.astype(np.float64)._divide_sparse(other)
746
+ else:
747
+ return self_csr._divide_sparse(other)
748
+ else:
749
+ return NotImplemented
750
+
751
+ def __truediv__(self, other):
752
+ return self._divide(other, true_divide=True)
753
+
754
+ def __div__(self, other):
755
+ # Always do true division
756
+ return self._divide(other, true_divide=True)
757
+
758
+ def __rtruediv__(self, other):
759
+ # Implementing this as the inverse would be too magical -- bail out
760
+ return NotImplemented
761
+
762
+ def __rdiv__(self, other):
763
+ # Implementing this as the inverse would be too magical -- bail out
764
+ return NotImplemented
765
+
766
+ def __neg__(self):
767
+ return -self.tocsr()
768
+
769
+ def __iadd__(self, other):
770
+ return NotImplemented
771
+
772
+ def __isub__(self, other):
773
+ return NotImplemented
774
+
775
+ def __imul__(self, other):
776
+ return NotImplemented
777
+
778
+ def __idiv__(self, other):
779
+ return self.__itruediv__(other)
780
+
781
+ def __itruediv__(self, other):
782
+ return NotImplemented
783
+
784
+ def __pow__(self, *args, **kwargs):
785
+ return self.power(*args, **kwargs)
786
+
787
+ def transpose(self, axes=None, copy=False):
788
+ """
789
+ Reverses the dimensions of the sparse array/matrix.
790
+
791
+ Parameters
792
+ ----------
793
+ axes : None, optional
794
+ This argument is in the signature *solely* for NumPy
795
+ compatibility reasons. Do not pass in anything except
796
+ for the default value.
797
+ copy : bool, optional
798
+ Indicates whether or not attributes of `self` should be
799
+ copied whenever possible. The degree to which attributes
800
+ are copied varies depending on the type of sparse array/matrix
801
+ being used.
802
+
803
+ Returns
804
+ -------
805
+ p : `self` with the dimensions reversed.
806
+
807
+ Notes
808
+ -----
809
+ If `self` is a `csr_array` or a `csc_array`, then this will return a
810
+ `csc_array` or a `csr_array`, respectively.
811
+
812
+ See Also
813
+ --------
814
+ numpy.transpose : NumPy's implementation of 'transpose' for ndarrays
815
+ """
816
+ return self.tocsr(copy=copy).transpose(axes=axes, copy=False)
817
+
818
+ def conjugate(self, copy=True):
819
+ """Element-wise complex conjugation.
820
+
821
+ If the array/matrix is of non-complex data type and `copy` is False,
822
+ this method does nothing and the data is not copied.
823
+
824
+ Parameters
825
+ ----------
826
+ copy : bool, optional
827
+ If True, the result is guaranteed to not share data with self.
828
+
829
+ Returns
830
+ -------
831
+ A : The element-wise complex conjugate.
832
+
833
+ """
834
+ if np.issubdtype(self.dtype, np.complexfloating):
835
+ return self.tocsr(copy=copy).conjugate(copy=False)
836
+ elif copy:
837
+ return self.copy()
838
+ else:
839
+ return self
840
+
841
+ def conj(self, copy=True):
842
+ return self.conjugate(copy=copy)
843
+
844
+ conj.__doc__ = conjugate.__doc__
845
+
846
+ def _real(self):
847
+ return self.tocsr()._real()
848
+
849
+ def _imag(self):
850
+ return self.tocsr()._imag()
851
+
852
+ def nonzero(self):
853
+ """Nonzero indices of the array/matrix.
854
+
855
+ Returns a tuple of arrays (row,col) containing the indices
856
+ of the non-zero elements of the array.
857
+
858
+ Examples
859
+ --------
860
+ >>> from scipy.sparse import csr_array
861
+ >>> A = csr_array([[1,2,0],[0,0,3],[4,0,5]])
862
+ >>> A.nonzero()
863
+ (array([0, 0, 1, 2, 2]), array([0, 1, 2, 0, 2]))
864
+
865
+ """
866
+
867
+ # convert to COOrdinate format
868
+ A = self.tocoo()
869
+ nz_mask = A.data != 0
870
+ return (A.row[nz_mask], A.col[nz_mask])
871
+
872
+ def _getcol(self, j):
873
+ """Returns a copy of column j of the array, as an (m x 1) sparse
874
+ array (column vector).
875
+ """
876
+ if self.ndim == 1:
877
+ raise ValueError("getcol not provided for 1d arrays. Use indexing A[j]")
878
+ # Subclasses should override this method for efficiency.
879
+ # Post-multiply by a (n x 1) column vector 'a' containing all zeros
880
+ # except for a_j = 1
881
+ N = self.shape[-1]
882
+ if j < 0:
883
+ j += N
884
+ if j < 0 or j >= N:
885
+ raise IndexError("index out of bounds")
886
+ col_selector = self._csc_container(([1], [[j], [0]]),
887
+ shape=(N, 1), dtype=self.dtype)
888
+ result = self @ col_selector
889
+ return result
890
+
891
+ def _getrow(self, i):
892
+ """Returns a copy of row i of the array, as a (1 x n) sparse
893
+ array (row vector).
894
+ """
895
+ if self.ndim == 1:
896
+ raise ValueError("getrow not meaningful for a 1d array")
897
+ # Subclasses should override this method for efficiency.
898
+ # Pre-multiply by a (1 x m) row vector 'a' containing all zeros
899
+ # except for a_i = 1
900
+ M = self.shape[0]
901
+ if i < 0:
902
+ i += M
903
+ if i < 0 or i >= M:
904
+ raise IndexError("index out of bounds")
905
+ row_selector = self._csr_container(([1], [[0], [i]]),
906
+ shape=(1, M), dtype=self.dtype)
907
+ return row_selector @ self
908
+
909
+ # The following dunder methods cannot be implemented.
910
+ #
911
+ # def __array__(self):
912
+ # # Sparse matrices rely on NumPy wrapping them in object arrays under
913
+ # # the hood to make unary ufuncs work on them. So we cannot raise
914
+ # # TypeError here - which would be handy to not give users object
915
+ # # arrays they probably don't want (they're looking for `.toarray()`).
916
+ # #
917
+ # # Conversion with `toarray()` would also break things because of the
918
+ # # behavior discussed above, plus we want to avoid densification by
919
+ # # accident because that can too easily blow up memory.
920
+ #
921
+ # def __array_ufunc__(self):
922
+ # # We cannot implement __array_ufunc__ due to mismatching semantics.
923
+ # # See gh-7707 and gh-7349 for details.
924
+ #
925
+ # def __array_function__(self):
926
+ # # We cannot implement __array_function__ due to mismatching semantics.
927
+ # # See gh-10362 for details.
928
+
929
+ def todense(self, order=None, out=None):
930
+ """
931
+ Return a dense representation of this sparse array/matrix.
932
+
933
+ Parameters
934
+ ----------
935
+ order : {'C', 'F'}, optional
936
+ Whether to store multi-dimensional data in C (row-major)
937
+ or Fortran (column-major) order in memory. The default
938
+ is 'None', which provides no ordering guarantees.
939
+ Cannot be specified in conjunction with the `out`
940
+ argument.
941
+
942
+ out : ndarray, 2-D, optional
943
+ If specified, uses this array (or `numpy.matrix`) as the
944
+ output buffer instead of allocating a new array to
945
+ return. The provided array must have the same shape and
946
+ dtype as the sparse array/matrix on which you are calling the
947
+ method.
948
+
949
+ Returns
950
+ -------
951
+ arr : numpy.matrix, 2-D
952
+ A NumPy matrix object with the same shape and containing
953
+ the same data represented by the sparse array/matrix, with the
954
+ requested memory order. If `out` was passed and was an
955
+ array (rather than a `numpy.matrix`), it will be filled
956
+ with the appropriate values and returned wrapped in a
957
+ `numpy.matrix` object that shares the same memory.
958
+ """
959
+ return self._ascontainer(self.toarray(order=order, out=out))
960
+
961
+ def toarray(self, order=None, out=None):
962
+ """
963
+ Return a dense ndarray representation of this sparse array/matrix.
964
+
965
+ Parameters
966
+ ----------
967
+ order : {'C', 'F'}, optional
968
+ Whether to store multidimensional data in C (row-major)
969
+ or Fortran (column-major) order in memory. The default
970
+ is 'None', which provides no ordering guarantees.
971
+ Cannot be specified in conjunction with the `out`
972
+ argument.
973
+
974
+ out : ndarray, 2-D, optional
975
+ If specified, uses this array as the output buffer
976
+ instead of allocating a new array to return. The provided
977
+ array must have the same shape and dtype as the sparse
978
+ array/matrix on which you are calling the method. For most
979
+ sparse types, `out` is required to be memory contiguous
980
+ (either C or Fortran ordered).
981
+
982
+ Returns
983
+ -------
984
+ arr : ndarray, 2-D
985
+ An array with the same shape and containing the same
986
+ data represented by the sparse array/matrix, with the requested
987
+ memory order. If `out` was passed, the same object is
988
+ returned after being modified in-place to contain the
989
+ appropriate values.
990
+ """
991
+ return self.tocoo(copy=False).toarray(order=order, out=out)
992
+
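For illustration, a minimal usage sketch of the two densification paths above (assuming the `csr_array` container from a recent SciPy): `toarray` returns a plain ndarray and can reuse a preallocated `out` buffer, while `todense` wraps the result in the container's dense type.

```python
import numpy as np
from scipy.sparse import csr_array

A = csr_array([[1, 0, 2], [0, 0, 3]])
dense = A.toarray(order='C')             # new C-ordered ndarray
buf = np.zeros(A.shape, dtype=A.dtype)   # buffer must match shape and dtype
A.toarray(out=buf)                       # filled in place, no new allocation
```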
993
+ # Any sparse array format deriving from _spbase must define one of
994
+ # tocsr or tocoo. The other conversion methods may be implemented for
995
+ # efficiency, but are not required.
996
+ def tocsr(self, copy=False):
997
+ """Convert this array/matrix to Compressed Sparse Row format.
998
+
999
+ With copy=False, the data/indices may be shared between this array/matrix and
1000
+ the resultant csr_array/matrix.
1001
+ """
1002
+ return self.tocoo(copy=copy).tocsr(copy=False)
1003
+
1004
+ def todok(self, copy=False):
1005
+ """Convert this array/matrix to Dictionary Of Keys format.
1006
+
1007
+ With copy=False, the data/indices may be shared between this array/matrix and
1008
+ the resultant dok_array/matrix.
1009
+ """
1010
+ return self.tocoo(copy=copy).todok(copy=False)
1011
+
1012
+ def tocoo(self, copy=False):
1013
+ """Convert this array/matrix to COOrdinate format.
1014
+
1015
+ With copy=False, the data/indices may be shared between this array/matrix and
1016
+ the resultant coo_array/matrix.
1017
+ """
1018
+ return self.tocsr(copy=False).tocoo(copy=copy)
1019
+
1020
+ def tolil(self, copy=False):
1021
+ """Convert this array/matrix to List of Lists format.
1022
+
1023
+ With copy=False, the data/indices may be shared between this array/matrix and
1024
+ the resultant lil_array/matrix.
1025
+ """
1026
+ return self.tocsr(copy=False).tolil(copy=copy)
1027
+
1028
+ def todia(self, copy=False):
1029
+ """Convert this array/matrix to sparse DIAgonal format.
1030
+
1031
+ With copy=False, the data/indices may be shared between this array/matrix and
1032
+ the resultant dia_array/matrix.
1033
+ """
1034
+ return self.tocoo(copy=copy).todia(copy=False)
1035
+
1036
+ def tobsr(self, blocksize=None, copy=False):
1037
+ """Convert this array/matrix to Block Sparse Row format.
1038
+
1039
+ With copy=False, the data/indices may be shared between this array/matrix and
1040
+ the resultant bsr_array/matrix.
1041
+
1042
+ When blocksize=(R, C) is provided, it will be used for construction of
1043
+ the bsr_array/matrix.
1044
+ """
1045
+ return self.tocsr(copy=False).tobsr(blocksize=blocksize, copy=copy)
1046
+
1047
+ def tocsc(self, copy=False):
1048
+ """Convert this array/matrix to Compressed Sparse Column format.
1049
+
1050
+ With copy=False, the data/indices may be shared between this array/matrix and
1051
+ the resultant csc_array/matrix.
1052
+ """
1053
+ return self.tocsr(copy=copy).tocsc(copy=False)
1054
+
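A short sketch of the conversion chain defined above: any format providing `tocoo` or `tocsr` gets the other conversions through these fallbacks, and concrete formats may override them for efficiency.

```python
from scipy.sparse import csr_array

A = csr_array([[0, 1], [2, 0]])
print(A.format)             # 'csr'
print(A.tocoo().format)     # 'coo'
print(A.todok().format)     # 'dok'
print(A.tolil().format)     # 'lil'
```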
1055
+ def copy(self):
1056
+ """Returns a copy of this array/matrix.
1057
+
1058
+ No data/indices will be shared between the returned value and current
1059
+ array/matrix.
1060
+ """
1061
+ return self.__class__(self, copy=True)
1062
+
1063
+ def sum(self, axis=None, dtype=None, out=None):
1064
+ """
1065
+ Sum the array/matrix elements over a given axis.
1066
+
1067
+ Parameters
1068
+ ----------
1069
+ axis : {-2, -1, 0, 1, None} optional
1070
+ Axis along which the sum is computed. The default is to
1071
+ compute the sum of all the array/matrix elements, returning a scalar
1072
+ (i.e., `axis` = `None`).
1073
+ dtype : dtype, optional
1074
+ The type of the returned array/matrix and of the accumulator in which
1075
+ the elements are summed. The dtype of `a` is used by default
1076
+ unless `a` has an integer dtype of less precision than the default
1077
+ platform integer. In that case, if `a` is signed then the platform
1078
+ integer is used while if `a` is unsigned then an unsigned integer
1079
+ of the same precision as the platform integer is used.
1080
+
1081
+ .. versionadded:: 0.18.0
1082
+
1083
+ out : np.matrix, optional
1084
+ Alternative output matrix in which to place the result. It must
1085
+ have the same shape as the expected output, but the type of the
1086
+ output values will be cast if necessary.
1087
+
1088
+ .. versionadded:: 0.18.0
1089
+
1090
+ Returns
1091
+ -------
1092
+ sum_along_axis : np.matrix
1093
+ A matrix with the same shape as `self`, with the specified
1094
+ axis removed.
1095
+
1096
+ See Also
1097
+ --------
1098
+ numpy.matrix.sum : NumPy's implementation of 'sum' for matrices
1099
+
1100
+ """
1101
+ validateaxis(axis)
1102
+
1103
+ # Mimic numpy's casting.
1104
+ res_dtype = get_sum_dtype(self.dtype)
1105
+
1106
+ if self.ndim == 1:
1107
+ if axis not in (None, -1, 0):
1108
+ raise ValueError("axis must be None, -1 or 0")
1109
+ ret = (self @ np.ones(self.shape, dtype=res_dtype)).astype(dtype)
1110
+
1111
+ if out is not None:
1112
+ if any(dim != 1 for dim in out.shape):
1113
+ raise ValueError("dimensions do not match")
1114
+ out[...] = ret
1115
+ return ret
1116
+
1117
+ # We use multiplication by a matrix of ones to achieve this.
1118
+ # For some sparse array formats more efficient methods are
1119
+ # possible -- these should override this function.
1120
+ M, N = self.shape
1121
+
1122
+ if axis is None:
1123
+ # sum over rows and columns
1124
+ return (
1125
+ self @ self._ascontainer(np.ones((N, 1), dtype=res_dtype))
1126
+ ).sum(dtype=dtype, out=out)
1127
+
1128
+ if axis < 0:
1129
+ axis += 2
1130
+
1131
+ # axis = 0 or 1 now
1132
+ if axis == 0:
1133
+ # sum over columns
1134
+ ret = self._ascontainer(
1135
+ np.ones((1, M), dtype=res_dtype)
1136
+ ) @ self
1137
+ else:
1138
+ # sum over rows
1139
+ ret = self @ self._ascontainer(
1140
+ np.ones((N, 1), dtype=res_dtype)
1141
+ )
1142
+
1143
+ if out is not None and out.shape != ret.shape:
1144
+ raise ValueError("dimensions do not match")
1145
+
1146
+ return ret.sum(axis=axis, dtype=dtype, out=out)
1147
+
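A quick usage sketch of `sum` (assuming `csr_array`); the axis variants reduce over columns or rows as described above.

```python
from scipy.sparse import csr_array

A = csr_array([[1, 0, 2], [0, 3, 0]])
print(A.sum())           # 6, scalar sum of all elements
print(A.sum(axis=0))     # column sums
print(A.sum(axis=1))     # row sums
```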
1148
+ def mean(self, axis=None, dtype=None, out=None):
1149
+ """
1150
+ Compute the arithmetic mean along the specified axis.
1151
+
1152
+ Returns the average of the array/matrix elements. The average is taken
1153
+ over all elements in the array/matrix by default, otherwise over the
1154
+ specified axis. `float64` intermediate and return values are used
1155
+ for integer inputs.
1156
+
1157
+ Parameters
1158
+ ----------
1159
+ axis : {-2, -1, 0, 1, None} optional
1160
+ Axis along which the mean is computed. The default is to compute
1161
+ the mean of all elements in the array/matrix (i.e., `axis` = `None`).
1162
+ dtype : data-type, optional
1163
+ Type to use in computing the mean. For integer inputs, the default
1164
+ is `float64`; for floating point inputs, it is the same as the
1165
+ input dtype.
1166
+
1167
+ .. versionadded:: 0.18.0
1168
+
1169
+ out : np.matrix, optional
1170
+ Alternative output matrix in which to place the result. It must
1171
+ have the same shape as the expected output, but the type of the
1172
+ output values will be cast if necessary.
1173
+
1174
+ .. versionadded:: 0.18.0
1175
+
1176
+ Returns
1177
+ -------
1178
+ m : np.matrix
1179
+
1180
+ See Also
1181
+ --------
1182
+ numpy.matrix.mean : NumPy's implementation of 'mean' for matrices
1183
+
1184
+ """
1185
+ validateaxis(axis)
1186
+
1187
+ res_dtype = self.dtype.type
1188
+ integral = (np.issubdtype(self.dtype, np.integer) or
1189
+ np.issubdtype(self.dtype, np.bool_))
1190
+
1191
+ # output dtype
1192
+ if dtype is None:
1193
+ if integral:
1194
+ res_dtype = np.float64
1195
+ else:
1196
+ res_dtype = np.dtype(dtype).type
1197
+
1198
+ # intermediate dtype for summation
1199
+ inter_dtype = np.float64 if integral else res_dtype
1200
+ inter_self = self.astype(inter_dtype)
1201
+
1202
+ if self.ndim == 1:
1203
+ if axis not in (None, -1, 0):
1204
+ raise ValueError("axis must be None, -1 or 0")
1205
+ res = inter_self / self.shape[-1]
1206
+ return res.sum(dtype=res_dtype, out=out)
1207
+
1208
+ if axis is None:
1209
+ return (inter_self / (self.shape[0] * self.shape[1]))\
1210
+ .sum(dtype=res_dtype, out=out)
1211
+
1212
+ if axis < 0:
1213
+ axis += 2
1214
+
1215
+ # axis = 0 or 1 now
1216
+ if axis == 0:
1217
+ return (inter_self * (1.0 / self.shape[0])).sum(
1218
+ axis=0, dtype=res_dtype, out=out)
1219
+ else:
1220
+ return (inter_self * (1.0 / self.shape[1])).sum(
1221
+ axis=1, dtype=res_dtype, out=out)
1222
+
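And a matching sketch for `mean`; note that implicit zeros count toward the denominator and integer input is promoted to `float64`, per the docstring above.

```python
from scipy.sparse import csr_array

A = csr_array([[1, 0, 2], [0, 3, 0]])
print(A.mean())          # 1.0  (6 / 6 elements, promoted to float64)
print(A.mean(axis=1))    # per-row means, zeros included in the denominator
```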
1223
+ def diagonal(self, k=0):
1224
+ """Returns the kth diagonal of the array/matrix.
1225
+
1226
+ Parameters
1227
+ ----------
1228
+ k : int, optional
1229
+ Which diagonal to get, corresponding to elements a[i, i+k].
1230
+ Default: 0 (the main diagonal).
1231
+
1232
+ .. versionadded:: 1.0
1233
+
1234
+ See also
1235
+ --------
1236
+ numpy.diagonal : Equivalent numpy function.
1237
+
1238
+ Examples
1239
+ --------
1240
+ >>> from scipy.sparse import csr_array
1241
+ >>> A = csr_array([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
1242
+ >>> A.diagonal()
1243
+ array([1, 0, 5])
1244
+ >>> A.diagonal(k=1)
1245
+ array([2, 3])
1246
+ """
1247
+ return self.tocsr().diagonal(k=k)
1248
+
1249
+ def trace(self, offset=0):
1250
+ """Returns the sum along diagonals of the sparse array/matrix.
1251
+
1252
+ Parameters
1253
+ ----------
1254
+ offset : int, optional
1255
+ Which diagonal to get, corresponding to elements a[i, i+offset].
1256
+ Default: 0 (the main diagonal).
1257
+
1258
+ """
1259
+ return self.diagonal(k=offset).sum()
1260
+
1261
+ def setdiag(self, values, k=0):
1262
+ """
1263
+ Set diagonal or off-diagonal elements of the array/matrix.
1264
+
1265
+ Parameters
1266
+ ----------
1267
+ values : array_like
1268
+ New values of the diagonal elements.
1269
+
1270
+ Values may have any length. If the diagonal is longer than values,
1271
+ then the remaining diagonal entries will not be set. If values are
1272
+ longer than the diagonal, then the remaining values are ignored.
1273
+
1274
+ If a scalar value is given, all of the diagonal is set to it.
1275
+
1276
+ k : int, optional
1277
+ Which off-diagonal to set, corresponding to elements a[i,i+k].
1278
+ Default: 0 (the main diagonal).
1279
+
1280
+ """
1281
+ M, N = self.shape
1282
+ if (k > 0 and k >= N) or (k < 0 and -k >= M):
1283
+ raise ValueError("k exceeds array dimensions")
1284
+ self._setdiag(np.asarray(values), k)
1285
+
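A small sketch of `setdiag` using `lil_array`, which supports cheap item assignment (the generic `_setdiag` fallback below assigns element by element).

```python
from scipy.sparse import lil_array

A = lil_array((4, 4))
A.setdiag(1.0)               # scalar is broadcast along the main diagonal
A.setdiag([5, 6, 7], k=1)    # first superdiagonal; surplus values would be ignored
print(A.toarray())
```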
1286
+ def _setdiag(self, values, k):
1287
+ """This part of the implementation gets overridden by the
1288
+ different formats.
1289
+ """
1290
+ M, N = self.shape
1291
+ if k < 0:
1292
+ if values.ndim == 0:
1293
+ # broadcast
1294
+ max_index = min(M+k, N)
1295
+ for i in range(max_index):
1296
+ self[i - k, i] = values
1297
+ else:
1298
+ max_index = min(M+k, N, len(values))
1299
+ if max_index <= 0:
1300
+ return
1301
+ for i, v in enumerate(values[:max_index]):
1302
+ self[i - k, i] = v
1303
+ else:
1304
+ if values.ndim == 0:
1305
+ # broadcast
1306
+ max_index = min(M, N-k)
1307
+ for i in range(max_index):
1308
+ self[i, i + k] = values
1309
+ else:
1310
+ max_index = min(M, N-k, len(values))
1311
+ if max_index <= 0:
1312
+ return
1313
+ for i, v in enumerate(values[:max_index]):
1314
+ self[i, i + k] = v
1315
+
1316
+ def _process_toarray_args(self, order, out):
1317
+ if out is not None:
1318
+ if order is not None:
1319
+ raise ValueError('order cannot be specified if out '
1320
+ 'is not None')
1321
+ if out.shape != self.shape or out.dtype != self.dtype:
1322
+ raise ValueError('out array must be same dtype and shape as '
1323
+ 'sparse array')
1324
+ out[...] = 0.
1325
+ return out
1326
+ else:
1327
+ return np.zeros(self.shape, dtype=self.dtype, order=order)
1328
+
1329
+ def _get_index_dtype(self, arrays=(), maxval=None, check_contents=False):
1330
+ """
1331
+ Determine index dtype for array.
1332
+
1333
+ This wraps _sputils.get_index_dtype, providing compatibility for both
1334
+ array and matrix API sparse matrices. Matrix API sparse matrices would
1335
+ attempt to downcast the indices - which can be computationally
1336
+ expensive and undesirable for users. The array API changes this
1337
+ behaviour.
1338
+
1339
+ See discussion: https://github.com/scipy/scipy/issues/16774
1340
+
1341
+ The get_index_dtype import is due to implementation details of the test
1342
+ suite. It allows the decorator ``with_64bit_maxval_limit`` to mock a
1343
+ lower int32 max value for checks on the matrix API's downcasting
1344
+ behaviour.
1345
+ """
1346
+ from ._sputils import get_index_dtype
1347
+
1348
+ # Don't check contents for array API
1349
+ return get_index_dtype(arrays,
1350
+ maxval,
1351
+ (check_contents and not isinstance(self, sparray)))
1352
+
1353
+
1354
+ ## All methods below are deprecated and should be removed in
1355
+ ## scipy 1.14.0
1356
+ ##
1357
+ ## Also uncomment the definition of shape above.
1358
+
1359
+ def get_shape(self):
1360
+ """Get shape of a sparse array/matrix.
1361
+
1362
+ .. deprecated:: 1.11.0
1363
+ This method will be removed in SciPy 1.14.0.
1364
+ Use `X.shape` instead.
1365
+ """
1366
+ msg = (
1367
+ "`get_shape` is deprecated and will be removed in v1.14.0; "
1368
+ "use `X.shape` instead."
1369
+ )
1370
+ warn(msg, DeprecationWarning, stacklevel=2)
1371
+
1372
+ return self._shape
1373
+
1374
+ def set_shape(self, shape):
1375
+ """See `reshape`.
1376
+
1377
+ .. deprecated:: 1.11.0
1378
+ This method will be removed in SciPy 1.14.0.
1379
+ Use `X.reshape` instead.
1380
+ """
1381
+ msg = (
1382
+ "Shape assignment is deprecated and will be removed in v1.14.0; "
1383
+ "use `reshape` instead."
1384
+ )
1385
+ warn(msg, DeprecationWarning, stacklevel=2)
1386
+
1387
+ # Make sure copy is False since this is in place
1388
+ # Make sure format is unchanged because we are doing a __dict__ swap
1389
+ new_self = self.reshape(shape, copy=False).asformat(self.format)
1390
+ self.__dict__ = new_self.__dict__
1391
+
1392
+ shape = property(
1393
+ fget=lambda self: self._shape,
1394
+ fset=set_shape,
1395
+ doc="""The shape of the array.
1396
+
1397
+ Note that, starting in SciPy 1.14.0, this property will no longer be
1398
+ settable. To change the array shape, use `X.reshape` instead.
1399
+ """
1400
+ )
1401
+
1402
+ def asfptype(self):
1403
+ """Upcast array/matrix to a floating point format (if necessary)
1404
+
1405
+ .. deprecated:: 1.11.0
1406
+ This method is for internal use only, and will be removed from the
1407
+ public API in SciPy 1.14.0.
1408
+ """
1409
+ msg = (
1410
+ "`asfptype` is an internal function, and is deprecated "
1411
+ "as part of the public API. It will be removed in v1.14.0."
1412
+ )
1413
+ warn(msg, DeprecationWarning, stacklevel=2)
1414
+ return self._asfptype()
1415
+
1416
+ def getmaxprint(self):
1417
+ """Maximum number of elements to display when printed.
1418
+
1419
+ .. deprecated:: 1.11.0
1420
+ This method is for internal use only, and will be removed from the
1421
+ public API in SciPy 1.14.0.
1422
+ """
1423
+ msg = (
1424
+ "`getmaxprint` is an internal function, and is deprecated "
1425
+ "as part of the public API. It will be removed in v1.14.0."
1426
+ )
1427
+ warn(msg, DeprecationWarning, stacklevel=2)
1428
+ return self._getmaxprint()
1429
+
1430
+ def getformat(self):
1431
+ """Sparse array/matrix storage format.
1432
+
1433
+ .. deprecated:: 1.11.0
1434
+ This method will be removed in SciPy 1.14.0.
1435
+ Use `X.format` instead.
1436
+ """
1437
+ msg = (
1438
+ "`getformat` is deprecated and will be removed in v1.14.0; "
1439
+ "use `X.format` instead."
1440
+ )
1441
+ warn(msg, DeprecationWarning, stacklevel=2)
1442
+ return self.format
1443
+
1444
+ def getnnz(self, axis=None):
1445
+ """Number of stored values, including explicit zeros.
1446
+
1447
+ Parameters
1448
+ ----------
1449
+ axis : None, 0, or 1
1450
+ Select between the number of values across the whole array/matrix, in
1451
+ each column, or in each row.
1452
+
1453
+ See also
1454
+ --------
1455
+ count_nonzero : Number of non-zero entries
1456
+ """
1457
+ return self._getnnz(axis=axis)
1458
+
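For reference, a usage sketch of `getnnz` and the related `nnz` property (stored entries, including explicit zeros):

```python
from scipy.sparse import csr_array

A = csr_array([[1, 0, 0], [0, 2, 3]])
print(A.nnz)               # 3
print(A.getnnz(axis=0))    # stored entries per column
print(A.getnnz(axis=1))    # stored entries per row
```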
1459
+ def getH(self):
1460
+ """Return the Hermitian transpose of this array/matrix.
1461
+
1462
+ .. deprecated:: 1.11.0
1463
+ This method will be removed in SciPy 1.14.0.
1464
+ Use `X.conj().T` instead.
1465
+ """
1466
+ msg = (
1467
+ "`getH` is deprecated and will be removed in v1.14.0; "
1468
+ "use `X.conj().T` instead."
1469
+ )
1470
+ warn(msg, DeprecationWarning, stacklevel=2)
1471
+ return self.conjugate().transpose()
1472
+
1473
+ def getcol(self, j):
1474
+ """Returns a copy of column j of the array/matrix, as an (m x 1) sparse
1475
+ array/matrix (column vector).
1476
+
1477
+ .. deprecated:: 1.11.0
1478
+ This method will be removed in SciPy 1.14.0.
1479
+ Use array/matrix indexing instead.
1480
+ """
1481
+ msg = (
1482
+ "`getcol` is deprecated and will be removed in v1.14.0; "
1483
+ f"use `X[:, [{j}]]` instead."
1484
+ )
1485
+ warn(msg, DeprecationWarning, stacklevel=2)
1486
+ return self._getcol(j)
1487
+
1488
+ def getrow(self, i):
1489
+ """Returns a copy of row i of the array/matrix, as a (1 x n) sparse
1490
+ array/matrix (row vector).
1491
+
1492
+ .. deprecated:: 1.11.0
1493
+ This method will be removed in SciPy 1.14.0.
1494
+ Use array/matrix indexing instead.
1495
+ """
1496
+ msg = (
1497
+ "`getrow` is deprecated and will be removed in v1.14.0; "
1498
+ f"use `X[[{i}]]` instead."
1499
+ )
1500
+ warn(msg, DeprecationWarning, stacklevel=2)
1501
+ return self._getrow(i)
1502
+
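The deprecation messages above point to plain indexing; a sketch of the recommended replacements for `getrow`/`getcol`:

```python
from scipy.sparse import csr_array

A = csr_array([[1, 0, 2], [0, 3, 0]])
row0 = A[[0]]        # replaces the deprecated A.getrow(0); stays sparse, shape (1, 3)
col2 = A[:, [2]]     # replaces the deprecated A.getcol(2); stays sparse, shape (2, 1)
print(row0.toarray(), col2.toarray())
```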
1503
+ ## End 1.14.0 deprecated methods
1504
+
1505
+
1506
+ class sparray:
1507
+ """A namespace class to separate sparray from spmatrix"""
1508
+ pass
1509
+
1510
+ sparray.__doc__ = _spbase.__doc__
1511
+
1512
+
1513
+ def issparse(x):
1514
+ """Is `x` of a sparse array or sparse matrix type?
1515
+
1516
+ Parameters
1517
+ ----------
1518
+ x
1519
+ object to check for being a sparse array or sparse matrix
1520
+
1521
+ Returns
1522
+ -------
1523
+ bool
1524
+ True if `x` is a sparse array or a sparse matrix, False otherwise
1525
+
1526
+ Examples
1527
+ --------
1528
+ >>> import numpy as np
1529
+ >>> from scipy.sparse import csr_array, csr_matrix, issparse
1530
+ >>> issparse(csr_matrix([[5]]))
1531
+ True
1532
+ >>> issparse(csr_array([[5]]))
1533
+ True
1534
+ >>> issparse(np.array([[5]]))
1535
+ False
1536
+ >>> issparse(5)
1537
+ False
1538
+ """
1539
+ return isinstance(x, _spbase)
1540
+
1541
+
1542
+ def isspmatrix(x):
1543
+ """Is `x` of a sparse matrix type?
1544
+
1545
+ Parameters
1546
+ ----------
1547
+ x
1548
+ object to check for being a sparse matrix
1549
+
1550
+ Returns
1551
+ -------
1552
+ bool
1553
+ True if `x` is a sparse matrix, False otherwise
1554
+
1555
+ Examples
1556
+ --------
1557
+ >>> import numpy as np
1558
+ >>> from scipy.sparse import csr_array, csr_matrix, isspmatrix
1559
+ >>> isspmatrix(csr_matrix([[5]]))
1560
+ True
1561
+ >>> isspmatrix(csr_array([[5]]))
1562
+ False
1563
+ >>> isspmatrix(np.array([[5]]))
1564
+ False
1565
+ >>> isspmatrix(5)
1566
+ False
1567
+ """
1568
+ return isinstance(x, spmatrix)
llmeval-env/lib/python3.10/site-packages/scipy/sparse/_bsr.py ADDED
@@ -0,0 +1,855 @@
1
+ """Compressed Block Sparse Row format"""
2
+
3
+ __docformat__ = "restructuredtext en"
4
+
5
+ __all__ = ['bsr_array', 'bsr_matrix', 'isspmatrix_bsr']
6
+
7
+ from warnings import warn
8
+
9
+ import numpy as np
10
+
11
+ from scipy._lib._util import copy_if_needed
12
+ from ._matrix import spmatrix
13
+ from ._data import _data_matrix, _minmax_mixin
14
+ from ._compressed import _cs_matrix
15
+ from ._base import issparse, _formats, _spbase, sparray
16
+ from ._sputils import (isshape, getdtype, getdata, to_native, upcast,
17
+ check_shape)
18
+ from . import _sparsetools
19
+ from ._sparsetools import (bsr_matvec, bsr_matvecs, csr_matmat_maxnnz,
20
+ bsr_matmat, bsr_transpose, bsr_sort_indices,
21
+ bsr_tocsr)
22
+
23
+
24
+ class _bsr_base(_cs_matrix, _minmax_mixin):
25
+ _format = 'bsr'
26
+
27
+ def __init__(self, arg1, shape=None, dtype=None, copy=False, blocksize=None):
28
+ _data_matrix.__init__(self)
29
+
30
+ if issparse(arg1):
31
+ if arg1.format == self.format and copy:
32
+ arg1 = arg1.copy()
33
+ else:
34
+ arg1 = arg1.tobsr(blocksize=blocksize)
35
+ self.indptr, self.indices, self.data, self._shape = (
36
+ arg1.indptr, arg1.indices, arg1.data, arg1._shape
37
+ )
38
+
39
+ elif isinstance(arg1,tuple):
40
+ if isshape(arg1):
41
+ # it's a tuple of matrix dimensions (M,N)
42
+ self._shape = check_shape(arg1)
43
+ M,N = self.shape
44
+ # process blocksize
45
+ if blocksize is None:
46
+ blocksize = (1,1)
47
+ else:
48
+ if not isshape(blocksize):
49
+ raise ValueError('invalid blocksize=%s' % blocksize)
50
+ blocksize = tuple(blocksize)
51
+ self.data = np.zeros((0,) + blocksize, getdtype(dtype, default=float))
52
+
53
+ R,C = blocksize
54
+ if (M % R) != 0 or (N % C) != 0:
55
+ raise ValueError('shape must be multiple of blocksize')
56
+
57
+ # Select index dtype large enough to pass array and
58
+ # scalar parameters to sparsetools
59
+ idx_dtype = self._get_index_dtype(maxval=max(M//R, N//C, R, C))
60
+ self.indices = np.zeros(0, dtype=idx_dtype)
61
+ self.indptr = np.zeros(M//R + 1, dtype=idx_dtype)
62
+
63
+ elif len(arg1) == 2:
64
+ # (data,(row,col)) format
65
+ coo = self._coo_container(arg1, dtype=dtype, shape=shape)
66
+ bsr = coo.tobsr(blocksize=blocksize)
67
+ self.indptr, self.indices, self.data, self._shape = (
68
+ bsr.indptr, bsr.indices, bsr.data, bsr._shape
69
+ )
70
+
71
+ elif len(arg1) == 3:
72
+ # (data,indices,indptr) format
73
+ (data, indices, indptr) = arg1
74
+
75
+ # Select index dtype large enough to pass array and
76
+ # scalar parameters to sparsetools
77
+ maxval = 1
78
+ if shape is not None:
79
+ maxval = max(shape)
80
+ if blocksize is not None:
81
+ maxval = max(maxval, max(blocksize))
82
+ idx_dtype = self._get_index_dtype((indices, indptr), maxval=maxval,
83
+ check_contents=True)
84
+ if not copy:
85
+ copy = copy_if_needed
86
+ self.indices = np.array(indices, copy=copy, dtype=idx_dtype)
87
+ self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype)
88
+ self.data = getdata(data, copy=copy, dtype=dtype)
89
+ if self.data.ndim != 3:
90
+ raise ValueError(
91
+ f'BSR data must be 3-dimensional, got shape={self.data.shape}'
92
+ )
93
+ if blocksize is not None:
94
+ if not isshape(blocksize):
95
+ raise ValueError(f'invalid blocksize={blocksize}')
96
+ if tuple(blocksize) != self.data.shape[1:]:
97
+ raise ValueError('mismatching blocksize={} vs {}'.format(
98
+ blocksize, self.data.shape[1:]))
99
+ else:
100
+ raise ValueError('unrecognized bsr_array constructor usage')
101
+ else:
102
+ # must be dense
103
+ try:
104
+ arg1 = np.asarray(arg1)
105
+ except Exception as e:
106
+ raise ValueError("unrecognized form for"
107
+ " %s_matrix constructor" % self.format) from e
108
+ arg1 = self._coo_container(
109
+ arg1, dtype=dtype
110
+ ).tobsr(blocksize=blocksize)
111
+ self.indptr, self.indices, self.data, self._shape = (
112
+ arg1.indptr, arg1.indices, arg1.data, arg1._shape
113
+ )
114
+
115
+ if shape is not None:
116
+ self._shape = check_shape(shape)
117
+ else:
118
+ if self.shape is None:
119
+ # shape not already set, try to infer dimensions
120
+ try:
121
+ M = len(self.indptr) - 1
122
+ N = self.indices.max() + 1
123
+ except Exception as e:
124
+ raise ValueError('unable to infer matrix dimensions') from e
125
+ else:
126
+ R,C = self.blocksize
127
+ self._shape = check_shape((M*R,N*C))
128
+
129
+ if self.shape is None:
130
+ if shape is None:
131
+ # TODO infer shape here
132
+ raise ValueError('need to infer shape')
133
+ else:
134
+ self._shape = check_shape(shape)
135
+
136
+ if dtype is not None:
137
+ self.data = self.data.astype(dtype, copy=False)
138
+
139
+ self.check_format(full_check=False)
140
+
141
+ def check_format(self, full_check=True):
142
+ """Check whether the array/matrix respects the BSR format.
143
+
144
+ Parameters
145
+ ----------
146
+ full_check : bool, optional
147
+ If `True`, run a rigorous check, scanning arrays for valid values.
148
+ Note that activating those checks might copy arrays for casting,
149
+ modifying indices and index pointers in place.
150
+ If `False`, run basic checks on attributes. O(1) operations.
151
+ Default is `True`.
152
+ """
153
+ M,N = self.shape
154
+ R,C = self.blocksize
155
+
156
+ # index arrays should have integer data types
157
+ if self.indptr.dtype.kind != 'i':
158
+ warn(f"indptr array has non-integer dtype ({self.indptr.dtype.name})",
159
+ stacklevel=2)
160
+ if self.indices.dtype.kind != 'i':
161
+ warn(f"indices array has non-integer dtype ({self.indices.dtype.name})",
162
+ stacklevel=2)
163
+
164
+ # check array shapes
165
+ if self.indices.ndim != 1 or self.indptr.ndim != 1:
166
+ raise ValueError("indices, and indptr should be 1-D")
167
+ if self.data.ndim != 3:
168
+ raise ValueError("data should be 3-D")
169
+
170
+ # check index pointer
171
+ if (len(self.indptr) != M//R + 1):
172
+ raise ValueError("index pointer size (%d) should be (%d)" %
173
+ (len(self.indptr), M//R + 1))
174
+ if (self.indptr[0] != 0):
175
+ raise ValueError("index pointer should start with 0")
176
+
177
+ # check index and data arrays
178
+ if (len(self.indices) != len(self.data)):
179
+ raise ValueError("indices and data should have the same size")
180
+ if (self.indptr[-1] > len(self.indices)):
181
+ raise ValueError("Last value of index pointer should be less than "
182
+ "the size of index and data arrays")
183
+
184
+ self.prune()
185
+
186
+ if full_check:
187
+ # check format validity (more expensive)
188
+ if self.nnz > 0:
189
+ if self.indices.max() >= N//C:
190
+ raise ValueError("column index values must be < %d (now max %d)"
191
+ % (N//C, self.indices.max()))
192
+ if self.indices.min() < 0:
193
+ raise ValueError("column index values must be >= 0")
194
+ if np.diff(self.indptr).min() < 0:
195
+ raise ValueError("index pointer values must form a "
196
+ "non-decreasing sequence")
197
+
198
+ idx_dtype = self._get_index_dtype((self.indices, self.indptr))
199
+ self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
200
+ self.indices = np.asarray(self.indices, dtype=idx_dtype)
201
+ self.data = to_native(self.data)
202
+ # if not self.has_sorted_indices():
203
+ # warn('Indices were not in sorted order. Sorting indices.')
204
+ # self.sort_indices(check_first=False)
205
+
206
+ @property
207
+ def blocksize(self) -> tuple:
208
+ """Block size of the matrix."""
209
+ return self.data.shape[1:]
210
+
211
+ def _getnnz(self, axis=None):
212
+ if axis is not None:
213
+ raise NotImplementedError("_getnnz over an axis is not implemented "
214
+ "for BSR format")
215
+ R,C = self.blocksize
216
+ return int(self.indptr[-1] * R * C)
217
+
218
+ _getnnz.__doc__ = _spbase._getnnz.__doc__
219
+
220
+ def __repr__(self):
221
+ _, fmt = _formats[self.format]
222
+ sparse_cls = 'array' if isinstance(self, sparray) else 'matrix'
223
+ shape_str = 'x'.join(str(x) for x in self.shape)
224
+ blksz = 'x'.join(str(x) for x in self.blocksize)
225
+ return (
226
+ f"<{shape_str} sparse {sparse_cls} of type '{self.dtype.type}'\n"
227
+ f"\twith {self.nnz} stored elements (blocksize = {blksz}) in {fmt} format>"
228
+ )
229
+
230
+ def diagonal(self, k=0):
231
+ rows, cols = self.shape
232
+ if k <= -rows or k >= cols:
233
+ return np.empty(0, dtype=self.data.dtype)
234
+ R, C = self.blocksize
235
+ y = np.zeros(min(rows + min(k, 0), cols - max(k, 0)),
236
+ dtype=upcast(self.dtype))
237
+ _sparsetools.bsr_diagonal(k, rows // R, cols // C, R, C,
238
+ self.indptr, self.indices,
239
+ np.ravel(self.data), y)
240
+ return y
241
+
242
+ diagonal.__doc__ = _spbase.diagonal.__doc__
243
+
244
+ ##########################
245
+ # NotImplemented methods #
246
+ ##########################
247
+
248
+ def __getitem__(self,key):
249
+ raise NotImplementedError
250
+
251
+ def __setitem__(self,key,val):
252
+ raise NotImplementedError
253
+
254
+ ######################
255
+ # Arithmetic methods #
256
+ ######################
257
+
258
+ def _add_dense(self, other):
259
+ return self.tocoo(copy=False)._add_dense(other)
260
+
261
+ def _matmul_vector(self, other):
262
+ M,N = self.shape
263
+ R,C = self.blocksize
264
+
265
+ result = np.zeros(self.shape[0], dtype=upcast(self.dtype, other.dtype))
266
+
267
+ bsr_matvec(M//R, N//C, R, C,
268
+ self.indptr, self.indices, self.data.ravel(),
269
+ other, result)
270
+
271
+ return result
272
+
273
+ def _matmul_multivector(self,other):
274
+ R,C = self.blocksize
275
+ M,N = self.shape
276
+ n_vecs = other.shape[1] # number of column vectors
277
+
278
+ result = np.zeros((M,n_vecs), dtype=upcast(self.dtype,other.dtype))
279
+
280
+ bsr_matvecs(M//R, N//C, n_vecs, R, C,
281
+ self.indptr, self.indices, self.data.ravel(),
282
+ other.ravel(), result.ravel())
283
+
284
+ return result
285
+
286
+ def _matmul_sparse(self, other):
287
+ M, K1 = self.shape
288
+ K2, N = other.shape
289
+
290
+ R,n = self.blocksize
291
+
292
+ # convert to this format
293
+ if other.format == "bsr":
294
+ C = other.blocksize[1]
295
+ else:
296
+ C = 1
297
+
298
+ if other.format == "csr" and n == 1:
299
+ other = other.tobsr(blocksize=(n,C), copy=False) # lightweight conversion
300
+ else:
301
+ other = other.tobsr(blocksize=(n,C))
302
+
303
+ idx_dtype = self._get_index_dtype((self.indptr, self.indices,
304
+ other.indptr, other.indices))
305
+
306
+ bnnz = csr_matmat_maxnnz(M//R, N//C,
307
+ self.indptr.astype(idx_dtype),
308
+ self.indices.astype(idx_dtype),
309
+ other.indptr.astype(idx_dtype),
310
+ other.indices.astype(idx_dtype))
311
+
312
+ idx_dtype = self._get_index_dtype((self.indptr, self.indices,
313
+ other.indptr, other.indices),
314
+ maxval=bnnz)
315
+ indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
316
+ indices = np.empty(bnnz, dtype=idx_dtype)
317
+ data = np.empty(R*C*bnnz, dtype=upcast(self.dtype,other.dtype))
318
+
319
+ bsr_matmat(bnnz, M//R, N//C, R, C, n,
320
+ self.indptr.astype(idx_dtype),
321
+ self.indices.astype(idx_dtype),
322
+ np.ravel(self.data),
323
+ other.indptr.astype(idx_dtype),
324
+ other.indices.astype(idx_dtype),
325
+ np.ravel(other.data),
326
+ indptr,
327
+ indices,
328
+ data)
329
+
330
+ data = data.reshape(-1,R,C)
331
+
332
+ # TODO eliminate zeros
333
+
334
+ return self._bsr_container(
335
+ (data, indices, indptr), shape=(M, N), blocksize=(R, C)
336
+ )
337
+
338
+ ######################
339
+ # Conversion methods #
340
+ ######################
341
+
342
+ def tobsr(self, blocksize=None, copy=False):
343
+ """Convert this array/matrix into Block Sparse Row Format.
344
+
345
+ With copy=False, the data/indices may be shared between this
346
+ array/matrix and the resultant bsr_array/bsr_matrix.
347
+
348
+ If blocksize=(R, C) is provided, it will be used for determining
349
+ block size of the bsr_array/bsr_matrix.
350
+ """
351
+ if blocksize not in [None, self.blocksize]:
352
+ return self.tocsr().tobsr(blocksize=blocksize)
353
+ if copy:
354
+ return self.copy()
355
+ else:
356
+ return self
357
+
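A usage sketch of re-blocking into BSR (the blocksize must evenly divide the shape, as validated in `__init__` above):

```python
import numpy as np
from scipy.sparse import csr_array

dense = np.kron(np.eye(3), np.ones((2, 2)))   # 6x6 matrix made of three dense 2x2 blocks
B = csr_array(dense).tobsr(blocksize=(2, 2))
print(B.blocksize)    # (2, 2)
print(B.nnz)          # 12 stored values: 3 blocks of 2x2 each
```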
358
+ def tocsr(self, copy=False):
359
+ M, N = self.shape
360
+ R, C = self.blocksize
361
+ nnz = self.nnz
362
+ idx_dtype = self._get_index_dtype((self.indptr, self.indices),
363
+ maxval=max(nnz, N))
364
+ indptr = np.empty(M + 1, dtype=idx_dtype)
365
+ indices = np.empty(nnz, dtype=idx_dtype)
366
+ data = np.empty(nnz, dtype=upcast(self.dtype))
367
+
368
+ bsr_tocsr(M // R, # n_brow
369
+ N // C, # n_bcol
370
+ R, C,
371
+ self.indptr.astype(idx_dtype, copy=False),
372
+ self.indices.astype(idx_dtype, copy=False),
373
+ self.data,
374
+ indptr,
375
+ indices,
376
+ data)
377
+ return self._csr_container((data, indices, indptr), shape=self.shape)
378
+
379
+ tocsr.__doc__ = _spbase.tocsr.__doc__
380
+
381
+ def tocsc(self, copy=False):
382
+ return self.tocsr(copy=False).tocsc(copy=copy)
383
+
384
+ tocsc.__doc__ = _spbase.tocsc.__doc__
385
+
386
+ def tocoo(self, copy=True):
387
+ """Convert this array/matrix to COOrdinate format.
388
+
389
+ When copy=False the data array will be shared between
390
+ this array/matrix and the resultant coo_array/coo_matrix.
391
+ """
392
+
393
+ M,N = self.shape
394
+ R,C = self.blocksize
395
+
396
+ indptr_diff = np.diff(self.indptr)
397
+ if indptr_diff.dtype.itemsize > np.dtype(np.intp).itemsize:
398
+ # Check for potential overflow
399
+ indptr_diff_limited = indptr_diff.astype(np.intp)
400
+ if np.any(indptr_diff_limited != indptr_diff):
401
+ raise ValueError("Matrix too big to convert")
402
+ indptr_diff = indptr_diff_limited
403
+
404
+ idx_dtype = self._get_index_dtype(maxval=max(M, N))
405
+ row = (R * np.arange(M//R, dtype=idx_dtype)).repeat(indptr_diff)
406
+ row = row.repeat(R*C).reshape(-1,R,C)
407
+ row += np.tile(np.arange(R, dtype=idx_dtype).reshape(-1,1), (1,C))
408
+ row = row.reshape(-1)
409
+
410
+ col = ((C * self.indices).astype(idx_dtype, copy=False)
411
+ .repeat(R*C).reshape(-1,R,C))
412
+ col += np.tile(np.arange(C, dtype=idx_dtype), (R,1))
413
+ col = col.reshape(-1)
414
+
415
+ data = self.data.reshape(-1)
416
+
417
+ if copy:
418
+ data = data.copy()
419
+
420
+ return self._coo_container(
421
+ (data, (row, col)), shape=self.shape
422
+ )
423
+
424
+ def toarray(self, order=None, out=None):
425
+ return self.tocoo(copy=False).toarray(order=order, out=out)
426
+
427
+ toarray.__doc__ = _spbase.toarray.__doc__
428
+
429
+ def transpose(self, axes=None, copy=False):
430
+ if axes is not None and axes != (1, 0):
431
+ raise ValueError("Sparse matrices do not support "
432
+ "an 'axes' parameter because swapping "
433
+ "dimensions is the only logical permutation.")
434
+
435
+ R, C = self.blocksize
436
+ M, N = self.shape
437
+ NBLK = self.nnz//(R*C)
438
+
439
+ if self.nnz == 0:
440
+ return self._bsr_container((N, M), blocksize=(C, R),
441
+ dtype=self.dtype, copy=copy)
442
+
443
+ indptr = np.empty(N//C + 1, dtype=self.indptr.dtype)
444
+ indices = np.empty(NBLK, dtype=self.indices.dtype)
445
+ data = np.empty((NBLK, C, R), dtype=self.data.dtype)
446
+
447
+ bsr_transpose(M//R, N//C, R, C,
448
+ self.indptr, self.indices, self.data.ravel(),
449
+ indptr, indices, data.ravel())
450
+
451
+ return self._bsr_container((data, indices, indptr),
452
+ shape=(N, M), copy=copy)
453
+
454
+ transpose.__doc__ = _spbase.transpose.__doc__
455
+
456
+ ##############################################################
457
+ # methods that examine or modify the internal data structure #
458
+ ##############################################################
459
+
460
+ def eliminate_zeros(self):
461
+ """Remove zero elements in-place."""
462
+
463
+ if not self.nnz:
464
+ return # nothing to do
465
+
466
+ R,C = self.blocksize
467
+ M,N = self.shape
468
+
469
+ mask = (self.data != 0).reshape(-1,R*C).sum(axis=1) # nonzero blocks
470
+
471
+ nonzero_blocks = mask.nonzero()[0]
472
+
473
+ self.data[:len(nonzero_blocks)] = self.data[nonzero_blocks]
474
+
475
+ # modifies self.indptr and self.indices *in place*
476
+ _sparsetools.csr_eliminate_zeros(M//R, N//C, self.indptr,
477
+ self.indices, mask)
478
+ self.prune()
479
+
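A sketch of `eliminate_zeros` on a hand-built BSR array that stores an explicit all-zero block; the block is dropped in place.

```python
import numpy as np
from scipy.sparse import bsr_array

data = np.array([[[1, 0], [0, 0]],      # block containing a nonzero entry
                 [[0, 0], [0, 0]]])     # explicitly stored all-zero block
indices = np.array([0, 1])
indptr = np.array([0, 2])
A = bsr_array((data, indices, indptr), shape=(2, 4))
print(A.nnz)             # 8 stored values before cleanup
A.eliminate_zeros()      # drops the all-zero block in place
print(A.nnz)             # 4
```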
480
+ def sum_duplicates(self):
481
+ """Eliminate duplicate array/matrix entries by adding them together
482
+
483
+ This is an *in place* operation.
484
+ """
485
+ if self.has_canonical_format:
486
+ return
487
+ self.sort_indices()
488
+ R, C = self.blocksize
489
+ M, N = self.shape
490
+
491
+ # port of _sparsetools.csr_sum_duplicates
492
+ n_row = M // R
493
+ nnz = 0
494
+ row_end = 0
495
+ for i in range(n_row):
496
+ jj = row_end
497
+ row_end = self.indptr[i+1]
498
+ while jj < row_end:
499
+ j = self.indices[jj]
500
+ x = self.data[jj]
501
+ jj += 1
502
+ while jj < row_end and self.indices[jj] == j:
503
+ x += self.data[jj]
504
+ jj += 1
505
+ self.indices[nnz] = j
506
+ self.data[nnz] = x
507
+ nnz += 1
508
+ self.indptr[i+1] = nnz
509
+
510
+ self.prune() # nnz may have changed
511
+ self.has_canonical_format = True
512
+
513
+ def sort_indices(self):
514
+ """Sort the indices of this array/matrix *in place*
515
+ """
516
+ if self.has_sorted_indices:
517
+ return
518
+
519
+ R,C = self.blocksize
520
+ M,N = self.shape
521
+
522
+ bsr_sort_indices(M//R, N//C, R, C, self.indptr, self.indices, self.data.ravel())
523
+
524
+ self.has_sorted_indices = True
525
+
526
+ def prune(self):
527
+ """Remove empty space after all non-zero elements.
528
+ """
529
+
530
+ R,C = self.blocksize
531
+ M,N = self.shape
532
+
533
+ if len(self.indptr) != M//R + 1:
534
+ raise ValueError("index pointer has invalid length")
535
+
536
+ bnnz = self.indptr[-1]
537
+
538
+ if len(self.indices) < bnnz:
539
+ raise ValueError("indices array has too few elements")
540
+ if len(self.data) < bnnz:
541
+ raise ValueError("data array has too few elements")
542
+
543
+ self.data = self.data[:bnnz]
544
+ self.indices = self.indices[:bnnz]
545
+
546
+ # utility functions
547
+ def _binopt(self, other, op, in_shape=None, out_shape=None):
548
+ """Apply the binary operation fn to two sparse matrices."""
549
+
550
+ # Ideally we'd take the GCDs of the blocksize dimensions
551
+ # and explode self and other to match.
552
+ other = self.__class__(other, blocksize=self.blocksize)
553
+
554
+ # e.g. bsr_plus_bsr, etc.
555
+ fn = getattr(_sparsetools, self.format + op + self.format)
556
+
557
+ R,C = self.blocksize
558
+
559
+ max_bnnz = len(self.data) + len(other.data)
560
+ idx_dtype = self._get_index_dtype((self.indptr, self.indices,
561
+ other.indptr, other.indices),
562
+ maxval=max_bnnz)
563
+ indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
564
+ indices = np.empty(max_bnnz, dtype=idx_dtype)
565
+
566
+ bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']
567
+ if op in bool_ops:
568
+ data = np.empty(R*C*max_bnnz, dtype=np.bool_)
569
+ else:
570
+ data = np.empty(R*C*max_bnnz, dtype=upcast(self.dtype,other.dtype))
571
+
572
+ fn(self.shape[0]//R, self.shape[1]//C, R, C,
573
+ self.indptr.astype(idx_dtype),
574
+ self.indices.astype(idx_dtype),
575
+ self.data,
576
+ other.indptr.astype(idx_dtype),
577
+ other.indices.astype(idx_dtype),
578
+ np.ravel(other.data),
579
+ indptr,
580
+ indices,
581
+ data)
582
+
583
+ actual_bnnz = indptr[-1]
584
+ indices = indices[:actual_bnnz]
585
+ data = data[:R*C*actual_bnnz]
586
+
587
+ if actual_bnnz < max_bnnz/2:
588
+ indices = indices.copy()
589
+ data = data.copy()
590
+
591
+ data = data.reshape(-1,R,C)
592
+
593
+ return self.__class__((data, indices, indptr), shape=self.shape)
594
+
595
+ # needed by _data_matrix
596
+ def _with_data(self,data,copy=True):
597
+ """Returns a matrix with the same sparsity structure as self,
598
+ but with different data. By default the structure arrays
599
+ (i.e. .indptr and .indices) are copied.
600
+ """
601
+ if copy:
602
+ return self.__class__((data,self.indices.copy(),self.indptr.copy()),
603
+ shape=self.shape,dtype=data.dtype)
604
+ else:
605
+ return self.__class__((data,self.indices,self.indptr),
606
+ shape=self.shape,dtype=data.dtype)
607
+
608
+ # # these functions are used by the parent class
609
+ # # to remove redundancy between bsc_matrix and bsr_matrix
610
+ # def _swap(self,x):
611
+ # """swap the members of x if this is a column-oriented matrix
612
+ # """
613
+ # return (x[0],x[1])
614
+
615
+
616
+ def isspmatrix_bsr(x):
617
+ """Is `x` of a bsr_matrix type?
618
+
619
+ Parameters
620
+ ----------
621
+ x
622
+ object to check for being a bsr matrix
623
+
624
+ Returns
625
+ -------
626
+ bool
627
+ True if `x` is a bsr matrix, False otherwise
628
+
629
+ Examples
630
+ --------
631
+ >>> from scipy.sparse import bsr_array, bsr_matrix, csr_matrix, isspmatrix_bsr
632
+ >>> isspmatrix_bsr(bsr_matrix([[5]]))
633
+ True
634
+ >>> isspmatrix_bsr(bsr_array([[5]]))
635
+ False
636
+ >>> isspmatrix_bsr(csr_matrix([[5]]))
637
+ False
638
+ """
639
+ return isinstance(x, bsr_matrix)
640
+
641
+
642
+ # This namespace class separates array from matrix with isinstance
643
+ class bsr_array(_bsr_base, sparray):
644
+ """
645
+ Block Sparse Row format sparse array.
646
+
647
+ This can be instantiated in several ways:
648
+ bsr_array(D, [blocksize=(R,C)])
649
+ where D is a 2-D ndarray.
650
+
651
+ bsr_array(S, [blocksize=(R,C)])
652
+ with another sparse array or matrix S (equivalent to S.tobsr())
653
+
654
+ bsr_array((M, N), [blocksize=(R,C), dtype])
655
+ to construct an empty sparse array with shape (M, N)
656
+ dtype is optional, defaulting to dtype='d'.
657
+
658
+ bsr_array((data, ij), [blocksize=(R,C), shape=(M, N)])
659
+ where ``data`` and ``ij`` satisfy ``a[ij[0, k], ij[1, k]] = data[k]``
660
+
661
+ bsr_array((data, indices, indptr), [shape=(M, N)])
662
+ is the standard BSR representation where the block column
663
+ indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]``
664
+ and their corresponding block values are stored in
665
+ ``data[ indptr[i]: indptr[i+1] ]``. If the shape parameter is not
666
+ supplied, the array dimensions are inferred from the index arrays.
667
+
668
+ Attributes
669
+ ----------
670
+ dtype : dtype
671
+ Data type of the array
672
+ shape : 2-tuple
673
+ Shape of the array
674
+ ndim : int
675
+ Number of dimensions (this is always 2)
676
+ nnz
677
+ size
678
+ data
679
+ BSR format data array of the array
680
+ indices
681
+ BSR format index array of the array
682
+ indptr
683
+ BSR format index pointer array of the array
684
+ blocksize
685
+ Block size
686
+ has_sorted_indices : bool
687
+ Whether indices are sorted
688
+ has_canonical_format : bool
689
+ T
690
+
691
+ Notes
692
+ -----
693
+ Sparse arrays can be used in arithmetic operations: they support
694
+ addition, subtraction, multiplication, division, and matrix power.
695
+
696
+ **Summary of BSR format**
697
+
698
+ The Block Sparse Row (BSR) format is very similar to the Compressed
699
+ Sparse Row (CSR) format. BSR is appropriate for sparse matrices with dense
700
+ sub matrices like the last example below. Such sparse block matrices often
701
+ arise in vector-valued finite element discretizations. In such cases, BSR is
702
+ considerably more efficient than CSR and CSC for many sparse arithmetic
703
+ operations.
704
+
705
+ **Blocksize**
706
+
707
+ The blocksize (R,C) must evenly divide the shape of the sparse array (M,N).
708
+ That is, R and C must satisfy the relationship ``M % R = 0`` and
709
+ ``N % C = 0``.
710
+
711
+ If no blocksize is specified, a simple heuristic is applied to determine
712
+ an appropriate blocksize.
713
+
714
+ **Canonical Format**
715
+
716
+ In canonical format, there are no duplicate blocks and indices are sorted
717
+ per row.
718
+
719
+ Examples
720
+ --------
721
+ >>> import numpy as np
722
+ >>> from scipy.sparse import bsr_array
723
+ >>> bsr_array((3, 4), dtype=np.int8).toarray()
724
+ array([[0, 0, 0, 0],
725
+ [0, 0, 0, 0],
726
+ [0, 0, 0, 0]], dtype=int8)
727
+
728
+ >>> row = np.array([0, 0, 1, 2, 2, 2])
729
+ >>> col = np.array([0, 2, 2, 0, 1, 2])
730
+ >>> data = np.array([1, 2, 3 ,4, 5, 6])
731
+ >>> bsr_array((data, (row, col)), shape=(3, 3)).toarray()
732
+ array([[1, 0, 2],
733
+ [0, 0, 3],
734
+ [4, 5, 6]])
735
+
736
+ >>> indptr = np.array([0, 2, 3, 6])
737
+ >>> indices = np.array([0, 2, 2, 0, 1, 2])
738
+ >>> data = np.array([1, 2, 3, 4, 5, 6]).repeat(4).reshape(6, 2, 2)
739
+ >>> bsr_array((data,indices,indptr), shape=(6, 6)).toarray()
740
+ array([[1, 1, 0, 0, 2, 2],
741
+ [1, 1, 0, 0, 2, 2],
742
+ [0, 0, 0, 0, 3, 3],
743
+ [0, 0, 0, 0, 3, 3],
744
+ [4, 4, 5, 5, 6, 6],
745
+ [4, 4, 5, 5, 6, 6]])
746
+
747
+ """
748
+
749
+
750
+ class bsr_matrix(spmatrix, _bsr_base):
751
+ """
752
+ Block Sparse Row format sparse matrix.
753
+
754
+ This can be instantiated in several ways:
755
+ bsr_matrix(D, [blocksize=(R,C)])
756
+ where D is a 2-D ndarray.
757
+
758
+ bsr_matrix(S, [blocksize=(R,C)])
759
+ with another sparse array or matrix S (equivalent to S.tobsr())
760
+
761
+ bsr_matrix((M, N), [blocksize=(R,C), dtype])
762
+ to construct an empty sparse matrix with shape (M, N)
763
+ dtype is optional, defaulting to dtype='d'.
764
+
765
+ bsr_matrix((data, ij), [blocksize=(R,C), shape=(M, N)])
766
+ where ``data`` and ``ij`` satisfy ``a[ij[0, k], ij[1, k]] = data[k]``
767
+
768
+ bsr_matrix((data, indices, indptr), [shape=(M, N)])
769
+ is the standard BSR representation where the block column
770
+ indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]``
771
+ and their corresponding block values are stored in
772
+ ``data[ indptr[i]: indptr[i+1] ]``. If the shape parameter is not
773
+ supplied, the matrix dimensions are inferred from the index arrays.
774
+
775
+ Attributes
776
+ ----------
777
+ dtype : dtype
778
+ Data type of the matrix
779
+ shape : 2-tuple
780
+ Shape of the matrix
781
+ ndim : int
782
+ Number of dimensions (this is always 2)
783
+ nnz
784
+ size
785
+ data
786
+ BSR format data array of the matrix
787
+ indices
788
+ BSR format index array of the matrix
789
+ indptr
790
+ BSR format index pointer array of the matrix
791
+ blocksize
792
+ Block size
793
+ has_sorted_indices : bool
794
+ Whether indices are sorted
795
+ has_canonical_format : bool
796
+ T
797
+
798
+ Notes
799
+ -----
800
+ Sparse matrices can be used in arithmetic operations: they support
801
+ addition, subtraction, multiplication, division, and matrix power.
802
+
803
+ **Summary of BSR format**
804
+
805
+ The Block Sparse Row (BSR) format is very similar to the Compressed
806
+ Sparse Row (CSR) format. BSR is appropriate for sparse matrices with dense
807
+ sub matrices like the last example below. Such sparse block matrices often
808
+ arise in vector-valued finite element discretizations. In such cases, BSR is
809
+ considerably more efficient than CSR and CSC for many sparse arithmetic
810
+ operations.
811
+
812
+ **Blocksize**
813
+
814
+ The blocksize (R,C) must evenly divide the shape of the sparse matrix (M,N).
815
+ That is, R and C must satisfy the relationship ``M % R = 0`` and
816
+ ``N % C = 0``.
817
+
818
+ If no blocksize is specified, a simple heuristic is applied to determine
819
+ an appropriate blocksize.
820
+
821
+ **Canonical Format**
822
+
823
+ In canonical format, there are no duplicate blocks and indices are sorted
824
+ per row.
825
+
826
+ Examples
827
+ --------
828
+ >>> import numpy as np
829
+ >>> from scipy.sparse import bsr_matrix
830
+ >>> bsr_matrix((3, 4), dtype=np.int8).toarray()
831
+ array([[0, 0, 0, 0],
832
+ [0, 0, 0, 0],
833
+ [0, 0, 0, 0]], dtype=int8)
834
+
835
+ >>> row = np.array([0, 0, 1, 2, 2, 2])
836
+ >>> col = np.array([0, 2, 2, 0, 1, 2])
837
+ >>> data = np.array([1, 2, 3 ,4, 5, 6])
838
+ >>> bsr_matrix((data, (row, col)), shape=(3, 3)).toarray()
839
+ array([[1, 0, 2],
840
+ [0, 0, 3],
841
+ [4, 5, 6]])
842
+
843
+ >>> indptr = np.array([0, 2, 3, 6])
844
+ >>> indices = np.array([0, 2, 2, 0, 1, 2])
845
+ >>> data = np.array([1, 2, 3, 4, 5, 6]).repeat(4).reshape(6, 2, 2)
846
+ >>> bsr_matrix((data,indices,indptr), shape=(6, 6)).toarray()
847
+ array([[1, 1, 0, 0, 2, 2],
848
+ [1, 1, 0, 0, 2, 2],
849
+ [0, 0, 0, 0, 3, 3],
850
+ [0, 0, 0, 0, 3, 3],
851
+ [4, 4, 5, 5, 6, 6],
852
+ [4, 4, 5, 5, 6, 6]])
853
+
854
+ """
855
+
llmeval-env/lib/python3.10/site-packages/scipy/sparse/_compressed.py ADDED
@@ -0,0 +1,1367 @@
1
+ """Base class for sparse matrix formats using compressed storage."""
2
+ __all__ = []
3
+
4
+ from warnings import warn
5
+ import operator
6
+
7
+ import numpy as np
8
+ from scipy._lib._util import _prune_array, copy_if_needed
9
+
10
+ from ._base import _spbase, issparse, SparseEfficiencyWarning
11
+ from ._data import _data_matrix, _minmax_mixin
12
+ from . import _sparsetools
13
+ from ._sparsetools import (get_csr_submatrix, csr_sample_offsets, csr_todense,
14
+ csr_sample_values, csr_row_index, csr_row_slice,
15
+ csr_column_index1, csr_column_index2)
16
+ from ._index import IndexMixin
17
+ from ._sputils import (upcast, upcast_char, to_native, isdense, isshape,
18
+ getdtype, isscalarlike, isintlike, downcast_intp_index,
19
+ get_sum_dtype, check_shape, is_pydata_spmatrix)
20
+
21
+
22
+ class _cs_matrix(_data_matrix, _minmax_mixin, IndexMixin):
23
+ """
24
+ base array/matrix class for compressed row- and column-oriented arrays/matrices
25
+ """
26
+
27
+ def __init__(self, arg1, shape=None, dtype=None, copy=False):
28
+ _data_matrix.__init__(self)
29
+
30
+ if issparse(arg1):
31
+ if arg1.format == self.format and copy:
32
+ arg1 = arg1.copy()
33
+ else:
34
+ arg1 = arg1.asformat(self.format)
35
+ self.indptr, self.indices, self.data, self._shape = (
36
+ arg1.indptr, arg1.indices, arg1.data, arg1._shape
37
+ )
38
+
39
+ elif isinstance(arg1, tuple):
40
+ if isshape(arg1):
41
+ # It's a tuple of matrix dimensions (M, N)
42
+ # create empty matrix
43
+ self._shape = check_shape(arg1)
44
+ M, N = self.shape
45
+ # Select index dtype large enough to pass array and
46
+ # scalar parameters to sparsetools
47
+ idx_dtype = self._get_index_dtype(maxval=max(M, N))
48
+ self.data = np.zeros(0, getdtype(dtype, default=float))
49
+ self.indices = np.zeros(0, idx_dtype)
50
+ self.indptr = np.zeros(self._swap((M, N))[0] + 1,
51
+ dtype=idx_dtype)
52
+ else:
53
+ if len(arg1) == 2:
54
+ # (data, ij) format
55
+ coo = self._coo_container(arg1, shape=shape, dtype=dtype)
56
+ arrays = coo._coo_to_compressed(self._swap)
57
+ self.indptr, self.indices, self.data, self._shape = arrays
58
+ elif len(arg1) == 3:
59
+ # (data, indices, indptr) format
60
+ (data, indices, indptr) = arg1
61
+
62
+ # Select index dtype large enough to pass array and
63
+ # scalar parameters to sparsetools
64
+ maxval = None
65
+ if shape is not None:
66
+ maxval = max(shape)
67
+ idx_dtype = self._get_index_dtype((indices, indptr),
68
+ maxval=maxval,
69
+ check_contents=True)
70
+
71
+ if not copy:
72
+ copy = copy_if_needed
73
+ self.indices = np.array(indices, copy=copy, dtype=idx_dtype)
74
+ self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype)
75
+ self.data = np.array(data, copy=copy, dtype=dtype)
76
+ else:
77
+ raise ValueError(f"unrecognized {self.format}_matrix "
78
+ "constructor usage")
79
+
80
+ else:
81
+ # must be dense
82
+ try:
83
+ arg1 = np.asarray(arg1)
84
+ except Exception as e:
85
+ msg = f"unrecognized {self.format}_matrix constructor usage"
86
+ raise ValueError(msg) from e
87
+ coo = self._coo_container(arg1, dtype=dtype)
88
+ arrays = coo._coo_to_compressed(self._swap)
89
+ self.indptr, self.indices, self.data, self._shape = arrays
90
+
91
+ # Read matrix dimensions given, if any
92
+ if shape is not None:
93
+ self._shape = check_shape(shape)
94
+ else:
95
+ if self.shape is None:
96
+ # shape not already set, try to infer dimensions
97
+ try:
98
+ major_dim = len(self.indptr) - 1
99
+ minor_dim = self.indices.max() + 1
100
+ except Exception as e:
101
+ raise ValueError('unable to infer matrix dimensions') from e
102
+ else:
103
+ self._shape = check_shape(self._swap((major_dim, minor_dim)))
104
+
105
+ if dtype is not None:
106
+ self.data = self.data.astype(dtype, copy=False)
107
+
108
+ self.check_format(full_check=False)
109
+
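For reference, a sketch of the `(data, indices, indptr)` constructor branch handled above, using `csr_array`:

```python
import numpy as np
from scipy.sparse import csr_array

data = np.array([1.0, 2.0, 3.0])     # stored values
indices = np.array([0, 2, 1])        # column index of each value
indptr = np.array([0, 2, 3])         # row i owns data[indptr[i]:indptr[i+1]]
A = csr_array((data, indices, indptr), shape=(2, 3))
print(A.toarray())
# [[1. 0. 2.]
#  [0. 3. 0.]]
```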
110
+ def _getnnz(self, axis=None):
111
+ if axis is None:
112
+ return int(self.indptr[-1])
113
+ else:
114
+ if axis < 0:
115
+ axis += 2
116
+ axis, _ = self._swap((axis, 1 - axis))
117
+ _, N = self._swap(self.shape)
118
+ if axis == 0:
119
+ return np.bincount(downcast_intp_index(self.indices),
120
+ minlength=N)
121
+ elif axis == 1:
122
+ return np.diff(self.indptr)
123
+ raise ValueError('axis out of bounds')
124
+
125
+ _getnnz.__doc__ = _spbase._getnnz.__doc__
126
+
127
+ def check_format(self, full_check=True):
128
+ """Check whether the array/matrix respects the CSR or CSC format.
129
+
130
+ Parameters
131
+ ----------
132
+ full_check : bool, optional
133
+ If `True`, run a rigorous check, scanning arrays for valid values.
134
+ Note that activating those checks might copy arrays for casting,
135
+ modifying indices and index pointers in place.
136
+ If `False`, run basic checks on attributes. O(1) operations.
137
+ Default is `True`.
138
+ """
139
+ # use _swap to determine proper bounds
140
+ major_name, minor_name = self._swap(('row', 'column'))
141
+ major_dim, minor_dim = self._swap(self.shape)
142
+
143
+ # index arrays should have integer data types
144
+ if self.indptr.dtype.kind != 'i':
145
+ warn(f"indptr array has non-integer dtype ({self.indptr.dtype.name})",
146
+ stacklevel=3)
147
+ if self.indices.dtype.kind != 'i':
148
+ warn(f"indices array has non-integer dtype ({self.indices.dtype.name})",
149
+ stacklevel=3)
150
+
151
+ # check array shapes
152
+ for x in [self.data.ndim, self.indices.ndim, self.indptr.ndim]:
153
+ if x != 1:
154
+ raise ValueError('data, indices, and indptr should be 1-D')
155
+
156
+ # check index pointer
157
+ if (len(self.indptr) != major_dim + 1):
158
+ raise ValueError("index pointer size ({}) should be ({})"
159
+ "".format(len(self.indptr), major_dim + 1))
160
+ if (self.indptr[0] != 0):
161
+ raise ValueError("index pointer should start with 0")
162
+
163
+ # check index and data arrays
164
+ if (len(self.indices) != len(self.data)):
165
+ raise ValueError("indices and data should have the same size")
166
+ if (self.indptr[-1] > len(self.indices)):
167
+ raise ValueError("Last value of index pointer should be less than "
168
+ "the size of index and data arrays")
169
+
170
+ self.prune()
171
+
172
+ if full_check:
173
+ # check format validity (more expensive)
174
+ if self.nnz > 0:
175
+ if self.indices.max() >= minor_dim:
176
+ raise ValueError(f"{minor_name} index values must be < {minor_dim}")
177
+ if self.indices.min() < 0:
178
+ raise ValueError(f"{minor_name} index values must be >= 0")
179
+ if np.diff(self.indptr).min() < 0:
180
+ raise ValueError("index pointer values must form a "
181
+ "non-decreasing sequence")
182
+
183
+ idx_dtype = self._get_index_dtype((self.indptr, self.indices))
184
+ self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
185
+ self.indices = np.asarray(self.indices, dtype=idx_dtype)
186
+ self.data = to_native(self.data)
187
+
188
+ # if not self.has_sorted_indices():
189
+ # warn('Indices were not in sorted order. Sorting indices.')
190
+ # self.sort_indices()
191
+ # assert(self.has_sorted_indices())
192
+ # TODO check for duplicates?
193
+
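A hedged illustration of the full check above (public csr_array interface assumed): an out-of-range column index passes the cheap constructor-time check but is caught by a full check:

>>> import numpy as np
>>> from scipy.sparse import csr_array
>>> bad = csr_array((np.array([1.0]), np.array([5]), np.array([0, 1])), shape=(1, 3))
>>> bad.check_format(full_check=True)
Traceback (most recent call last):
    ...
ValueError: column index values must be < 3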
194
+ #######################
195
+ # Boolean comparisons #
196
+ #######################
197
+
198
+ def _scalar_binopt(self, other, op):
199
+ """Scalar version of self._binopt, for cases in which no new nonzeros
200
+ are added. Produces a new sparse array in canonical form.
201
+ """
202
+ self.sum_duplicates()
203
+ res = self._with_data(op(self.data, other), copy=True)
204
+ res.eliminate_zeros()
205
+ return res
206
+
207
+ def __eq__(self, other):
208
+ # Scalar other.
209
+ if isscalarlike(other):
210
+ if np.isnan(other):
211
+ return self.__class__(self.shape, dtype=np.bool_)
212
+
213
+ if other == 0:
214
+ warn("Comparing a sparse matrix with 0 using == is inefficient"
215
+ ", try using != instead.", SparseEfficiencyWarning,
216
+ stacklevel=3)
217
+ all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
218
+ inv = self._scalar_binopt(other, operator.ne)
219
+ return all_true - inv
220
+ else:
221
+ return self._scalar_binopt(other, operator.eq)
222
+ # Dense other.
223
+ elif isdense(other):
224
+ return self.todense() == other
225
+ # Pydata sparse other.
226
+ elif is_pydata_spmatrix(other):
227
+ return NotImplemented
228
+ # Sparse other.
229
+ elif issparse(other):
230
+ warn("Comparing sparse matrices using == is inefficient, try using"
231
+ " != instead.", SparseEfficiencyWarning, stacklevel=3)
232
+ # TODO sparse broadcasting
233
+ if self.shape != other.shape:
234
+ return False
235
+ elif self.format != other.format:
236
+ other = other.asformat(self.format)
237
+ res = self._binopt(other, '_ne_')
238
+ all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
239
+ return all_true - res
240
+ else:
241
+ return NotImplemented
242
+
243
+ def __ne__(self, other):
244
+ # Scalar other.
245
+ if isscalarlike(other):
246
+ if np.isnan(other):
247
+ warn("Comparing a sparse matrix with nan using != is"
248
+ " inefficient", SparseEfficiencyWarning, stacklevel=3)
249
+ all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
250
+ return all_true
251
+ elif other != 0:
252
+ warn("Comparing a sparse matrix with a nonzero scalar using !="
253
+ " is inefficient, try using == instead.",
254
+ SparseEfficiencyWarning, stacklevel=3)
255
+ all_true = self.__class__(np.ones(self.shape), dtype=np.bool_)
256
+ inv = self._scalar_binopt(other, operator.eq)
257
+ return all_true - inv
258
+ else:
259
+ return self._scalar_binopt(other, operator.ne)
260
+ # Dense other.
261
+ elif isdense(other):
262
+ return self.todense() != other
263
+ # Pydata sparse other.
264
+ elif is_pydata_spmatrix(other):
265
+ return NotImplemented
266
+ # Sparse other.
267
+ elif issparse(other):
268
+ # TODO sparse broadcasting
269
+ if self.shape != other.shape:
270
+ return True
271
+ elif self.format != other.format:
272
+ other = other.asformat(self.format)
273
+ return self._binopt(other, '_ne_')
274
+ else:
275
+ return NotImplemented
276
+
277
+ def _inequality(self, other, op, op_name, bad_scalar_msg):
278
+ # Scalar other.
279
+ if isscalarlike(other):
280
+ if 0 == other and op_name in ('_le_', '_ge_'):
281
+ raise NotImplementedError(" >= and <= don't work with 0.")
282
+ elif op(0, other):
283
+ warn(bad_scalar_msg, SparseEfficiencyWarning, stacklevel=3)
284
+ other_arr = np.empty(self.shape, dtype=np.result_type(other))
285
+ other_arr.fill(other)
286
+ other_arr = self.__class__(other_arr)
287
+ return self._binopt(other_arr, op_name)
288
+ else:
289
+ return self._scalar_binopt(other, op)
290
+ # Dense other.
291
+ elif isdense(other):
292
+ return op(self.todense(), other)
293
+ # Sparse other.
294
+ elif issparse(other):
295
+ # TODO sparse broadcasting
296
+ if self.shape != other.shape:
297
+ raise ValueError("inconsistent shapes")
298
+ elif self.format != other.format:
299
+ other = other.asformat(self.format)
300
+ if op_name not in ('_ge_', '_le_'):
301
+ return self._binopt(other, op_name)
302
+
303
+ warn("Comparing sparse matrices using >= and <= is inefficient, "
304
+ "using <, >, or !=, instead.",
305
+ SparseEfficiencyWarning, stacklevel=3)
306
+ all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
307
+ res = self._binopt(other, '_gt_' if op_name == '_le_' else '_lt_')
308
+ return all_true - res
309
+ else:
310
+ return NotImplemented
311
+
312
+ def __lt__(self, other):
313
+ return self._inequality(other, operator.lt, '_lt_',
314
+ "Comparing a sparse matrix with a scalar "
315
+ "greater than zero using < is inefficient, "
316
+ "try using >= instead.")
317
+
318
+ def __gt__(self, other):
319
+ return self._inequality(other, operator.gt, '_gt_',
320
+ "Comparing a sparse matrix with a scalar "
321
+ "less than zero using > is inefficient, "
322
+ "try using <= instead.")
323
+
324
+ def __le__(self, other):
325
+ return self._inequality(other, operator.le, '_le_',
326
+ "Comparing a sparse matrix with a scalar "
327
+ "greater than zero using <= is inefficient, "
328
+ "try using > instead.")
329
+
330
+ def __ge__(self, other):
331
+ return self._inequality(other, operator.ge, '_ge_',
332
+ "Comparing a sparse matrix with a scalar "
333
+ "less than zero using >= is inefficient, "
334
+ "try using < instead.")
335
+
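A brief sketch of the scalar comparison path above (csr_array assumed): comparing against a nonzero scalar stays sparse, while comparisons whose result would be mostly True emit the SparseEfficiencyWarning shown above:

>>> import numpy as np
>>> from scipy.sparse import csr_array
>>> A = csr_array(np.array([[0, 1], [2, 0]]))
>>> (A > 1).toarray()
array([[False, False],
       [ True, False]])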
336
+ #################################
337
+ # Arithmetic operator overrides #
338
+ #################################
339
+
340
+ def _add_dense(self, other):
341
+ if other.shape != self.shape:
342
+ raise ValueError(f'Incompatible shapes ({self.shape} and {other.shape})')
343
+ dtype = upcast_char(self.dtype.char, other.dtype.char)
344
+ order = self._swap('CF')[0]
345
+ result = np.array(other, dtype=dtype, order=order, copy=True)
346
+ M, N = self._swap(self.shape)
347
+ y = result if result.flags.c_contiguous else result.T
348
+ csr_todense(M, N, self.indptr, self.indices, self.data, y)
349
+ return self._container(result, copy=False)
350
+
351
+ def _add_sparse(self, other):
352
+ return self._binopt(other, '_plus_')
353
+
354
+ def _sub_sparse(self, other):
355
+ return self._binopt(other, '_minus_')
356
+
357
+ def multiply(self, other):
358
+ """Point-wise multiplication by another array/matrix, vector, or
359
+ scalar.
360
+ """
361
+ # Scalar multiplication.
362
+ if isscalarlike(other):
363
+ return self._mul_scalar(other)
364
+ # Sparse matrix or vector.
365
+ if issparse(other):
366
+ if self.shape == other.shape:
367
+ other = self.__class__(other)
368
+ return self._binopt(other, '_elmul_')
369
+ if other.ndim == 1:
370
+ raise TypeError("broadcast from a 1d array not yet supported")
371
+ # Single element.
372
+ elif other.shape == (1, 1):
373
+ return self._mul_scalar(other.toarray()[0, 0])
374
+ elif self.shape == (1, 1):
375
+ return other._mul_scalar(self.toarray()[0, 0])
376
+ # A row times a column.
377
+ elif self.shape[1] == 1 and other.shape[0] == 1:
378
+ return self._matmul_sparse(other.tocsc())
379
+ elif self.shape[0] == 1 and other.shape[1] == 1:
380
+ return other._matmul_sparse(self.tocsc())
381
+ # Row vector times matrix. other is a row.
382
+ elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:
383
+ other = self._dia_container(
384
+ (other.toarray().ravel(), [0]),
385
+ shape=(other.shape[1], other.shape[1])
386
+ )
387
+ return self._matmul_sparse(other)
388
+ # self is a row.
389
+ elif self.shape[0] == 1 and self.shape[1] == other.shape[1]:
390
+ copy = self._dia_container(
391
+ (self.toarray().ravel(), [0]),
392
+ shape=(self.shape[1], self.shape[1])
393
+ )
394
+ return other._matmul_sparse(copy)
395
+ # Column vector times matrix. other is a column.
396
+ elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:
397
+ other = self._dia_container(
398
+ (other.toarray().ravel(), [0]),
399
+ shape=(other.shape[0], other.shape[0])
400
+ )
401
+ return other._matmul_sparse(self)
402
+ # self is a column.
403
+ elif self.shape[1] == 1 and self.shape[0] == other.shape[0]:
404
+ copy = self._dia_container(
405
+ (self.toarray().ravel(), [0]),
406
+ shape=(self.shape[0], self.shape[0])
407
+ )
408
+ return copy._matmul_sparse(other)
409
+ else:
410
+ raise ValueError("inconsistent shapes")
411
+
412
+ # Assume other is a dense matrix/array, which produces a single-item
413
+ # object array if other isn't convertible to ndarray.
414
+ other = np.atleast_2d(other)
415
+
416
+ if other.ndim != 2:
417
+ return np.multiply(self.toarray(), other)
418
+ # Single element / wrapped object.
419
+ if other.size == 1:
420
+ if other.dtype == np.object_:
421
+ # 'other' not convertible to ndarray.
422
+ return NotImplemented
423
+ return self._mul_scalar(other.flat[0])
424
+ # Fast case for trivial sparse matrix.
425
+ elif self.shape == (1, 1):
426
+ return np.multiply(self.toarray()[0, 0], other)
427
+
428
+ ret = self.tocoo()
429
+ # Matching shapes.
430
+ if self.shape == other.shape:
431
+ data = np.multiply(ret.data, other[ret.row, ret.col])
432
+ # Sparse row vector times...
433
+ elif self.shape[0] == 1:
434
+ if other.shape[1] == 1: # Dense column vector.
435
+ data = np.multiply(ret.data, other)
436
+ elif other.shape[1] == self.shape[1]: # Dense matrix.
437
+ data = np.multiply(ret.data, other[:, ret.col])
438
+ else:
439
+ raise ValueError("inconsistent shapes")
440
+ row = np.repeat(np.arange(other.shape[0]), len(ret.row))
441
+ col = np.tile(ret.col, other.shape[0])
442
+ return self._coo_container(
443
+ (data.view(np.ndarray).ravel(), (row, col)),
444
+ shape=(other.shape[0], self.shape[1]),
445
+ copy=False
446
+ )
447
+ # Sparse column vector times...
448
+ elif self.shape[1] == 1:
449
+ if other.shape[0] == 1: # Dense row vector.
450
+ data = np.multiply(ret.data[:, None], other)
451
+ elif other.shape[0] == self.shape[0]: # Dense matrix.
452
+ data = np.multiply(ret.data[:, None], other[ret.row])
453
+ else:
454
+ raise ValueError("inconsistent shapes")
455
+ row = np.repeat(ret.row, other.shape[1])
456
+ col = np.tile(np.arange(other.shape[1]), len(ret.col))
457
+ return self._coo_container(
458
+ (data.view(np.ndarray).ravel(), (row, col)),
459
+ shape=(self.shape[0], other.shape[1]),
460
+ copy=False
461
+ )
462
+ # Sparse matrix times dense row vector.
463
+ elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:
464
+ data = np.multiply(ret.data, other[:, ret.col].ravel())
465
+ # Sparse matrix times dense column vector.
466
+ elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:
467
+ data = np.multiply(ret.data, other[ret.row].ravel())
468
+ else:
469
+ raise ValueError("inconsistent shapes")
470
+ ret.data = data.view(np.ndarray).ravel()
471
+ return ret
472
+
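A small sketch of the dense-operand broadcasting handled above (csr_array assumed; this branch returns its result in COO form, so it is converted for display):

>>> import numpy as np
>>> from scipy.sparse import csr_array
>>> A = csr_array(np.array([[1, 0, 2], [0, 3, 0]]))
>>> A.multiply(np.array([[10, 20, 30]])).toarray()
array([[10,  0, 60],
       [ 0, 60,  0]])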
473
+ ###########################
474
+ # Multiplication handlers #
475
+ ###########################
476
+
477
+ def _matmul_vector(self, other):
478
+ M, N = self.shape
479
+
480
+ # output array
481
+ result = np.zeros(M, dtype=upcast_char(self.dtype.char,
482
+ other.dtype.char))
483
+
484
+ # csr_matvec or csc_matvec
485
+ fn = getattr(_sparsetools, self.format + '_matvec')
486
+ fn(M, N, self.indptr, self.indices, self.data, other, result)
487
+
488
+ return result
489
+
490
+ def _matmul_multivector(self, other):
491
+ M, N = self.shape
492
+ n_vecs = other.shape[1] # number of column vectors
493
+
494
+ result = np.zeros((M, n_vecs),
495
+ dtype=upcast_char(self.dtype.char, other.dtype.char))
496
+
497
+ # csr_matvecs or csc_matvecs
498
+ fn = getattr(_sparsetools, self.format + '_matvecs')
499
+ fn(M, N, n_vecs, self.indptr, self.indices, self.data,
500
+ other.ravel(), result.ravel())
501
+
502
+ return result
503
+
504
+ def _matmul_sparse(self, other):
505
+ M, K1 = self.shape
506
+ K2, N = other.shape
507
+
508
+ major_axis = self._swap((M, N))[0]
509
+ other = self.__class__(other) # convert to this format
510
+
511
+ idx_dtype = self._get_index_dtype((self.indptr, self.indices,
512
+ other.indptr, other.indices))
513
+
514
+ fn = getattr(_sparsetools, self.format + '_matmat_maxnnz')
515
+ nnz = fn(M, N,
516
+ np.asarray(self.indptr, dtype=idx_dtype),
517
+ np.asarray(self.indices, dtype=idx_dtype),
518
+ np.asarray(other.indptr, dtype=idx_dtype),
519
+ np.asarray(other.indices, dtype=idx_dtype))
520
+
521
+ idx_dtype = self._get_index_dtype((self.indptr, self.indices,
522
+ other.indptr, other.indices),
523
+ maxval=nnz)
524
+
525
+ indptr = np.empty(major_axis + 1, dtype=idx_dtype)
526
+ indices = np.empty(nnz, dtype=idx_dtype)
527
+ data = np.empty(nnz, dtype=upcast(self.dtype, other.dtype))
528
+
529
+ fn = getattr(_sparsetools, self.format + '_matmat')
530
+ fn(M, N, np.asarray(self.indptr, dtype=idx_dtype),
531
+ np.asarray(self.indices, dtype=idx_dtype),
532
+ self.data,
533
+ np.asarray(other.indptr, dtype=idx_dtype),
534
+ np.asarray(other.indices, dtype=idx_dtype),
535
+ other.data,
536
+ indptr, indices, data)
537
+
538
+ return self.__class__((data, indices, indptr), shape=(M, N))
539
+
540
+ def diagonal(self, k=0):
541
+ rows, cols = self.shape
542
+ if k <= -rows or k >= cols:
543
+ return np.empty(0, dtype=self.data.dtype)
544
+ fn = getattr(_sparsetools, self.format + "_diagonal")
545
+ y = np.empty(min(rows + min(k, 0), cols - max(k, 0)),
546
+ dtype=upcast(self.dtype))
547
+ fn(k, self.shape[0], self.shape[1], self.indptr, self.indices,
548
+ self.data, y)
549
+ return y
550
+
551
+ diagonal.__doc__ = _spbase.diagonal.__doc__
552
+
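A short sketch of the offset handling in `diagonal` above (csr_array assumed):

>>> import numpy as np
>>> from scipy.sparse import csr_array
>>> A = csr_array(np.array([[1, 2, 0], [0, 3, 4]]))
>>> A.diagonal()
array([1, 3])
>>> A.diagonal(k=1)
array([2, 4])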
553
+ #####################
554
+ # Other binary ops #
555
+ #####################
556
+
557
+ def _maximum_minimum(self, other, npop, op_name, dense_check):
558
+ if isscalarlike(other):
559
+ if dense_check(other):
560
+ warn("Taking maximum (minimum) with > 0 (< 0) number results"
561
+ " to a dense matrix.", SparseEfficiencyWarning,
562
+ stacklevel=3)
563
+ other_arr = np.empty(self.shape, dtype=np.asarray(other).dtype)
564
+ other_arr.fill(other)
565
+ other_arr = self.__class__(other_arr)
566
+ return self._binopt(other_arr, op_name)
567
+ else:
568
+ self.sum_duplicates()
569
+ new_data = npop(self.data, np.asarray(other))
570
+ mat = self.__class__((new_data, self.indices, self.indptr),
571
+ dtype=new_data.dtype, shape=self.shape)
572
+ return mat
573
+ elif isdense(other):
574
+ return npop(self.todense(), other)
575
+ elif issparse(other):
576
+ return self._binopt(other, op_name)
577
+ else:
578
+ raise ValueError("Operands not compatible.")
579
+
580
+ def maximum(self, other):
581
+ return self._maximum_minimum(other, np.maximum,
582
+ '_maximum_', lambda x: np.asarray(x) > 0)
583
+
584
+ maximum.__doc__ = _spbase.maximum.__doc__
585
+
586
+ def minimum(self, other):
587
+ return self._maximum_minimum(other, np.minimum,
588
+ '_minimum_', lambda x: np.asarray(x) < 0)
589
+
590
+ minimum.__doc__ = _spbase.minimum.__doc__
591
+
592
+ #####################
593
+ # Reduce operations #
594
+ #####################
595
+
596
+ def sum(self, axis=None, dtype=None, out=None):
597
+ """Sum the array/matrix over the given axis. If the axis is None, sum
598
+ over both rows and columns, returning a scalar.
599
+ """
600
+ # The _spbase base class already does axis=0 and axis=1 efficiently
601
+ # so we only do the case axis=None here
602
+ if (not hasattr(self, 'blocksize') and
603
+ axis in self._swap(((1, -1), (0, 2)))[0]):
604
+ # faster than multiplication for large minor axis in CSC/CSR
605
+ res_dtype = get_sum_dtype(self.dtype)
606
+ ret = np.zeros(len(self.indptr) - 1, dtype=res_dtype)
607
+
608
+ major_index, value = self._minor_reduce(np.add)
609
+ ret[major_index] = value
610
+ ret = self._ascontainer(ret)
611
+ if axis % 2 == 1:
612
+ ret = ret.T
613
+
614
+ if out is not None and out.shape != ret.shape:
615
+ raise ValueError('dimensions do not match')
616
+
617
+ return ret.sum(axis=(), dtype=dtype, out=out)
618
+ # _spbase will handle the remaining situations when axis
619
+ # is in {None, -1, 0, 1}
620
+ else:
621
+ return _spbase.sum(self, axis=axis, dtype=dtype, out=out)
622
+
623
+ sum.__doc__ = _spbase.sum.__doc__
624
+
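A minimal sketch of the fast major-axis reduction above (csr_array assumed; for sparse arrays the reduced result is a 1-D ndarray):

>>> import numpy as np
>>> from scipy.sparse import csr_array
>>> A = csr_array(np.array([[1, 0, 2], [0, 3, 0]]))
>>> A.sum(axis=1)
array([3, 3])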
625
+ def _minor_reduce(self, ufunc, data=None):
626
+ """Reduce nonzeros with a ufunc over the minor axis when non-empty
627
+
628
+ Can be applied to a function of self.data by supplying data parameter.
629
+
630
+ Warning: this does not call sum_duplicates()
631
+
632
+ Returns
633
+ -------
634
+ major_index : array of ints
635
+ Major indices where nonzero
636
+
637
+ value : array of self.dtype
638
+ Reduce result for nonzeros in each major_index
639
+ """
640
+ if data is None:
641
+ data = self.data
642
+ major_index = np.flatnonzero(np.diff(self.indptr))
643
+ value = ufunc.reduceat(data,
644
+ downcast_intp_index(self.indptr[major_index]))
645
+ return major_index, value
646
+
647
+ #######################
648
+ # Getting and Setting #
649
+ #######################
650
+
651
+ def _get_intXint(self, row, col):
652
+ M, N = self._swap(self.shape)
653
+ major, minor = self._swap((row, col))
654
+ indptr, indices, data = get_csr_submatrix(
655
+ M, N, self.indptr, self.indices, self.data,
656
+ major, major + 1, minor, minor + 1)
657
+ return data.sum(dtype=self.dtype)
658
+
659
+ def _get_sliceXslice(self, row, col):
660
+ major, minor = self._swap((row, col))
661
+ if major.step in (1, None) and minor.step in (1, None):
662
+ return self._get_submatrix(major, minor, copy=True)
663
+ return self._major_slice(major)._minor_slice(minor)
664
+
665
+ def _get_arrayXarray(self, row, col):
666
+ # inner indexing
667
+ idx_dtype = self.indices.dtype
668
+ M, N = self._swap(self.shape)
669
+ major, minor = self._swap((row, col))
670
+ major = np.asarray(major, dtype=idx_dtype)
671
+ minor = np.asarray(minor, dtype=idx_dtype)
672
+
673
+ val = np.empty(major.size, dtype=self.dtype)
674
+ csr_sample_values(M, N, self.indptr, self.indices, self.data,
675
+ major.size, major.ravel(), minor.ravel(), val)
676
+ if major.ndim == 1:
677
+ return self._ascontainer(val)
678
+ return self.__class__(val.reshape(major.shape))
679
+
680
+ def _get_columnXarray(self, row, col):
681
+ # outer indexing
682
+ major, minor = self._swap((row, col))
683
+ return self._major_index_fancy(major)._minor_index_fancy(minor)
684
+
685
+ def _major_index_fancy(self, idx):
686
+ """Index along the major axis where idx is an array of ints.
687
+ """
688
+ idx_dtype = self._get_index_dtype((self.indptr, self.indices))
689
+ indices = np.asarray(idx, dtype=idx_dtype).ravel()
690
+
691
+ _, N = self._swap(self.shape)
692
+ M = len(indices)
693
+ new_shape = self._swap((M, N))
694
+ if M == 0:
695
+ return self.__class__(new_shape, dtype=self.dtype)
696
+
697
+ row_nnz = (self.indptr[indices + 1] - self.indptr[indices]).astype(idx_dtype)
698
+
699
+ res_indptr = np.zeros(M+1, dtype=idx_dtype)
700
+ np.cumsum(row_nnz, out=res_indptr[1:])
701
+
702
+ nnz = res_indptr[-1]
703
+ res_indices = np.empty(nnz, dtype=idx_dtype)
704
+ res_data = np.empty(nnz, dtype=self.dtype)
705
+ csr_row_index(
706
+ M,
707
+ indices,
708
+ self.indptr.astype(idx_dtype, copy=False),
709
+ self.indices.astype(idx_dtype, copy=False),
710
+ self.data,
711
+ res_indices,
712
+ res_data
713
+ )
714
+
715
+ return self.__class__((res_data, res_indices, res_indptr),
716
+ shape=new_shape, copy=False)
717
+
718
+ def _major_slice(self, idx, copy=False):
719
+ """Index along the major axis where idx is a slice object.
720
+ """
721
+ if idx == slice(None):
722
+ return self.copy() if copy else self
723
+
724
+ M, N = self._swap(self.shape)
725
+ start, stop, step = idx.indices(M)
726
+ M = len(range(start, stop, step))
727
+ new_shape = self._swap((M, N))
728
+ if M == 0:
729
+ return self.__class__(new_shape, dtype=self.dtype)
730
+
731
+ # Work out what slices are needed for `row_nnz`
732
+ # start,stop can be -1, only if step is negative
733
+ start0, stop0 = start, stop
734
+ if stop == -1 and start >= 0:
735
+ stop0 = None
736
+ start1, stop1 = start + 1, stop + 1
737
+
738
+ row_nnz = self.indptr[start1:stop1:step] - \
739
+ self.indptr[start0:stop0:step]
740
+ idx_dtype = self.indices.dtype
741
+ res_indptr = np.zeros(M+1, dtype=idx_dtype)
742
+ np.cumsum(row_nnz, out=res_indptr[1:])
743
+
744
+ if step == 1:
745
+ all_idx = slice(self.indptr[start], self.indptr[stop])
746
+ res_indices = np.array(self.indices[all_idx], copy=copy)
747
+ res_data = np.array(self.data[all_idx], copy=copy)
748
+ else:
749
+ nnz = res_indptr[-1]
750
+ res_indices = np.empty(nnz, dtype=idx_dtype)
751
+ res_data = np.empty(nnz, dtype=self.dtype)
752
+ csr_row_slice(start, stop, step, self.indptr, self.indices,
753
+ self.data, res_indices, res_data)
754
+
755
+ return self.__class__((res_data, res_indices, res_indptr),
756
+ shape=new_shape, copy=False)
757
+
758
+ def _minor_index_fancy(self, idx):
759
+ """Index along the minor axis where idx is an array of ints.
760
+ """
761
+ idx_dtype = self._get_index_dtype((self.indices, self.indptr))
762
+ indices = self.indices.astype(idx_dtype, copy=False)
763
+ indptr = self.indptr.astype(idx_dtype, copy=False)
764
+
765
+ idx = np.asarray(idx, dtype=idx_dtype).ravel()
766
+
767
+ M, N = self._swap(self.shape)
768
+ k = len(idx)
769
+ new_shape = self._swap((M, k))
770
+ if k == 0:
771
+ return self.__class__(new_shape, dtype=self.dtype)
772
+
773
+ # pass 1: count idx entries and compute new indptr
774
+ col_offsets = np.zeros(N, dtype=idx_dtype)
775
+ res_indptr = np.empty_like(self.indptr, dtype=idx_dtype)
776
+ csr_column_index1(
777
+ k,
778
+ idx,
779
+ M,
780
+ N,
781
+ indptr,
782
+ indices,
783
+ col_offsets,
784
+ res_indptr,
785
+ )
786
+
787
+ # pass 2: copy indices/data for selected idxs
788
+ col_order = np.argsort(idx).astype(idx_dtype, copy=False)
789
+ nnz = res_indptr[-1]
790
+ res_indices = np.empty(nnz, dtype=idx_dtype)
791
+ res_data = np.empty(nnz, dtype=self.dtype)
792
+ csr_column_index2(col_order, col_offsets, len(self.indices),
793
+ indices, self.data, res_indices, res_data)
794
+ return self.__class__((res_data, res_indices, res_indptr),
795
+ shape=new_shape, copy=False)
796
+
797
+ def _minor_slice(self, idx, copy=False):
798
+ """Index along the minor axis where idx is a slice object.
799
+ """
800
+ if idx == slice(None):
801
+ return self.copy() if copy else self
802
+
803
+ M, N = self._swap(self.shape)
804
+ start, stop, step = idx.indices(N)
805
+ N = len(range(start, stop, step))
806
+ if N == 0:
807
+ return self.__class__(self._swap((M, N)), dtype=self.dtype)
808
+ if step == 1:
809
+ return self._get_submatrix(minor=idx, copy=copy)
810
+ # TODO: don't fall back to fancy indexing here
811
+ return self._minor_index_fancy(np.arange(start, stop, step))
812
+
813
+ def _get_submatrix(self, major=None, minor=None, copy=False):
814
+ """Return a submatrix of this matrix.
815
+
816
+ major, minor: None, int, or slice with step 1
817
+ """
818
+ M, N = self._swap(self.shape)
819
+ i0, i1 = _process_slice(major, M)
820
+ j0, j1 = _process_slice(minor, N)
821
+
822
+ if i0 == 0 and j0 == 0 and i1 == M and j1 == N:
823
+ return self.copy() if copy else self
824
+
825
+ indptr, indices, data = get_csr_submatrix(
826
+ M, N, self.indptr, self.indices, self.data, i0, i1, j0, j1)
827
+
828
+ shape = self._swap((i1 - i0, j1 - j0))
829
+ return self.__class__((data, indices, indptr), shape=shape,
830
+ dtype=self.dtype, copy=False)
831
+
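A short sketch of how the slicing helpers above compose (csr_array assumed): a unit-step slice takes the `_get_submatrix` fast path, while a strided slice goes through the major/minor slice helpers:

>>> import numpy as np
>>> from scipy.sparse import csr_array
>>> A = csr_array(np.array([[1, 0, 2], [0, 3, 0], [4, 0, 5]]))
>>> A[1:, :].toarray()
array([[0, 3, 0],
       [4, 0, 5]])
>>> A[1:, ::2].toarray()
array([[0, 0],
       [4, 5]])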
832
+ def _set_intXint(self, row, col, x):
833
+ i, j = self._swap((row, col))
834
+ self._set_many(i, j, x)
835
+
836
+ def _set_arrayXarray(self, row, col, x):
837
+ i, j = self._swap((row, col))
838
+ self._set_many(i, j, x)
839
+
840
+ def _set_arrayXarray_sparse(self, row, col, x):
841
+ # clear entries that will be overwritten
842
+ self._zero_many(*self._swap((row, col)))
843
+
844
+ M, N = row.shape # matches col.shape
845
+ broadcast_row = M != 1 and x.shape[0] == 1
846
+ broadcast_col = N != 1 and x.shape[1] == 1
847
+ r, c = x.row, x.col
848
+
849
+ x = np.asarray(x.data, dtype=self.dtype)
850
+ if x.size == 0:
851
+ return
852
+
853
+ if broadcast_row:
854
+ r = np.repeat(np.arange(M), len(r))
855
+ c = np.tile(c, M)
856
+ x = np.tile(x, M)
857
+ if broadcast_col:
858
+ r = np.repeat(r, N)
859
+ c = np.tile(np.arange(N), len(c))
860
+ x = np.repeat(x, N)
861
+ # only assign entries in the new sparsity structure
862
+ i, j = self._swap((row[r, c], col[r, c]))
863
+ self._set_many(i, j, x)
864
+
865
+ def _setdiag(self, values, k):
866
+ if 0 in self.shape:
867
+ return
868
+
869
+ M, N = self.shape
870
+ broadcast = (values.ndim == 0)
871
+
872
+ if k < 0:
873
+ if broadcast:
874
+ max_index = min(M + k, N)
875
+ else:
876
+ max_index = min(M + k, N, len(values))
877
+ i = np.arange(-k, max_index - k, dtype=self.indices.dtype)
878
+ j = np.arange(max_index, dtype=self.indices.dtype)
879
+
880
+ else:
881
+ if broadcast:
882
+ max_index = min(M, N - k)
883
+ else:
884
+ max_index = min(M, N - k, len(values))
885
+ i = np.arange(max_index, dtype=self.indices.dtype)
886
+ j = np.arange(k, k + max_index, dtype=self.indices.dtype)
887
+
888
+ if not broadcast:
889
+ values = values[:len(i)]
890
+
891
+ x = np.atleast_1d(np.asarray(values, dtype=self.dtype)).ravel()
892
+ if x.squeeze().shape != i.squeeze().shape:
893
+ x = np.broadcast_to(x, i.shape)
894
+ if x.size == 0:
895
+ return
896
+
897
+ M, N = self._swap((M, N))
898
+ i, j = self._swap((i, j))
899
+ n_samples = x.size
900
+ offsets = np.empty(n_samples, dtype=self.indices.dtype)
901
+ ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
902
+ i, j, offsets)
903
+ if ret == 1:
904
+ # rinse and repeat
905
+ self.sum_duplicates()
906
+ csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
907
+ i, j, offsets)
908
+ if -1 not in offsets:
909
+ # only affects existing non-zero cells
910
+ self.data[offsets] = x
911
+ return
912
+
913
+ mask = (offsets <= -1)
914
+ # Boundary between csc and convert to coo
915
+ # The value 0.001 is justified in gh-19962#issuecomment-1920499678
916
+ if mask.sum() < self.nnz * 0.001:
917
+ # create new entries
918
+ i = i[mask]
919
+ j = j[mask]
920
+ self._insert_many(i, j, x[mask])
921
+ # replace existing entries
922
+ mask = ~mask
923
+ self.data[offsets[mask]] = x[mask]
924
+ else:
925
+ # convert to coo for _set_diag
926
+ coo = self.tocoo()
927
+ coo._setdiag(values, k)
928
+ arrays = coo._coo_to_compressed(self._swap)
929
+ self.indptr, self.indices, self.data, _ = arrays
930
+
931
+ def _prepare_indices(self, i, j):
932
+ M, N = self._swap(self.shape)
933
+
934
+ def check_bounds(indices, bound):
935
+ idx = indices.max()
936
+ if idx >= bound:
937
+ raise IndexError('index (%d) out of range (>= %d)' %
938
+ (idx, bound))
939
+ idx = indices.min()
940
+ if idx < -bound:
941
+ raise IndexError('index (%d) out of range (< -%d)' %
942
+ (idx, bound))
943
+
944
+ i = np.atleast_1d(np.asarray(i, dtype=self.indices.dtype)).ravel()
945
+ j = np.atleast_1d(np.asarray(j, dtype=self.indices.dtype)).ravel()
946
+ check_bounds(i, M)
947
+ check_bounds(j, N)
948
+ return i, j, M, N
949
+
950
+ def _set_many(self, i, j, x):
951
+ """Sets value at each (i, j) to x
952
+
953
+ Here (i,j) index major and minor respectively, and must not contain
954
+ duplicate entries.
955
+ """
956
+ i, j, M, N = self._prepare_indices(i, j)
957
+ x = np.atleast_1d(np.asarray(x, dtype=self.dtype)).ravel()
958
+
959
+ n_samples = x.size
960
+ offsets = np.empty(n_samples, dtype=self.indices.dtype)
961
+ ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
962
+ i, j, offsets)
963
+ if ret == 1:
964
+ # rinse and repeat
965
+ self.sum_duplicates()
966
+ csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
967
+ i, j, offsets)
968
+
969
+ if -1 not in offsets:
970
+ # only affects existing non-zero cells
971
+ self.data[offsets] = x
972
+ return
973
+
974
+ else:
975
+ warn("Changing the sparsity structure of a {}_matrix is expensive."
976
+ " lil_matrix is more efficient.".format(self.format),
977
+ SparseEfficiencyWarning, stacklevel=3)
978
+ # replace where possible
979
+ mask = offsets > -1
980
+ self.data[offsets[mask]] = x[mask]
981
+ # only insertions remain
982
+ mask = ~mask
983
+ i = i[mask]
984
+ i[i < 0] += M
985
+ j = j[mask]
986
+ j[j < 0] += N
987
+ self._insert_many(i, j, x[mask])
988
+
989
+ def _zero_many(self, i, j):
990
+ """Sets value at each (i, j) to zero, preserving sparsity structure.
991
+
992
+ Here (i,j) index major and minor respectively.
993
+ """
994
+ i, j, M, N = self._prepare_indices(i, j)
995
+
996
+ n_samples = len(i)
997
+ offsets = np.empty(n_samples, dtype=self.indices.dtype)
998
+ ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
999
+ i, j, offsets)
1000
+ if ret == 1:
1001
+ # rinse and repeat
1002
+ self.sum_duplicates()
1003
+ csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
1004
+ i, j, offsets)
1005
+
1006
+ # only assign zeros to the existing sparsity structure
1007
+ self.data[offsets[offsets > -1]] = 0
1008
+
1009
+ def _insert_many(self, i, j, x):
1010
+ """Inserts new nonzero at each (i, j) with value x
1011
+
1012
+ Here (i,j) index major and minor respectively.
1013
+ i, j and x must be non-empty, 1d arrays.
1014
+ Inserts each major group (e.g. all entries per row) at a time.
1015
+ Maintains has_sorted_indices property.
1016
+ Modifies i, j, x in place.
1017
+ """
1018
+ order = np.argsort(i, kind='mergesort') # stable for duplicates
1019
+ i = i.take(order, mode='clip')
1020
+ j = j.take(order, mode='clip')
1021
+ x = x.take(order, mode='clip')
1022
+
1023
+ do_sort = self.has_sorted_indices
1024
+
1025
+ # Update index data type
1026
+ idx_dtype = self._get_index_dtype((self.indices, self.indptr),
1027
+ maxval=(self.indptr[-1] + x.size))
1028
+ self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
1029
+ self.indices = np.asarray(self.indices, dtype=idx_dtype)
1030
+ i = np.asarray(i, dtype=idx_dtype)
1031
+ j = np.asarray(j, dtype=idx_dtype)
1032
+
1033
+ # Collate old and new in chunks by major index
1034
+ indices_parts = []
1035
+ data_parts = []
1036
+ ui, ui_indptr = np.unique(i, return_index=True)
1037
+ ui_indptr = np.append(ui_indptr, len(j))
1038
+ new_nnzs = np.diff(ui_indptr)
1039
+ prev = 0
1040
+ for c, (ii, js, je) in enumerate(zip(ui, ui_indptr, ui_indptr[1:])):
1041
+ # old entries
1042
+ start = self.indptr[prev]
1043
+ stop = self.indptr[ii]
1044
+ indices_parts.append(self.indices[start:stop])
1045
+ data_parts.append(self.data[start:stop])
1046
+
1047
+ # handle duplicate j: keep last setting
1048
+ uj, uj_indptr = np.unique(j[js:je][::-1], return_index=True)
1049
+ if len(uj) == je - js:
1050
+ indices_parts.append(j[js:je])
1051
+ data_parts.append(x[js:je])
1052
+ else:
1053
+ indices_parts.append(j[js:je][::-1][uj_indptr])
1054
+ data_parts.append(x[js:je][::-1][uj_indptr])
1055
+ new_nnzs[c] = len(uj)
1056
+
1057
+ prev = ii
1058
+
1059
+ # remaining old entries
1060
+ start = self.indptr[ii]
1061
+ indices_parts.append(self.indices[start:])
1062
+ data_parts.append(self.data[start:])
1063
+
1064
+ # update attributes
1065
+ self.indices = np.concatenate(indices_parts)
1066
+ self.data = np.concatenate(data_parts)
1067
+ nnzs = np.empty(self.indptr.shape, dtype=idx_dtype)
1068
+ nnzs[0] = idx_dtype(0)
1069
+ indptr_diff = np.diff(self.indptr)
1070
+ indptr_diff[ui] += new_nnzs
1071
+ nnzs[1:] = indptr_diff
1072
+ self.indptr = np.cumsum(nnzs, out=nnzs)
1073
+
1074
+ if do_sort:
1075
+ # TODO: only sort where necessary
1076
+ self.has_sorted_indices = False
1077
+ self.sort_indices()
1078
+
1079
+ self.check_format(full_check=False)
1080
+
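A brief sketch of the insertion path above (csr_array assumed): writing to a position outside the existing sparsity structure goes through `_set_many`/`_insert_many` and emits the SparseEfficiencyWarning shown above:

>>> import numpy as np
>>> from scipy.sparse import csr_array
>>> A = csr_array(np.array([[1, 0], [0, 2]]))
>>> A[0, 1] = 7  # new nonzero: changes the sparsity structure
>>> A.toarray()
array([[1, 7],
       [0, 2]])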
1081
+ ######################
1082
+ # Conversion methods #
1083
+ ######################
1084
+
1085
+ def tocoo(self, copy=True):
1086
+ major_dim, minor_dim = self._swap(self.shape)
1087
+ minor_indices = self.indices
1088
+ major_indices = np.empty(len(minor_indices), dtype=self.indices.dtype)
1089
+ _sparsetools.expandptr(major_dim, self.indptr, major_indices)
1090
+ coords = self._swap((major_indices, minor_indices))
1091
+
1092
+ return self._coo_container(
1093
+ (self.data, coords), self.shape, copy=copy, dtype=self.dtype
1094
+ )
1095
+
1096
+ tocoo.__doc__ = _spbase.tocoo.__doc__
1097
+
1098
+ def toarray(self, order=None, out=None):
1099
+ if out is None and order is None:
1100
+ order = self._swap('cf')[0]
1101
+ out = self._process_toarray_args(order, out)
1102
+ if not (out.flags.c_contiguous or out.flags.f_contiguous):
1103
+ raise ValueError('Output array must be C or F contiguous')
1104
+ # align ideal order with output array order
1105
+ if out.flags.c_contiguous:
1106
+ x = self.tocsr()
1107
+ y = out
1108
+ else:
1109
+ x = self.tocsc()
1110
+ y = out.T
1111
+ M, N = x._swap(x.shape)
1112
+ csr_todense(M, N, x.indptr, x.indices, x.data, y)
1113
+ return out
1114
+
1115
+ toarray.__doc__ = _spbase.toarray.__doc__
1116
+
1117
+ ##############################################################
1118
+ # methods that examine or modify the internal data structure #
1119
+ ##############################################################
1120
+
1121
+ def eliminate_zeros(self):
1122
+ """Remove zero entries from the array/matrix
1123
+
1124
+ This is an *in place* operation.
1125
+ """
1126
+ M, N = self._swap(self.shape)
1127
+ _sparsetools.csr_eliminate_zeros(M, N, self.indptr, self.indices,
1128
+ self.data)
1129
+ self.prune() # nnz may have changed
1130
+
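A minimal sketch of the in-place cleanup above (csr_array assumed): explicitly stored zeros count toward nnz until they are eliminated:

>>> import numpy as np
>>> from scipy.sparse import csr_array
>>> A = csr_array(np.array([[1, 0], [0, 2]]))
>>> A.data[0] = 0  # create an explicitly stored zero
>>> A.nnz
2
>>> A.eliminate_zeros()
>>> A.nnz
1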
1131
+ @property
1132
+ def has_canonical_format(self) -> bool:
1133
+ """Whether the array/matrix has sorted indices and no duplicates
1134
+
1135
+ Returns
1136
+ - True: if the above applies
1137
+ - False: otherwise
1138
+
1139
+ has_canonical_format implies has_sorted_indices, so if the latter flag
1140
+ is False, so will the former be; if the former is found True, the
1141
+ latter flag is also set.
1142
+ """
1143
+ # first check to see if result was cached
1144
+ if not getattr(self, '_has_sorted_indices', True):
1145
+ # not sorted => not canonical
1146
+ self._has_canonical_format = False
1147
+ elif not hasattr(self, '_has_canonical_format'):
1148
+ self.has_canonical_format = bool(
1149
+ _sparsetools.csr_has_canonical_format(
1150
+ len(self.indptr) - 1, self.indptr, self.indices)
1151
+ )
1152
+ return self._has_canonical_format
1153
+
1154
+ @has_canonical_format.setter
1155
+ def has_canonical_format(self, val: bool):
1156
+ self._has_canonical_format = bool(val)
1157
+ if val:
1158
+ self.has_sorted_indices = True
1159
+
1160
+ def sum_duplicates(self):
1161
+ """Eliminate duplicate entries by adding them together
1162
+
1163
+ This is an *in place* operation.
1164
+ """
1165
+ if self.has_canonical_format:
1166
+ return
1167
+ self.sort_indices()
1168
+
1169
+ M, N = self._swap(self.shape)
1170
+ _sparsetools.csr_sum_duplicates(M, N, self.indptr, self.indices,
1171
+ self.data)
1172
+
1173
+ self.prune() # nnz may have changed
1174
+ self.has_canonical_format = True
1175
+
1176
+ @property
1177
+ def has_sorted_indices(self) -> bool:
1178
+ """Whether the indices are sorted
1179
+
1180
+ Returns
1181
+ - True: if the indices of the array/matrix are in sorted order
1182
+ - False: otherwise
1183
+ """
1184
+ # first check to see if result was cached
1185
+ if not hasattr(self, '_has_sorted_indices'):
1186
+ self._has_sorted_indices = bool(
1187
+ _sparsetools.csr_has_sorted_indices(
1188
+ len(self.indptr) - 1, self.indptr, self.indices)
1189
+ )
1190
+ return self._has_sorted_indices
1191
+
1192
+ @has_sorted_indices.setter
1193
+ def has_sorted_indices(self, val: bool):
1194
+ self._has_sorted_indices = bool(val)
1195
+
1196
+
1197
+ def sorted_indices(self):
1198
+ """Return a copy of this array/matrix with sorted indices
1199
+ """
1200
+ A = self.copy()
1201
+ A.sort_indices()
1202
+ return A
1203
+
1204
+ # an alternative that has linear complexity is the following
1205
+ # although the previous option is typically faster
1206
+ # return self.toother().toother()
1207
+
1208
+ def sort_indices(self):
1209
+ """Sort the indices of this array/matrix *in place*
1210
+ """
1211
+
1212
+ if not self.has_sorted_indices:
1213
+ _sparsetools.csr_sort_indices(len(self.indptr) - 1, self.indptr,
1214
+ self.indices, self.data)
1215
+ self.has_sorted_indices = True
1216
+
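A short sketch of the sorted-indices bookkeeping above (csr_array assumed): indices passed out of order are detected lazily and can be sorted in place:

>>> import numpy as np
>>> from scipy.sparse import csr_array
>>> A = csr_array((np.array([2.0, 1.0]), np.array([2, 0]), np.array([0, 2])), shape=(1, 3))
>>> A.has_sorted_indices
False
>>> A.sort_indices()
>>> A.has_sorted_indices
True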
1217
+ def prune(self):
1218
+ """Remove empty space after all non-zero elements.
1219
+ """
1220
+ major_dim = self._swap(self.shape)[0]
1221
+
1222
+ if len(self.indptr) != major_dim + 1:
1223
+ raise ValueError('index pointer has invalid length')
1224
+ if len(self.indices) < self.nnz:
1225
+ raise ValueError('indices array has fewer than nnz elements')
1226
+ if len(self.data) < self.nnz:
1227
+ raise ValueError('data array has fewer than nnz elements')
1228
+
1229
+ self.indices = _prune_array(self.indices[:self.nnz])
1230
+ self.data = _prune_array(self.data[:self.nnz])
1231
+
1232
+ def resize(self, *shape):
1233
+ shape = check_shape(shape)
1234
+ if hasattr(self, 'blocksize'):
1235
+ bm, bn = self.blocksize
1236
+ new_M, rm = divmod(shape[0], bm)
1237
+ new_N, rn = divmod(shape[1], bn)
1238
+ if rm or rn:
1239
+ raise ValueError("shape must be divisible into {} blocks. "
1240
+ "Got {}".format(self.blocksize, shape))
1241
+ M, N = self.shape[0] // bm, self.shape[1] // bn
1242
+ else:
1243
+ new_M, new_N = self._swap(shape)
1244
+ M, N = self._swap(self.shape)
1245
+
1246
+ if new_M < M:
1247
+ self.indices = self.indices[:self.indptr[new_M]]
1248
+ self.data = self.data[:self.indptr[new_M]]
1249
+ self.indptr = self.indptr[:new_M + 1]
1250
+ elif new_M > M:
1251
+ self.indptr = np.resize(self.indptr, new_M + 1)
1252
+ self.indptr[M + 1:].fill(self.indptr[M])
1253
+
1254
+ if new_N < N:
1255
+ mask = self.indices < new_N
1256
+ if not np.all(mask):
1257
+ self.indices = self.indices[mask]
1258
+ self.data = self.data[mask]
1259
+ major_index, val = self._minor_reduce(np.add, mask)
1260
+ self.indptr.fill(0)
1261
+ self.indptr[1:][major_index] = val
1262
+ np.cumsum(self.indptr, out=self.indptr)
1263
+
1264
+ self._shape = shape
1265
+
1266
+ resize.__doc__ = _spbase.resize.__doc__
1267
+
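A brief sketch of `resize` above (csr_array assumed): growing keeps existing entries and pads with implicit zeros, while shrinking discards out-of-range entries:

>>> import numpy as np
>>> from scipy.sparse import csr_array
>>> A = csr_array(np.array([[1, 0, 2], [0, 3, 0]]))
>>> A.resize(3, 3)
>>> A.toarray()
array([[1, 0, 2],
       [0, 3, 0],
       [0, 0, 0]])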
1268
+ ###################
1269
+ # utility methods #
1270
+ ###################
1271
+
1272
+ # needed by _data_matrix
1273
+ def _with_data(self, data, copy=True):
1274
+ """Returns a matrix with the same sparsity structure as self,
1275
+ but with different data. By default the structure arrays
1276
+ (i.e. .indptr and .indices) are copied.
1277
+ """
1278
+ if copy:
1279
+ return self.__class__((data, self.indices.copy(),
1280
+ self.indptr.copy()),
1281
+ shape=self.shape,
1282
+ dtype=data.dtype)
1283
+ else:
1284
+ return self.__class__((data, self.indices, self.indptr),
1285
+ shape=self.shape, dtype=data.dtype)
1286
+
1287
+ def _binopt(self, other, op):
1288
+ """apply the binary operation fn to two sparse matrices."""
1289
+ other = self.__class__(other)
1290
+
1291
+ # e.g. csr_plus_csr, csr_minus_csr, etc.
1292
+ fn = getattr(_sparsetools, self.format + op + self.format)
1293
+
1294
+ maxnnz = self.nnz + other.nnz
1295
+ idx_dtype = self._get_index_dtype((self.indptr, self.indices,
1296
+ other.indptr, other.indices),
1297
+ maxval=maxnnz)
1298
+ indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
1299
+ indices = np.empty(maxnnz, dtype=idx_dtype)
1300
+
1301
+ bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']
1302
+ if op in bool_ops:
1303
+ data = np.empty(maxnnz, dtype=np.bool_)
1304
+ else:
1305
+ data = np.empty(maxnnz, dtype=upcast(self.dtype, other.dtype))
1306
+
1307
+ fn(self.shape[0], self.shape[1],
1308
+ np.asarray(self.indptr, dtype=idx_dtype),
1309
+ np.asarray(self.indices, dtype=idx_dtype),
1310
+ self.data,
1311
+ np.asarray(other.indptr, dtype=idx_dtype),
1312
+ np.asarray(other.indices, dtype=idx_dtype),
1313
+ other.data,
1314
+ indptr, indices, data)
1315
+
1316
+ A = self.__class__((data, indices, indptr), shape=self.shape)
1317
+ A.prune()
1318
+
1319
+ return A
1320
+
1321
+ def _divide_sparse(self, other):
1322
+ """
1323
+ Divide this matrix by a second sparse matrix.
1324
+ """
1325
+ if other.shape != self.shape:
1326
+ raise ValueError('inconsistent shapes')
1327
+
1328
+ r = self._binopt(other, '_eldiv_')
1329
+
1330
+ if np.issubdtype(r.dtype, np.inexact):
1331
+ # Eldiv leaves entries outside the combined sparsity
1332
+ # pattern empty, so they must be filled manually.
1333
+ # Everything outside of other's sparsity is NaN, and everything
1334
+ # inside it is either zero or defined by eldiv.
1335
+ out = np.empty(self.shape, dtype=self.dtype)
1336
+ out.fill(np.nan)
1337
+ row, col = other.nonzero()
1338
+ out[row, col] = 0
1339
+ r = r.tocoo()
1340
+ out[r.row, r.col] = r.data
1341
+ out = self._container(out)
1342
+ else:
1343
+ # integer types go with nan <-> 0
1344
+ out = r
1345
+
1346
+ return out
1347
+
1348
+
1349
+ def _process_slice(sl, num):
1350
+ if sl is None:
1351
+ i0, i1 = 0, num
1352
+ elif isinstance(sl, slice):
1353
+ i0, i1, stride = sl.indices(num)
1354
+ if stride != 1:
1355
+ raise ValueError('slicing with step != 1 not supported')
1356
+ i0 = min(i0, i1) # give an empty slice when i0 > i1
1357
+ elif isintlike(sl):
1358
+ if sl < 0:
1359
+ sl += num
1360
+ i0, i1 = sl, sl + 1
1361
+ if i0 < 0 or i1 > num:
1362
+ raise IndexError('index out of bounds: 0 <= %d < %d <= %d' %
1363
+ (i0, i1, num))
1364
+ else:
1365
+ raise TypeError('expected slice or scalar')
1366
+
1367
+ return i0, i1
llmeval-env/lib/python3.10/site-packages/scipy/sparse/_construct.py ADDED
@@ -0,0 +1,1401 @@
1
+ """Functions to construct sparse matrices and arrays
2
+ """
3
+
4
+ __docformat__ = "restructuredtext en"
5
+
6
+ __all__ = ['spdiags', 'eye', 'identity', 'kron', 'kronsum',
7
+ 'hstack', 'vstack', 'bmat', 'rand', 'random', 'diags', 'block_diag',
8
+ 'diags_array', 'block_array', 'eye_array', 'random_array']
9
+
10
+ import numbers
11
+ import math
12
+ import numpy as np
13
+
14
+ from scipy._lib._util import check_random_state, rng_integers
15
+ from ._sputils import upcast, get_index_dtype, isscalarlike
16
+
17
+ from ._sparsetools import csr_hstack
18
+ from ._bsr import bsr_matrix, bsr_array
19
+ from ._coo import coo_matrix, coo_array
20
+ from ._csc import csc_matrix, csc_array
21
+ from ._csr import csr_matrix, csr_array
22
+ from ._dia import dia_matrix, dia_array
23
+
24
+ from ._base import issparse, sparray
25
+
26
+
27
+ def spdiags(data, diags, m=None, n=None, format=None):
28
+ """
29
+ Return a sparse matrix from diagonals.
30
+
31
+ Parameters
32
+ ----------
33
+ data : array_like
34
+ Matrix diagonals stored row-wise
35
+ diags : sequence of int or an int
36
+ Diagonals to set:
37
+
38
+ * k = 0 the main diagonal
39
+ * k > 0 the kth upper diagonal
40
+ * k < 0 the kth lower diagonal
41
+ m, n : int, tuple, optional
42
+ Shape of the result. If `n` is None and `m` is a given tuple,
43
+ the shape is this tuple. If omitted, the matrix is square and
44
+ its shape is len(data[0]).
45
+ format : str, optional
46
+ Format of the result. By default (format=None) an appropriate sparse
47
+ matrix format is returned. This choice is subject to change.
48
+
49
+ .. warning::
50
+
51
+ This function returns a sparse matrix -- not a sparse array.
52
+ You are encouraged to use ``diags_array`` to take advantage
53
+ of the sparse array functionality.
54
+
55
+ See Also
56
+ --------
57
+ diags_array : more convenient form of this function
58
+ diags : matrix version of diags_array
59
+ dia_matrix : the sparse DIAgonal format.
60
+
61
+ Examples
62
+ --------
63
+ >>> import numpy as np
64
+ >>> from scipy.sparse import spdiags
65
+ >>> data = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
66
+ >>> diags = np.array([0, -1, 2])
67
+ >>> spdiags(data, diags, 4, 4).toarray()
68
+ array([[1, 0, 3, 0],
69
+ [1, 2, 0, 4],
70
+ [0, 2, 3, 0],
71
+ [0, 0, 3, 4]])
72
+
73
+ """
74
+ if m is None and n is None:
75
+ m = n = len(data[0])
76
+ elif n is None:
77
+ m, n = m
78
+ return dia_matrix((data, diags), shape=(m, n)).asformat(format)
79
+
80
+
81
+ def diags_array(diagonals, /, *, offsets=0, shape=None, format=None, dtype=None):
82
+ """
83
+ Construct a sparse array from diagonals.
84
+
85
+ Parameters
86
+ ----------
87
+ diagonals : sequence of array_like
88
+ Sequence of arrays containing the array diagonals,
89
+ corresponding to `offsets`.
90
+ offsets : sequence of int or an int, optional
91
+ Diagonals to set:
92
+ - k = 0 the main diagonal (default)
93
+ - k > 0 the kth upper diagonal
94
+ - k < 0 the kth lower diagonal
95
+ shape : tuple of int, optional
96
+ Shape of the result. If omitted, a square array large enough
97
+ to contain the diagonals is returned.
98
+ format : {"dia", "csr", "csc", "lil", ...}, optional
99
+ Matrix format of the result. By default (format=None) an
100
+ appropriate sparse array format is returned. This choice is
101
+ subject to change.
102
+ dtype : dtype, optional
103
+ Data type of the array.
104
+
105
+ Notes
106
+ -----
107
+ The result from `diags_array` is the sparse equivalent of::
108
+
109
+ np.diag(diagonals[0], offsets[0])
110
+ + ...
111
+ + np.diag(diagonals[k], offsets[k])
112
+
113
+ Repeated diagonal offsets are disallowed.
114
+
115
+ .. versionadded:: 1.11
116
+
117
+ Examples
118
+ --------
119
+ >>> from scipy.sparse import diags_array
120
+ >>> diagonals = [[1, 2, 3, 4], [1, 2, 3], [1, 2]]
121
+ >>> diags_array(diagonals, offsets=[0, -1, 2]).toarray()
122
+ array([[1, 0, 1, 0],
123
+ [1, 2, 0, 2],
124
+ [0, 2, 3, 0],
125
+ [0, 0, 3, 4]])
126
+
127
+ Broadcasting of scalars is supported (but shape needs to be
128
+ specified):
129
+
130
+ >>> diags_array([1, -2, 1], offsets=[-1, 0, 1], shape=(4, 4)).toarray()
131
+ array([[-2., 1., 0., 0.],
132
+ [ 1., -2., 1., 0.],
133
+ [ 0., 1., -2., 1.],
134
+ [ 0., 0., 1., -2.]])
135
+
136
+
137
+ If only one diagonal is wanted (as in `numpy.diag`), the following
138
+ works as well:
139
+
140
+ >>> diags_array([1, 2, 3], offsets=1).toarray()
141
+ array([[ 0., 1., 0., 0.],
142
+ [ 0., 0., 2., 0.],
143
+ [ 0., 0., 0., 3.],
144
+ [ 0., 0., 0., 0.]])
145
+ """
146
+ # if offsets is not a sequence, assume that there's only one diagonal
147
+ if isscalarlike(offsets):
148
+ # now check that there's actually only one diagonal
149
+ if len(diagonals) == 0 or isscalarlike(diagonals[0]):
150
+ diagonals = [np.atleast_1d(diagonals)]
151
+ else:
152
+ raise ValueError("Different number of diagonals and offsets.")
153
+ else:
154
+ diagonals = list(map(np.atleast_1d, diagonals))
155
+
156
+ offsets = np.atleast_1d(offsets)
157
+
158
+ # Basic check
159
+ if len(diagonals) != len(offsets):
160
+ raise ValueError("Different number of diagonals and offsets.")
161
+
162
+ # Determine shape, if omitted
163
+ if shape is None:
164
+ m = len(diagonals[0]) + abs(int(offsets[0]))
165
+ shape = (m, m)
166
+
167
+ # Determine data type, if omitted
168
+ if dtype is None:
169
+ dtype = np.common_type(*diagonals)
170
+
171
+ # Construct data array
172
+ m, n = shape
173
+
174
+ M = max([min(m + offset, n - offset) + max(0, offset)
175
+ for offset in offsets])
176
+ M = max(0, M)
177
+ data_arr = np.zeros((len(offsets), M), dtype=dtype)
178
+
179
+ K = min(m, n)
180
+
181
+ for j, diagonal in enumerate(diagonals):
182
+ offset = offsets[j]
183
+ k = max(0, offset)
184
+ length = min(m + offset, n - offset, K)
185
+ if length < 0:
186
+ raise ValueError("Offset %d (index %d) out of bounds" % (offset, j))
187
+ try:
188
+ data_arr[j, k:k+length] = diagonal[...,:length]
189
+ except ValueError as e:
190
+ if len(diagonal) != length and len(diagonal) != 1:
191
+ raise ValueError(
192
+ "Diagonal length (index %d: %d at offset %d) does not "
193
+ "agree with array size (%d, %d)." % (
194
+ j, len(diagonal), offset, m, n)) from e
195
+ raise
196
+
197
+ return dia_array((data_arr, offsets), shape=(m, n)).asformat(format)
198
+
199
+
200
+ def diags(diagonals, offsets=0, shape=None, format=None, dtype=None):
201
+ """
202
+ Construct a sparse matrix from diagonals.
203
+
204
+ .. warning::
205
+
206
+ This function returns a sparse matrix -- not a sparse array.
207
+ You are encouraged to use ``diags_array`` to take advantage
208
+ of the sparse array functionality.
209
+
210
+ Parameters
211
+ ----------
212
+ diagonals : sequence of array_like
213
+ Sequence of arrays containing the matrix diagonals,
214
+ corresponding to `offsets`.
215
+ offsets : sequence of int or an int, optional
216
+ Diagonals to set:
217
+ - k = 0 the main diagonal (default)
218
+ - k > 0 the kth upper diagonal
219
+ - k < 0 the kth lower diagonal
220
+ shape : tuple of int, optional
221
+ Shape of the result. If omitted, a square matrix large enough
222
+ to contain the diagonals is returned.
223
+ format : {"dia", "csr", "csc", "lil", ...}, optional
224
+ Matrix format of the result. By default (format=None) an
225
+ appropriate sparse matrix format is returned. This choice is
226
+ subject to change.
227
+ dtype : dtype, optional
228
+ Data type of the matrix.
229
+
230
+ See Also
231
+ --------
232
+ spdiags : construct matrix from diagonals
233
+ diags_array : construct sparse array instead of sparse matrix
234
+
235
+ Notes
236
+ -----
237
+ This function differs from `spdiags` in the way it handles
238
+ off-diagonals.
239
+
240
+ The result from `diags` is the sparse equivalent of::
241
+
242
+ np.diag(diagonals[0], offsets[0])
243
+ + ...
244
+ + np.diag(diagonals[k], offsets[k])
245
+
246
+ Repeated diagonal offsets are disallowed.
247
+
248
+ .. versionadded:: 0.11
249
+
250
+ Examples
251
+ --------
252
+ >>> from scipy.sparse import diags
253
+ >>> diagonals = [[1, 2, 3, 4], [1, 2, 3], [1, 2]]
254
+ >>> diags(diagonals, [0, -1, 2]).toarray()
255
+ array([[1, 0, 1, 0],
256
+ [1, 2, 0, 2],
257
+ [0, 2, 3, 0],
258
+ [0, 0, 3, 4]])
259
+
260
+ Broadcasting of scalars is supported (but shape needs to be
261
+ specified):
262
+
263
+ >>> diags([1, -2, 1], [-1, 0, 1], shape=(4, 4)).toarray()
264
+ array([[-2., 1., 0., 0.],
265
+ [ 1., -2., 1., 0.],
266
+ [ 0., 1., -2., 1.],
267
+ [ 0., 0., 1., -2.]])
268
+
269
+
270
+ If only one diagonal is wanted (as in `numpy.diag`), the following
271
+ works as well:
272
+
273
+ >>> diags([1, 2, 3], 1).toarray()
274
+ array([[ 0., 1., 0., 0.],
275
+ [ 0., 0., 2., 0.],
276
+ [ 0., 0., 0., 3.],
277
+ [ 0., 0., 0., 0.]])
278
+ """
279
+ A = diags_array(diagonals, offsets=offsets, shape=shape, dtype=dtype)
280
+ return dia_matrix(A).asformat(format)
281
+
282
+
283
+ def identity(n, dtype='d', format=None):
284
+ """Identity matrix in sparse format
285
+
286
+ Returns an identity matrix with shape (n,n) using a given
287
+ sparse format and dtype. This differs from `eye_array` in
288
+ that it has a square shape with ones only on the main diagonal.
289
+ It is thus the multiplicative identity. `eye_array` allows
290
+ rectangular shapes and the diagonal can be offset from the main one.
291
+
292
+ .. warning::
293
+
294
+ This function returns a sparse matrix -- not a sparse array.
295
+ You are encouraged to use ``eye_array`` to take advantage
296
+ of the sparse array functionality.
297
+
298
+ Parameters
299
+ ----------
300
+ n : int
301
+ Shape of the identity matrix.
302
+ dtype : dtype, optional
303
+ Data type of the matrix
304
+ format : str, optional
305
+ Sparse format of the result, e.g., format="csr", etc.
306
+
307
+ Examples
308
+ --------
309
+ >>> import scipy as sp
310
+ >>> sp.sparse.identity(3).toarray()
311
+ array([[ 1., 0., 0.],
312
+ [ 0., 1., 0.],
313
+ [ 0., 0., 1.]])
314
+ >>> sp.sparse.identity(3, dtype='int8', format='dia')
315
+ <3x3 sparse matrix of type '<class 'numpy.int8'>'
316
+ with 3 stored elements (1 diagonals) in DIAgonal format>
317
+ >>> sp.sparse.eye_array(3, dtype='int8', format='dia')
318
+ <3x3 sparse array of type '<class 'numpy.int8'>'
319
+ with 3 stored elements (1 diagonals) in DIAgonal format>
320
+
321
+ """
322
+ return eye(n, n, dtype=dtype, format=format)
323
+
324
+
325
+ def eye_array(m, n=None, *, k=0, dtype=float, format=None):
326
+ """Identity matrix in sparse array format
327
+
328
+ Return a sparse array with ones on diagonal.
329
+ Specifically a sparse array (m x n) where the kth diagonal
330
+ is all ones and everything else is zeros.
331
+
332
+ Parameters
333
+ ----------
334
+ m : int or tuple of ints
335
+ Number of rows requested.
336
+ n : int, optional
337
+ Number of columns. Default: `m`.
338
+ k : int, optional
339
+ Diagonal to place ones on. Default: 0 (main diagonal).
340
+ dtype : dtype, optional
341
+ Data type of the array
342
+ format : str, optional (default: "dia")
343
+ Sparse format of the result, e.g., format="csr", etc.
344
+
345
+ Examples
346
+ --------
347
+ >>> import numpy as np
348
+ >>> import scipy as sp
349
+ >>> sp.sparse.eye_array(3).toarray()
350
+ array([[ 1., 0., 0.],
351
+ [ 0., 1., 0.],
352
+ [ 0., 0., 1.]])
353
+ >>> sp.sparse.eye_array(3, dtype=np.int8)
354
+ <3x3 sparse array of type '<class 'numpy.int8'>'
355
+ with 3 stored elements (1 diagonals) in DIAgonal format>
356
+
357
+ """
358
+ # TODO: delete next 15 lines [combine with _eye()] once spmatrix removed
359
+ return _eye(m, n, k, dtype, format)
360
+
361
+
362
+ def _eye(m, n, k, dtype, format, as_sparray=True):
363
+ if as_sparray:
364
+ csr_sparse = csr_array
365
+ csc_sparse = csc_array
366
+ coo_sparse = coo_array
367
+ diags_sparse = diags_array
368
+ else:
369
+ csr_sparse = csr_matrix
370
+ csc_sparse = csc_matrix
371
+ coo_sparse = coo_matrix
372
+ diags_sparse = diags
373
+
374
+ if n is None:
375
+ n = m
376
+ m, n = int(m), int(n)
377
+
378
+ if m == n and k == 0:
379
+ # fast branch for special formats
380
+ if format in ['csr', 'csc']:
381
+ idx_dtype = get_index_dtype(maxval=n)
382
+ indptr = np.arange(n+1, dtype=idx_dtype)
383
+ indices = np.arange(n, dtype=idx_dtype)
384
+ data = np.ones(n, dtype=dtype)
385
+ cls = {'csr': csr_sparse, 'csc': csc_sparse}[format]
386
+ return cls((data, indices, indptr), (n, n))
387
+
388
+ elif format == 'coo':
389
+ idx_dtype = get_index_dtype(maxval=n)
390
+ row = np.arange(n, dtype=idx_dtype)
391
+ col = np.arange(n, dtype=idx_dtype)
392
+ data = np.ones(n, dtype=dtype)
393
+ return coo_sparse((data, (row, col)), (n, n))
394
+
395
+ data = np.ones((1, max(0, min(m + k, n))), dtype=dtype)
396
+ return diags_sparse(data, offsets=[k], shape=(m, n), dtype=dtype).asformat(format)
397
+
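+ # Added sketch (illustrative, not original code): for the fast CSR branch
+ # above with n=3, the identity is stored as indptr=[0, 1, 2, 3],
+ # indices=[0, 1, 2], data=[1., 1., 1.] -- one stored entry per row.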
398
+
399
+ def eye(m, n=None, k=0, dtype=float, format=None):
400
+ """Sparse matrix with ones on diagonal
401
+
402
+ Returns a sparse matrix (m x n) where the kth diagonal
403
+ is all ones and everything else is zeros.
404
+
405
+ Parameters
406
+ ----------
407
+ m : int
408
+ Number of rows in the matrix.
409
+ n : int, optional
410
+ Number of columns. Default: `m`.
411
+ k : int, optional
412
+ Diagonal to place ones on. Default: 0 (main diagonal).
413
+ dtype : dtype, optional
414
+ Data type of the matrix.
415
+ format : str, optional
416
+ Sparse format of the result, e.g., format="csr", etc.
417
+
418
+ .. warning::
419
+
420
+ This function returns a sparse matrix -- not a sparse array.
421
+ You are encouraged to use ``eye_array`` to take advantage
422
+ of the sparse array functionality.
423
+
424
+ Examples
425
+ --------
426
+ >>> import numpy as np
427
+ >>> import scipy as sp
428
+ >>> sp.sparse.eye(3).toarray()
429
+ array([[ 1., 0., 0.],
430
+ [ 0., 1., 0.],
431
+ [ 0., 0., 1.]])
432
+ >>> sp.sparse.eye(3, dtype=np.int8)
433
+ <3x3 sparse matrix of type '<class 'numpy.int8'>'
434
+ with 3 stored elements (1 diagonals) in DIAgonal format>
435
+
436
+ """
437
+ return _eye(m, n, k, dtype, format, False)
438
+
439
+
440
+ def kron(A, B, format=None):
441
+ """kronecker product of sparse matrices A and B
442
+
443
+ Parameters
444
+ ----------
445
+ A : sparse or dense matrix
446
+ first matrix of the product
447
+ B : sparse or dense matrix
448
+ second matrix of the product
449
+ format : str, optional (default: 'bsr' or 'coo')
450
+ format of the result (e.g. "csr")
451
+ If None, choose 'bsr' for relatively dense array and 'coo' for others
452
+
453
+ Returns
454
+ -------
455
+ kronecker product in a sparse format.
456
+ Returns a sparse matrix unless either A or B is a
457
+ sparse array in which case returns a sparse array.
458
+
459
+ Examples
460
+ --------
461
+ >>> import numpy as np
462
+ >>> import scipy as sp
463
+ >>> A = sp.sparse.csr_array(np.array([[0, 2], [5, 0]]))
464
+ >>> B = sp.sparse.csr_array(np.array([[1, 2], [3, 4]]))
465
+ >>> sp.sparse.kron(A, B).toarray()
466
+ array([[ 0, 0, 2, 4],
467
+ [ 0, 0, 6, 8],
468
+ [ 5, 10, 0, 0],
469
+ [15, 20, 0, 0]])
470
+
471
+ >>> sp.sparse.kron(A, [[1, 2], [3, 4]]).toarray()
472
+ array([[ 0, 0, 2, 4],
473
+ [ 0, 0, 6, 8],
474
+ [ 5, 10, 0, 0],
475
+ [15, 20, 0, 0]])
476
+
477
+ """
478
+ # TODO: delete next 10 lines and replace _sparse with _array when spmatrix removed
479
+ if isinstance(A, sparray) or isinstance(B, sparray):
480
+ # convert to local variables
481
+ bsr_sparse = bsr_array
482
+ csr_sparse = csr_array
483
+ coo_sparse = coo_array
484
+ else: # use spmatrix
485
+ bsr_sparse = bsr_matrix
486
+ csr_sparse = csr_matrix
487
+ coo_sparse = coo_matrix
488
+
489
+ B = coo_sparse(B)
490
+
491
+ # B is fairly dense, use BSR
492
+ if (format is None or format == "bsr") and 2*B.nnz >= B.shape[0] * B.shape[1]:
493
+ A = csr_sparse(A,copy=True)
494
+ output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])
495
+
496
+ if A.nnz == 0 or B.nnz == 0:
497
+ # kronecker product is the zero matrix
498
+ return coo_sparse(output_shape).asformat(format)
499
+
500
+ B = B.toarray()
501
+ data = A.data.repeat(B.size).reshape(-1,B.shape[0],B.shape[1])
502
+ data = data * B
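+ # (added note) each nonzero a_ij of A has now been expanded into the dense
+ # block a_ij * B, which becomes one BSR block of the result below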
503
+
504
+ return bsr_sparse((data,A.indices,A.indptr), shape=output_shape)
505
+ else:
506
+ # use COO
507
+ A = coo_sparse(A)
508
+ output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])
509
+
510
+ if A.nnz == 0 or B.nnz == 0:
511
+ # kronecker product is the zero matrix
512
+ return coo_sparse(output_shape).asformat(format)
513
+
514
+ # expand entries of a into blocks
515
+ row = A.row.repeat(B.nnz)
516
+ col = A.col.repeat(B.nnz)
517
+ data = A.data.repeat(B.nnz)
518
+
519
+ if max(A.shape[0]*B.shape[0], A.shape[1]*B.shape[1]) > np.iinfo('int32').max:
520
+ row = row.astype(np.int64)
521
+ col = col.astype(np.int64)
522
+
523
+ row *= B.shape[0]
524
+ col *= B.shape[1]
525
+
526
+ # increment block indices
527
+ row,col = row.reshape(-1,B.nnz),col.reshape(-1,B.nnz)
528
+ row += B.row
529
+ col += B.col
530
+ row,col = row.reshape(-1),col.reshape(-1)
531
+
532
+ # compute block entries
533
+ data = data.reshape(-1,B.nnz) * B.data
534
+ data = data.reshape(-1)
535
+
536
+ return coo_sparse((data,(row,col)), shape=output_shape).asformat(format)
537
+
538
+
539
+ def kronsum(A, B, format=None):
540
+ """kronecker sum of square sparse matrices A and B
541
+
542
+ Kronecker sum of two sparse matrices is a sum of two Kronecker
543
+ products kron(I_n,A) + kron(B,I_m) where A has shape (m,m)
544
+ and B has shape (n,n) and I_m and I_n are identity matrices
545
+ of shape (m,m) and (n,n), respectively.
546
+
547
+ Parameters
548
+ ----------
549
+ A
550
+ square matrix
551
+ B
552
+ square matrix
553
+ format : str
554
+ format of the result (e.g. "csr")
555
+
556
+ Returns
557
+ -------
558
+ kronecker sum in a sparse matrix format
559
+
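+ Examples
+ --------
+ A small illustrative sketch (added for clarity); the values follow
+ directly from the definition kron(I_n, A) + kron(B, I_m) above:
+
+ >>> import scipy as sp
+ >>> A = sp.sparse.csr_array([[0, 1], [1, 0]])
+ >>> B = sp.sparse.csr_array([[1, 0], [0, 2]])
+ >>> sp.sparse.kronsum(A, B).toarray()
+ array([[1, 1, 0, 0],
+ [1, 1, 0, 0],
+ [0, 0, 2, 1],
+ [0, 0, 1, 2]])
+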
560
+ """
561
+ # TODO: delete next 8 lines and replace _sparse with _array when spmatrix removed
562
+ if isinstance(A, sparray) or isinstance(B, sparray):
563
+ # convert to local variables
564
+ coo_sparse = coo_array
565
+ identity_sparse = eye_array
566
+ else:
567
+ coo_sparse = coo_matrix
568
+ identity_sparse = identity
569
+
570
+ A = coo_sparse(A)
571
+ B = coo_sparse(B)
572
+
573
+ if A.shape[0] != A.shape[1]:
574
+ raise ValueError('A is not square')
575
+
576
+ if B.shape[0] != B.shape[1]:
577
+ raise ValueError('B is not square')
578
+
579
+ dtype = upcast(A.dtype, B.dtype)
580
+
581
+ I_n = identity_sparse(A.shape[0], dtype=dtype)
582
+ I_m = identity_sparse(B.shape[0], dtype=dtype)
583
+ L = kron(I_m, A, format='coo')
584
+ R = kron(B, I_n, format='coo')
585
+
586
+ return (L + R).asformat(format)
587
+
588
+
589
+ def _compressed_sparse_stack(blocks, axis, return_spmatrix):
590
+ """
591
+ Stacking fast path for CSR/CSC matrices or arrays
592
+ (i) vstack for CSR, (ii) hstack for CSC.
593
+ """
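+ # Added sketch (not part of the original code): stacking two CSR blocks
+ # with indptr [0, 2, 3] and [0, 1, 3] along axis 0 concatenates their
+ # data/indices and offsets the second indptr by the running nnz total
+ # (last_indptr), giving indptr [0, 2, 3, 4, 6] for the stacked result.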
594
+ other_axis = 1 if axis == 0 else 0
595
+ data = np.concatenate([b.data for b in blocks])
596
+ constant_dim = blocks[0].shape[other_axis]
597
+ idx_dtype = get_index_dtype(arrays=[b.indptr for b in blocks],
598
+ maxval=max(data.size, constant_dim))
599
+ indices = np.empty(data.size, dtype=idx_dtype)
600
+ indptr = np.empty(sum(b.shape[axis] for b in blocks) + 1, dtype=idx_dtype)
601
+ last_indptr = idx_dtype(0)
602
+ sum_dim = 0
603
+ sum_indices = 0
604
+ for b in blocks:
605
+ if b.shape[other_axis] != constant_dim:
606
+ raise ValueError(f'incompatible dimensions for axis {other_axis}')
607
+ indices[sum_indices:sum_indices+b.indices.size] = b.indices
608
+ sum_indices += b.indices.size
609
+ idxs = slice(sum_dim, sum_dim + b.shape[axis])
610
+ indptr[idxs] = b.indptr[:-1]
611
+ indptr[idxs] += last_indptr
612
+ sum_dim += b.shape[axis]
613
+ last_indptr += b.indptr[-1]
614
+ indptr[-1] = last_indptr
615
+ # TODO remove this if-structure when sparse matrices removed
616
+ if return_spmatrix:
617
+ if axis == 0:
618
+ return csr_matrix((data, indices, indptr),
619
+ shape=(sum_dim, constant_dim))
620
+ else:
621
+ return csc_matrix((data, indices, indptr),
622
+ shape=(constant_dim, sum_dim))
623
+
624
+ if axis == 0:
625
+ return csr_array((data, indices, indptr),
626
+ shape=(sum_dim, constant_dim))
627
+ else:
628
+ return csc_array((data, indices, indptr),
629
+ shape=(constant_dim, sum_dim))
630
+
631
+
632
+ def _stack_along_minor_axis(blocks, axis):
633
+ """
634
+ Stacking fast path for CSR/CSC matrices along the minor axis
635
+ (i) hstack for CSR, (ii) vstack for CSC.
636
+ """
637
+ n_blocks = len(blocks)
638
+ if n_blocks == 0:
639
+ raise ValueError('Missing block matrices')
640
+
641
+ if n_blocks == 1:
642
+ return blocks[0]
643
+
644
+ # check for incompatible dimensions
645
+ other_axis = 1 if axis == 0 else 0
646
+ other_axis_dims = {b.shape[other_axis] for b in blocks}
647
+ if len(other_axis_dims) > 1:
648
+ raise ValueError(f'Mismatching dimensions along axis {other_axis}: '
649
+ f'{other_axis_dims}')
650
+ constant_dim, = other_axis_dims
651
+
652
+ # Do the stacking
653
+ indptr_list = [b.indptr for b in blocks]
654
+ data_cat = np.concatenate([b.data for b in blocks])
655
+
656
+ # Need to check if any indices/indptr, would be too large post-
657
+ # concatenation for np.int32:
658
+ # - The max value of indices is the output array's stacking-axis length - 1
659
+ # - The max value in indptr is the number of non-zero entries. This is
660
+ # exceedingly unlikely to require int64, but is checked out of an
661
+ # abundance of caution.
662
+ sum_dim = sum(b.shape[axis] for b in blocks)
663
+ nnz = sum(len(b.indices) for b in blocks)
664
+ idx_dtype = get_index_dtype(maxval=max(sum_dim - 1, nnz))
665
+ stack_dim_cat = np.array([b.shape[axis] for b in blocks], dtype=idx_dtype)
666
+ if data_cat.size > 0:
667
+ indptr_cat = np.concatenate(indptr_list).astype(idx_dtype)
668
+ indices_cat = (np.concatenate([b.indices for b in blocks])
669
+ .astype(idx_dtype))
670
+ indptr = np.empty(constant_dim + 1, dtype=idx_dtype)
671
+ indices = np.empty_like(indices_cat)
672
+ data = np.empty_like(data_cat)
673
+ csr_hstack(n_blocks, constant_dim, stack_dim_cat,
674
+ indptr_cat, indices_cat, data_cat,
675
+ indptr, indices, data)
676
+ else:
677
+ indptr = np.zeros(constant_dim + 1, dtype=idx_dtype)
678
+ indices = np.empty(0, dtype=idx_dtype)
679
+ data = np.empty(0, dtype=data_cat.dtype)
680
+
681
+ if axis == 0:
682
+ return blocks[0]._csc_container((data, indices, indptr),
683
+ shape=(sum_dim, constant_dim))
684
+ else:
685
+ return blocks[0]._csr_container((data, indices, indptr),
686
+ shape=(constant_dim, sum_dim))
687
+
688
+
689
+ def hstack(blocks, format=None, dtype=None):
690
+ """
691
+ Stack sparse matrices horizontally (column wise)
692
+
693
+ Parameters
694
+ ----------
695
+ blocks
696
+ sequence of sparse matrices with compatible shapes
697
+ format : str
698
+ sparse format of the result (e.g., "csr")
699
+ by default an appropriate sparse matrix format is returned.
700
+ This choice is subject to change.
701
+ dtype : dtype, optional
702
+ The data-type of the output matrix. If not given, the dtype is
703
+ determined from that of `blocks`.
704
+
705
+ Returns
706
+ -------
707
+ new_array : sparse matrix or array
708
+ If any block in blocks is a sparse array, return a sparse array.
709
+ Otherwise return a sparse matrix.
710
+
711
+ If you want a sparse array built from blocks that are not sparse
712
+ arrays, use `block_array([blocks])` or convert one block
713
+ e.g. `blocks[0] = csr_array(blocks[0])`.
714
+
715
+ See Also
716
+ --------
717
+ vstack : stack sparse matrices vertically (row wise)
718
+
719
+ Examples
720
+ --------
721
+ >>> from scipy.sparse import coo_matrix, hstack
722
+ >>> A = coo_matrix([[1, 2], [3, 4]])
723
+ >>> B = coo_matrix([[5], [6]])
724
+ >>> hstack([A,B]).toarray()
725
+ array([[1, 2, 5],
726
+ [3, 4, 6]])
727
+
728
+ """
729
+ blocks = np.asarray(blocks, dtype='object')
730
+ if any(isinstance(b, sparray) for b in blocks.flat):
731
+ return _block([blocks], format, dtype)
732
+ else:
733
+ return _block([blocks], format, dtype, return_spmatrix=True)
734
+
735
+
736
+ def vstack(blocks, format=None, dtype=None):
737
+ """
738
+ Stack sparse arrays vertically (row wise)
739
+
740
+ Parameters
741
+ ----------
742
+ blocks
743
+ sequence of sparse arrays with compatible shapes
744
+ format : str, optional
745
+ sparse format of the result (e.g., "csr")
746
+ by default an appropriate sparse array format is returned.
747
+ This choice is subject to change.
748
+ dtype : dtype, optional
749
+ The data-type of the output array. If not given, the dtype is
750
+ determined from that of `blocks`.
751
+
752
+ Returns
753
+ -------
754
+ new_array : sparse matrix or array
755
+ If any block in blocks is a sparse array, return a sparse array.
756
+ Otherwise return a sparse matrix.
757
+
758
+ If you want a sparse array built from blocks that are not sparse
759
+ arrays, use `block_array([[b] for b in blocks])` or convert one block
760
+ e.g. `blocks[0] = csr_array(blocks[0])`.
761
+
762
+ See Also
763
+ --------
764
+ hstack : stack sparse matrices horizontally (column wise)
765
+
766
+ Examples
767
+ --------
768
+ >>> from scipy.sparse import coo_array, vstack
769
+ >>> A = coo_array([[1, 2], [3, 4]])
770
+ >>> B = coo_array([[5, 6]])
771
+ >>> vstack([A, B]).toarray()
772
+ array([[1, 2],
773
+ [3, 4],
774
+ [5, 6]])
775
+
776
+ """
777
+ blocks = np.asarray(blocks, dtype='object')
778
+ if any(isinstance(b, sparray) for b in blocks.flat):
779
+ return _block([[b] for b in blocks], format, dtype)
780
+ else:
781
+ return _block([[b] for b in blocks], format, dtype, return_spmatrix=True)
782
+
783
+
784
+ def bmat(blocks, format=None, dtype=None):
785
+ """
786
+ Build a sparse array or matrix from sparse sub-blocks
787
+
788
+ Note: `block_array` is preferred over `bmat`. They are the same function
789
+ except that `bmat` can return a deprecated sparse matrix.
790
+ `bmat` returns a coo_matrix if none of the inputs are a sparse array.
791
+
792
+ .. warning::
793
+
794
+ This function returns a sparse matrix -- not a sparse array.
795
+ You are encouraged to use ``block_array`` to take advantage
796
+ of the sparse array functionality.
797
+
798
+ Parameters
799
+ ----------
800
+ blocks : array_like
801
+ Grid of sparse matrices with compatible shapes.
802
+ An entry of None implies an all-zero matrix.
803
+ format : {'bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'}, optional
804
+ The sparse format of the result (e.g. "csr"). By default an
805
+ appropriate sparse matrix format is returned.
806
+ This choice is subject to change.
807
+ dtype : dtype, optional
808
+ The data-type of the output matrix. If not given, the dtype is
809
+ determined from that of `blocks`.
810
+
811
+ Returns
812
+ -------
813
+ bmat : sparse matrix or array
814
+ If any block in blocks is a sparse array, return a sparse array.
815
+ Otherwise return a sparse matrix.
816
+
817
+ If you want a sparse array built from blocks that are not sparse
818
+ arrays, use `block_array()`.
819
+
820
+ See Also
821
+ --------
822
+ block_array
823
+
824
+ Examples
825
+ --------
826
+ >>> from scipy.sparse import coo_array, bmat
827
+ >>> A = coo_array([[1, 2], [3, 4]])
828
+ >>> B = coo_array([[5], [6]])
829
+ >>> C = coo_array([[7]])
830
+ >>> bmat([[A, B], [None, C]]).toarray()
831
+ array([[1, 2, 5],
832
+ [3, 4, 6],
833
+ [0, 0, 7]])
834
+
835
+ >>> bmat([[A, None], [None, C]]).toarray()
836
+ array([[1, 2, 0],
837
+ [3, 4, 0],
838
+ [0, 0, 7]])
839
+
840
+ """
841
+ blocks = np.asarray(blocks, dtype='object')
842
+ if any(isinstance(b, sparray) for b in blocks.flat):
843
+ return _block(blocks, format, dtype)
844
+ else:
845
+ return _block(blocks, format, dtype, return_spmatrix=True)
846
+
847
+
848
+ def block_array(blocks, *, format=None, dtype=None):
849
+ """
850
+ Build a sparse array from sparse sub-blocks
851
+
852
+ Parameters
853
+ ----------
854
+ blocks : array_like
855
+ Grid of sparse arrays with compatible shapes.
856
+ An entry of None implies an all-zero array.
857
+ format : {'bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'}, optional
858
+ The sparse format of the result (e.g. "csr"). By default an
859
+ appropriate sparse array format is returned.
860
+ This choice is subject to change.
861
+ dtype : dtype, optional
862
+ The data-type of the output array. If not given, the dtype is
863
+ determined from that of `blocks`.
864
+
865
+ Returns
866
+ -------
867
+ block : sparse array
868
+
869
+ See Also
870
+ --------
871
+ block_diag : specify blocks along the main diagonals
872
+ diags : specify (possibly offset) diagonals
873
+
874
+ Examples
875
+ --------
876
+ >>> from scipy.sparse import coo_array, block_array
877
+ >>> A = coo_array([[1, 2], [3, 4]])
878
+ >>> B = coo_array([[5], [6]])
879
+ >>> C = coo_array([[7]])
880
+ >>> block_array([[A, B], [None, C]]).toarray()
881
+ array([[1, 2, 5],
882
+ [3, 4, 6],
883
+ [0, 0, 7]])
884
+
885
+ >>> block_array([[A, None], [None, C]]).toarray()
886
+ array([[1, 2, 0],
887
+ [3, 4, 0],
888
+ [0, 0, 7]])
889
+
890
+ """
891
+ return _block(blocks, format, dtype)
892
+
893
+
894
+ def _block(blocks, format, dtype, return_spmatrix=False):
895
+ blocks = np.asarray(blocks, dtype='object')
896
+
897
+ if blocks.ndim != 2:
898
+ raise ValueError('blocks must be 2-D')
899
+
900
+ M,N = blocks.shape
901
+
902
+ # check for fast path cases
903
+ if (format in (None, 'csr') and
904
+ all(issparse(b) and b.format == 'csr' for b in blocks.flat)
905
+ ):
906
+ if N > 1:
907
+ # stack along columns (axis 1): must have shape (M, 1)
908
+ blocks = [[_stack_along_minor_axis(blocks[b, :], 1)] for b in range(M)]
909
+ blocks = np.asarray(blocks, dtype='object')
910
+
911
+ # stack along rows (axis 0):
912
+ A = _compressed_sparse_stack(blocks[:, 0], 0, return_spmatrix)
913
+ if dtype is not None:
914
+ A = A.astype(dtype)
915
+ return A
916
+ elif (format in (None, 'csc') and
917
+ all(issparse(b) and b.format == 'csc' for b in blocks.flat)
918
+ ):
919
+ if M > 1:
920
+ # stack along rows (axis 0): must have shape (1, N)
921
+ blocks = [[_stack_along_minor_axis(blocks[:, b], 0) for b in range(N)]]
922
+ blocks = np.asarray(blocks, dtype='object')
923
+
924
+ # stack along columns (axis 1):
925
+ A = _compressed_sparse_stack(blocks[0, :], 1, return_spmatrix)
926
+ if dtype is not None:
927
+ A = A.astype(dtype)
928
+ return A
929
+
930
+ block_mask = np.zeros(blocks.shape, dtype=bool)
931
+ brow_lengths = np.zeros(M, dtype=np.int64)
932
+ bcol_lengths = np.zeros(N, dtype=np.int64)
933
+
934
+ # convert everything to COO format
935
+ for i in range(M):
936
+ for j in range(N):
937
+ if blocks[i,j] is not None:
938
+ A = coo_array(blocks[i,j])
939
+ blocks[i,j] = A
940
+ block_mask[i,j] = True
941
+
942
+ if brow_lengths[i] == 0:
943
+ brow_lengths[i] = A.shape[0]
944
+ elif brow_lengths[i] != A.shape[0]:
945
+ msg = (f'blocks[{i},:] has incompatible row dimensions. '
946
+ f'Got blocks[{i},{j}].shape[0] == {A.shape[0]}, '
947
+ f'expected {brow_lengths[i]}.')
948
+ raise ValueError(msg)
949
+
950
+ if bcol_lengths[j] == 0:
951
+ bcol_lengths[j] = A.shape[1]
952
+ elif bcol_lengths[j] != A.shape[1]:
953
+ msg = (f'blocks[:,{j}] has incompatible column '
954
+ f'dimensions. '
955
+ f'Got blocks[{i},{j}].shape[1] == {A.shape[1]}, '
956
+ f'expected {bcol_lengths[j]}.')
957
+ raise ValueError(msg)
958
+
959
+ nnz = sum(block.nnz for block in blocks[block_mask])
960
+ if dtype is None:
961
+ all_dtypes = [blk.dtype for blk in blocks[block_mask]]
962
+ dtype = upcast(*all_dtypes) if all_dtypes else None
963
+
964
+ row_offsets = np.append(0, np.cumsum(brow_lengths))
965
+ col_offsets = np.append(0, np.cumsum(bcol_lengths))
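+ # (added note) e.g. brow_lengths=[2, 1] gives row_offsets=[0, 2, 3], so a
+ # block in block-row i is shifted down by row_offsets[i] below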
966
+
967
+ shape = (row_offsets[-1], col_offsets[-1])
968
+
969
+ data = np.empty(nnz, dtype=dtype)
970
+ idx_dtype = get_index_dtype(maxval=max(shape))
971
+ row = np.empty(nnz, dtype=idx_dtype)
972
+ col = np.empty(nnz, dtype=idx_dtype)
973
+
974
+ nnz = 0
975
+ ii, jj = np.nonzero(block_mask)
976
+ for i, j in zip(ii, jj):
977
+ B = blocks[i, j]
978
+ idx = slice(nnz, nnz + B.nnz)
979
+ data[idx] = B.data
980
+ np.add(B.row, row_offsets[i], out=row[idx], dtype=idx_dtype)
981
+ np.add(B.col, col_offsets[j], out=col[idx], dtype=idx_dtype)
982
+ nnz += B.nnz
983
+
984
+ if return_spmatrix:
985
+ return coo_matrix((data, (row, col)), shape=shape).asformat(format)
986
+ return coo_array((data, (row, col)), shape=shape).asformat(format)
987
+
988
+
989
+ def block_diag(mats, format=None, dtype=None):
990
+ """
991
+ Build a block diagonal sparse matrix or array from provided matrices.
992
+
993
+ Parameters
994
+ ----------
995
+ mats : sequence of matrices or arrays
996
+ Input matrices or arrays.
997
+ format : str, optional
998
+ The sparse format of the result (e.g., "csr"). If not given, the result
999
+ is returned in "coo" format.
1000
+ dtype : dtype specifier, optional
1001
+ The data-type of the output. If not given, the dtype is
1002
+ determined from that of `blocks`.
1003
+
1004
+ Returns
1005
+ -------
1006
+ res : sparse matrix or array
1007
+ If at least one input is a sparse array, the output is a sparse array.
1008
+ Otherwise the output is a sparse matrix.
1009
+
1010
+ Notes
1011
+ -----
1012
+
1013
+ .. versionadded:: 0.11.0
1014
+
1015
+ See Also
1016
+ --------
1017
+ block_array
1018
+ diags_array
1019
+
1020
+ Examples
1021
+ --------
1022
+ >>> from scipy.sparse import coo_array, block_diag
1023
+ >>> A = coo_array([[1, 2], [3, 4]])
1024
+ >>> B = coo_array([[5], [6]])
1025
+ >>> C = coo_array([[7]])
1026
+ >>> block_diag((A, B, C)).toarray()
1027
+ array([[1, 2, 0, 0],
1028
+ [3, 4, 0, 0],
1029
+ [0, 0, 5, 0],
1030
+ [0, 0, 6, 0],
1031
+ [0, 0, 0, 7]])
1032
+
1033
+ """
1034
+ if any(isinstance(a, sparray) for a in mats):
1035
+ container = coo_array
1036
+ else:
1037
+ container = coo_matrix
1038
+
1039
+ row = []
1040
+ col = []
1041
+ data = []
1042
+ r_idx = 0
1043
+ c_idx = 0
1044
+ for a in mats:
1045
+ if isinstance(a, (list, numbers.Number)):
1046
+ a = coo_array(np.atleast_2d(a))
1047
+ if issparse(a):
1048
+ a = a.tocoo()
1049
+ nrows, ncols = a._shape_as_2d
1050
+ row.append(a.row + r_idx)
1051
+ col.append(a.col + c_idx)
1052
+ data.append(a.data)
1053
+ else:
1054
+ nrows, ncols = a.shape
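+ # (added note) for a dense block, divmod below enumerates every position
+ # in row-major order, since all of its entries are stored explicitly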
1055
+ a_row, a_col = np.divmod(np.arange(nrows*ncols), ncols)
1056
+ row.append(a_row + r_idx)
1057
+ col.append(a_col + c_idx)
1058
+ data.append(a.ravel())
1059
+ r_idx += nrows
1060
+ c_idx += ncols
1061
+ row = np.concatenate(row)
1062
+ col = np.concatenate(col)
1063
+ data = np.concatenate(data)
1064
+ return container((data, (row, col)),
1065
+ shape=(r_idx, c_idx),
1066
+ dtype=dtype).asformat(format)
1067
+
1068
+
1069
+ def random_array(shape, *, density=0.01, format='coo', dtype=None,
1070
+ random_state=None, data_sampler=None):
1071
+ """Return a sparse array of uniformly random numbers in [0, 1)
1072
+
1073
+ Returns a sparse array with the given shape and density
1074
+ where values are generated uniformly randomly in the range [0, 1).
1075
+
1076
+ .. warning::
1077
+
1078
+ Since numpy 1.17, passing a ``np.random.Generator`` (e.g.
1079
+ ``np.random.default_rng``) for ``random_state`` will lead to much
1080
+ faster execution times.
1081
+
1082
+ A much slower implementation is used by default for backwards
1083
+ compatibility.
1084
+
1085
+ Parameters
1086
+ ----------
1087
+ shape : int or tuple of ints
1088
+ shape of the array
1089
+ density : real, optional (default: 0.01)
1090
+ density of the generated matrix: density equal to one means a full
1091
+ matrix, density of 0 means a matrix with no non-zero items.
1092
+ format : str, optional (default: 'coo')
1093
+ sparse matrix format.
1094
+ dtype : dtype, optional (default: np.float64)
1095
+ type of the returned matrix values.
1096
+ random_state : {None, int, `Generator`, `RandomState`}, optional
1097
+ A random number generator used to determine the nonzero structure. We
1098
+ recommend passing a `numpy.random.Generator` on every call, as it is much
1099
+ faster than RandomState.
1100
+
1101
+ - If `None` (or `np.random`), the `numpy.random.RandomState`
1102
+ singleton is used.
1103
+ - If an int, a new ``Generator`` instance is used,
1104
+ seeded with the int.
1105
+ - If a ``Generator`` or ``RandomState`` instance then
1106
+ that instance is used.
1107
+
1108
+ This random state will be used for sampling `indices` (the sparsity
1109
+ structure), and by default for the data values too (see `data_sampler`).
1110
+
1111
+ data_sampler : callable, optional (default depends on dtype)
1112
+ Sampler of random data values with keyword arg `size`.
1113
+ This function should take a single keyword argument `size` specifying
1114
+ the length of its returned ndarray. It is used to generate the nonzero
1115
+ values in the matrix after the locations of those values are chosen.
1116
+ By default, uniform [0, 1) random values are used unless `dtype` is
1117
+ an integer (default uniform integers from that dtype) or
1118
+ complex (default uniform over the unit square in the complex plane).
1119
+ For these, the `random_state` rng is used e.g. `rng.uniform(size=size)`.
1120
+
1121
+ Returns
1122
+ -------
1123
+ res : sparse array
1124
+
1125
+ Examples
1126
+ --------
1127
+
1128
+ Passing a ``np.random.Generator`` instance for better performance:
1129
+
1130
+ >>> import numpy as np
1131
+ >>> import scipy as sp
1132
+ >>> rng = np.random.default_rng()
1133
+
1134
+ Default sampling uniformly from [0, 1):
1135
+
1136
+ >>> S = sp.sparse.random_array((3, 4), density=0.25, random_state=rng)
1137
+
1138
+ Providing a sampler for the values:
1139
+
1140
+ >>> rvs = sp.stats.poisson(25, loc=10).rvs
1141
+ >>> S = sp.sparse.random_array((3, 4), density=0.25,
1142
+ ... random_state=rng, data_sampler=rvs)
1143
+ >>> S.toarray()
1144
+ array([[ 36., 0., 33., 0.], # random
1145
+ [ 0., 0., 0., 0.],
1146
+ [ 0., 0., 36., 0.]])
1147
+
1148
+ Building a custom distribution.
1149
+ This example builds a squared normal from np.random:
1150
+
1151
+ >>> def np_normal_squared(size=None, random_state=rng):
1152
+ ... return random_state.standard_normal(size) ** 2
1153
+ >>> S = sp.sparse.random_array((3, 4), density=0.25, random_state=rng,
1154
+ ... data_sampler=np_normal_squared)
1155
+
1156
+ Or we can build it from sp.stats style rvs functions:
1157
+
1158
+ >>> def sp_stats_normal_squared(size=None, random_state=rng):
1159
+ ... std_normal = sp.stats.distributions.norm_gen().rvs
1160
+ ... return std_normal(size=size, random_state=random_state) ** 2
1161
+ >>> S = sp.sparse.random_array((3, 4), density=0.25, random_state=rng,
1162
+ ... data_sampler=sp_stats_normal_squared)
1163
+
1164
+ Or we can subclass sp.stats rv_continuous or rv_discrete:
1165
+
1166
+ >>> class NormalSquared(sp.stats.rv_continuous):
1167
+ ... def _rvs(self, size=None, random_state=rng):
1168
+ ... return random_state.standard_normal(size) ** 2
1169
+ >>> X = NormalSquared()
1170
+ >>> Y = X().rvs
1171
+ >>> S = sp.sparse.random_array((3, 4), density=0.25,
1172
+ ... random_state=rng, data_sampler=Y)
1173
+ """
1174
+ # Use the more efficient RNG by default.
1175
+ if random_state is None:
1176
+ random_state = np.random.default_rng()
1177
+ data, ind = _random(shape, density, format, dtype, random_state, data_sampler)
1178
+ return coo_array((data, ind), shape=shape).asformat(format)
1179
+
1180
+
1181
+ def _random(shape, density=0.01, format=None, dtype=None,
1182
+ random_state=None, data_sampler=None):
1183
+ if density < 0 or density > 1:
1184
+ raise ValueError("density expected to be 0 <= density <= 1")
1185
+
1186
+ tot_prod = math.prod(shape) # use `math` for when prod is >= 2**64
1187
+
1188
+ # Number of non zero values
1189
+ size = int(round(density * tot_prod))
1190
+
1191
+ rng = check_random_state(random_state)
1192
+
1193
+ if data_sampler is None:
1194
+ if np.issubdtype(dtype, np.integer):
1195
+ def data_sampler(size):
1196
+ return rng_integers(rng,
1197
+ np.iinfo(dtype).min,
1198
+ np.iinfo(dtype).max,
1199
+ size,
1200
+ dtype=dtype)
1201
+ elif np.issubdtype(dtype, np.complexfloating):
1202
+ def data_sampler(size):
1203
+ return (rng.uniform(size=size) +
1204
+ rng.uniform(size=size) * 1j)
1205
+ else:
1206
+ data_sampler = rng.uniform
1207
+
1208
+ # rng.choice uses int64 if first arg is an int
1209
+ if tot_prod < np.iinfo(np.int64).max:
1210
+ raveled_ind = rng.choice(tot_prod, size=size, replace=False)
1211
+ ind = np.unravel_index(raveled_ind, shape=shape, order='F')
1212
+ else:
1213
+ # for ravel indices bigger than dtype max, use sets to remove duplicates
1214
+ ndim = len(shape)
1215
+ seen = set()
1216
+ while len(seen) < size:
1217
+ dsize = size - len(seen)
1218
+ seen.update(map(tuple, rng_integers(rng, shape, size=(dsize, ndim))))
1219
+ ind = tuple(np.array(list(seen)).T)
1220
+
1221
+ # size kwarg allows eg data_sampler=partial(np.random.poisson, lam=5)
1222
+ vals = data_sampler(size=size).astype(dtype, copy=False)
1223
+ return vals, ind
1224
+
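+ # Added worked sketch (illustrative): for shape=(3, 4) with density=0.25,
+ # tot_prod=12 and size=round(0.25*12)=3, so three distinct flat positions
+ # are drawn via rng.choice(12, size=3, replace=False) and unraveled in
+ # Fortran order into (row, col) coordinates.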
1225
+
1226
+ def random(m, n, density=0.01, format='coo', dtype=None,
1227
+ random_state=None, data_rvs=None):
1228
+ """Generate a sparse matrix of the given shape and density with randomly
1229
+ distributed values.
1230
+
1231
+ .. warning::
1232
+
1233
+ Since numpy 1.17, passing a ``np.random.Generator`` (e.g.
1234
+ ``np.random.default_rng``) for ``random_state`` will lead to much
1235
+ faster execution times.
1236
+
1237
+ A much slower implementation is used by default for backwards
1238
+ compatibility.
1239
+
1240
+ .. warning::
1241
+
1242
+ This function returns a sparse matrix -- not a sparse array.
1243
+ You are encouraged to use ``random_array`` to take advantage of the
1244
+ sparse array functionality.
1245
+
1246
+ Parameters
1247
+ ----------
1248
+ m, n : int
1249
+ shape of the matrix
1250
+ density : real, optional
1251
+ density of the generated matrix: density equal to one means a full
1252
+ matrix, density of 0 means a matrix with no non-zero items.
1253
+ format : str, optional
1254
+ sparse matrix format.
1255
+ dtype : dtype, optional
1256
+ type of the returned matrix values.
1257
+ random_state : {None, int, `numpy.random.Generator`,
1258
+ `numpy.random.RandomState`}, optional
1259
+
1260
+ - If `seed` is None (or `np.random`), the `numpy.random.RandomState`
1261
+ singleton is used.
1262
+ - If `seed` is an int, a new ``RandomState`` instance is used,
1263
+ seeded with `seed`.
1264
+ - If `seed` is already a ``Generator`` or ``RandomState`` instance then
1265
+ that instance is used.
1266
+
1267
+ This random state will be used for sampling the sparsity structure, but
1268
+ not necessarily for sampling the values of the structurally nonzero
1269
+ entries of the matrix.
1270
+ data_rvs : callable, optional
1271
+ Samples a requested number of random values.
1272
+ This function should take a single argument specifying the length
1273
+ of the ndarray that it will return. The structurally nonzero entries
1274
+ of the sparse random matrix will be taken from the array sampled
1275
+ by this function. By default, uniform [0, 1) random values will be
1276
+ sampled using the same random state as is used for sampling
1277
+ the sparsity structure.
1278
+
1279
+ Returns
1280
+ -------
1281
+ res : sparse matrix
1282
+
1283
+ See Also
1284
+ --------
1285
+ random_array : constructs sparse arrays instead of sparse matrices
1286
+
1287
+ Examples
1288
+ --------
1289
+
1290
+ Passing a ``np.random.Generator`` instance for better performance:
1291
+
1292
+ >>> import scipy as sp
1293
+ >>> import numpy as np
1294
+ >>> rng = np.random.default_rng()
1295
+ >>> S = sp.sparse.random(3, 4, density=0.25, random_state=rng)
1296
+
1297
+ Providing a sampler for the values:
1298
+
1299
+ >>> rvs = sp.stats.poisson(25, loc=10).rvs
1300
+ >>> S = sp.sparse.random(3, 4, density=0.25, random_state=rng, data_rvs=rvs)
1301
+ >>> S.toarray()
1302
+ array([[ 36., 0., 33., 0.], # random
1303
+ [ 0., 0., 0., 0.],
1304
+ [ 0., 0., 36., 0.]])
1305
+
1306
+ Building a custom distribution.
1307
+ This example builds a squared normal from np.random:
1308
+
1309
+ >>> def np_normal_squared(size=None, random_state=rng):
1310
+ ... return random_state.standard_normal(size) ** 2
1311
+ >>> S = sp.sparse.random(3, 4, density=0.25, random_state=rng,
1312
+ ... data_rvs=np_normal_squared)
1313
+
1314
+ Or we can build it from sp.stats style rvs functions:
1315
+
1316
+ >>> def sp_stats_normal_squared(size=None, random_state=rng):
1317
+ ... std_normal = sp.stats.distributions.norm_gen().rvs
1318
+ ... return std_normal(size=size, random_state=random_state) ** 2
1319
+ >>> S = sp.sparse.random(3, 4, density=0.25, random_state=rng,
1320
+ ... data_rvs=sp_stats_normal_squared)
1321
+
1322
+ Or we can subclass sp.stats rv_continuous or rv_discrete:
1323
+
1324
+ >>> class NormalSquared(sp.stats.rv_continuous):
1325
+ ... def _rvs(self, size=None, random_state=rng):
1326
+ ... return random_state.standard_normal(size) ** 2
1327
+ >>> X = NormalSquared()
1328
+ >>> Y = X() # get a frozen version of the distribution
1329
+ >>> S = sp.sparse.random(3, 4, density=0.25, random_state=rng, data_rvs=Y.rvs)
1330
+ """
1331
+ if n is None:
1332
+ n = m
1333
+ m, n = int(m), int(n)
1334
+ # make keyword syntax work for data_rvs e.g. data_rvs(size=7)
1335
+ if data_rvs is not None:
1336
+ def data_rvs_kw(size):
1337
+ return data_rvs(size)
1338
+ else:
1339
+ data_rvs_kw = None
1340
+ vals, ind = _random((m, n), density, format, dtype, random_state, data_rvs_kw)
1341
+ return coo_matrix((vals, ind), shape=(m, n)).asformat(format)
1342
+
1343
+
1344
+ def rand(m, n, density=0.01, format="coo", dtype=None, random_state=None):
1345
+ """Generate a sparse matrix of the given shape and density with uniformly
1346
+ distributed values.
1347
+
1348
+ .. warning::
1349
+
1350
+ This function returns a sparse matrix -- not a sparse array.
1351
+ You are encouraged to use ``random_array`` to take advantage
1352
+ of the sparse array functionality.
1353
+
1354
+ Parameters
1355
+ ----------
1356
+ m, n : int
1357
+ shape of the matrix
1358
+ density : real, optional
1359
+ density of the generated matrix: density equal to one means a full
1360
+ matrix, density of 0 means a matrix with no non-zero items.
1361
+ format : str, optional
1362
+ sparse matrix format.
1363
+ dtype : dtype, optional
1364
+ type of the returned matrix values.
1365
+ random_state : {None, int, `numpy.random.Generator`,
1366
+ `numpy.random.RandomState`}, optional
1367
+
1368
+ If `seed` is None (or `np.random`), the `numpy.random.RandomState`
1369
+ singleton is used.
1370
+ If `seed` is an int, a new ``RandomState`` instance is used,
1371
+ seeded with `seed`.
1372
+ If `seed` is already a ``Generator`` or ``RandomState`` instance then
1373
+ that instance is used.
1374
+
1375
+ Returns
1376
+ -------
1377
+ res : sparse matrix
1378
+
1379
+ Notes
1380
+ -----
1381
+ Only float types are supported for now.
1382
+
1383
+ See Also
1384
+ --------
1385
+ random : Similar function allowing a custom random data sampler
1386
+ random_array : Similar to random() but returns a sparse array
1387
+
1388
+ Examples
1389
+ --------
1390
+ >>> from scipy.sparse import rand
1391
+ >>> matrix = rand(3, 4, density=0.25, format="csr", random_state=42)
1392
+ >>> matrix
1393
+ <3x4 sparse matrix of type '<class 'numpy.float64'>'
1394
+ with 3 stored elements in Compressed Sparse Row format>
1395
+ >>> matrix.toarray()
1396
+ array([[0.05641158, 0. , 0. , 0.65088847], # random
1397
+ [0. , 0. , 0. , 0.14286682],
1398
+ [0. , 0. , 0. , 0. ]])
1399
+
1400
+ """
1401
+ return random(m, n, density, format, dtype, random_state)
llmeval-env/lib/python3.10/site-packages/scipy/sparse/_coo.py ADDED
@@ -0,0 +1,858 @@
1
+ """ A sparse matrix in COOrdinate or 'triplet' format"""
2
+
3
+ __docformat__ = "restructuredtext en"
4
+
5
+ __all__ = ['coo_array', 'coo_matrix', 'isspmatrix_coo']
6
+
7
+ import math
8
+ from warnings import warn
9
+
10
+ import numpy as np
11
+
12
+ from .._lib._util import copy_if_needed
13
+ from ._matrix import spmatrix
14
+ from ._sparsetools import coo_tocsr, coo_todense, coo_matvec
15
+ from ._base import issparse, SparseEfficiencyWarning, _spbase, sparray
16
+ from ._data import _data_matrix, _minmax_mixin
17
+ from ._sputils import (upcast_char, to_native, isshape, getdtype,
18
+ getdata, downcast_intp_index, get_index_dtype,
19
+ check_shape, check_reshape_kwargs)
20
+
21
+ import operator
22
+
23
+
24
+ class _coo_base(_data_matrix, _minmax_mixin):
25
+ _format = 'coo'
26
+
27
+ def __init__(self, arg1, shape=None, dtype=None, copy=False):
28
+ _data_matrix.__init__(self)
29
+ is_array = isinstance(self, sparray)
30
+ if not copy:
31
+ copy = copy_if_needed
32
+
33
+ if isinstance(arg1, tuple):
34
+ if isshape(arg1, allow_1d=is_array):
35
+ self._shape = check_shape(arg1, allow_1d=is_array)
36
+ idx_dtype = self._get_index_dtype(maxval=max(self._shape))
37
+ data_dtype = getdtype(dtype, default=float)
38
+ self.coords = tuple(np.array([], dtype=idx_dtype)
39
+ for _ in range(len(self._shape)))
40
+ self.data = np.array([], dtype=data_dtype)
41
+ self.has_canonical_format = True
42
+ else:
43
+ try:
44
+ obj, coords = arg1
45
+ except (TypeError, ValueError) as e:
46
+ raise TypeError('invalid input format') from e
47
+
48
+ if shape is None:
49
+ if any(len(idx) == 0 for idx in coords):
50
+ raise ValueError('cannot infer dimensions from zero '
51
+ 'sized index arrays')
52
+ shape = tuple(operator.index(np.max(idx)) + 1
53
+ for idx in coords)
54
+ self._shape = check_shape(shape, allow_1d=is_array)
55
+
56
+ idx_dtype = self._get_index_dtype(coords,
57
+ maxval=max(self.shape),
58
+ check_contents=True)
59
+ self.coords = tuple(np.array(idx, copy=copy, dtype=idx_dtype)
60
+ for idx in coords)
61
+ self.data = getdata(obj, copy=copy, dtype=dtype)
62
+ self.has_canonical_format = False
63
+ else:
64
+ if issparse(arg1):
65
+ if arg1.format == self.format and copy:
66
+ self.coords = tuple(idx.copy() for idx in arg1.coords)
67
+ self.data = arg1.data.copy()
68
+ self._shape = check_shape(arg1.shape, allow_1d=is_array)
69
+ self.has_canonical_format = arg1.has_canonical_format
70
+ else:
71
+ coo = arg1.tocoo()
72
+ self.coords = tuple(coo.coords)
73
+ self.data = coo.data
74
+ self._shape = check_shape(coo.shape, allow_1d=is_array)
75
+ self.has_canonical_format = False
76
+ else:
77
+ # dense argument
78
+ M = np.asarray(arg1)
79
+ if not is_array:
80
+ M = np.atleast_2d(M)
81
+ if M.ndim != 2:
82
+ raise TypeError('expected dimension <= 2 array or matrix')
83
+
84
+ self._shape = check_shape(M.shape, allow_1d=is_array)
85
+ if shape is not None:
86
+ if check_shape(shape, allow_1d=is_array) != self._shape:
87
+ message = f'inconsistent shapes: {shape} != {self._shape}'
88
+ raise ValueError(message)
89
+ index_dtype = self._get_index_dtype(maxval=max(self._shape))
90
+ coords = M.nonzero()
91
+ self.coords = tuple(idx.astype(index_dtype, copy=False)
92
+ for idx in coords)
93
+ self.data = M[coords]
94
+ self.has_canonical_format = True
95
+
96
+ if dtype is not None:
97
+ self.data = self.data.astype(dtype, copy=False)
98
+
99
+ self._check()
100
+
101
+ @property
102
+ def row(self):
103
+ if self.ndim > 1:
104
+ return self.coords[-2]
105
+ result = np.zeros_like(self.col)
106
+ result.setflags(write=False)
107
+ return result
108
+
109
+
110
+ @row.setter
111
+ def row(self, new_row):
112
+ if self.ndim < 2:
113
+ raise ValueError('cannot set row attribute of a 1-dimensional sparse array')
114
+ new_row = np.asarray(new_row, dtype=self.coords[-2].dtype)
115
+ self.coords = self.coords[:-2] + (new_row,) + self.coords[-1:]
116
+
117
+ @property
118
+ def col(self):
119
+ return self.coords[-1]
120
+
121
+ @col.setter
122
+ def col(self, new_col):
123
+ new_col = np.asarray(new_col, dtype=self.coords[-1].dtype)
124
+ self.coords = self.coords[:-1] + (new_col,)
125
+
126
+ def reshape(self, *args, **kwargs):
127
+ is_array = isinstance(self, sparray)
128
+ shape = check_shape(args, self.shape, allow_1d=is_array)
129
+ order, copy = check_reshape_kwargs(kwargs)
130
+
131
+ # Return early if reshape is not required
132
+ if shape == self.shape:
133
+ if copy:
134
+ return self.copy()
135
+ else:
136
+ return self
137
+
138
+ # When reducing the number of dimensions, we need to be careful about
139
+ # index overflow. This is why we can't simply call
140
+ # `np.ravel_multi_index()` followed by `np.unravel_index()` here.
141
+ flat_coords = _ravel_coords(self.coords, self.shape, order=order)
142
+ if len(shape) == 2:
143
+ if order == 'C':
144
+ new_coords = divmod(flat_coords, shape[1])
145
+ else:
146
+ new_coords = divmod(flat_coords, shape[0])[::-1]
147
+ else:
148
+ new_coords = np.unravel_index(flat_coords, shape, order=order)
149
+
150
+ # Handle copy here rather than passing on to the constructor so that no
151
+ # copy will be made of `new_coords` regardless.
152
+ if copy:
153
+ new_data = self.data.copy()
154
+ else:
155
+ new_data = self.data
156
+
157
+ return self.__class__((new_data, new_coords), shape=shape, copy=False)
158
+
159
+ reshape.__doc__ = _spbase.reshape.__doc__
160
+
161
+ def _getnnz(self, axis=None):
162
+ if axis is None or (axis == 0 and self.ndim == 1):
163
+ nnz = len(self.data)
164
+ if any(len(idx) != nnz for idx in self.coords):
165
+ raise ValueError('all index and data arrays must have the '
166
+ 'same length')
167
+
168
+ if self.data.ndim != 1 or any(idx.ndim != 1 for idx in self.coords):
169
+ raise ValueError('row, column, and data arrays must be 1-D')
170
+
171
+ return int(nnz)
172
+
173
+ if axis < 0:
174
+ axis += self.ndim
175
+ if axis >= self.ndim:
176
+ raise ValueError('axis out of bounds')
177
+ if self.ndim > 2:
178
+ raise NotImplementedError('per-axis nnz for COO arrays with >2 '
179
+ 'dimensions is not supported')
180
+ return np.bincount(downcast_intp_index(self.coords[1 - axis]),
181
+ minlength=self.shape[1 - axis])
182
+
183
+ _getnnz.__doc__ = _spbase._getnnz.__doc__
184
+
185
+ def _check(self):
186
+ """ Checks data structure for consistency """
187
+ if self.ndim != len(self.coords):
188
+ raise ValueError('mismatching number of index arrays for shape; '
189
+ f'got {len(self.coords)}, expected {self.ndim}')
190
+
191
+ # index arrays should have integer data types
192
+ for i, idx in enumerate(self.coords):
193
+ if idx.dtype.kind != 'i':
194
+ warn(f'index array {i} has non-integer dtype ({idx.dtype.name})',
195
+ stacklevel=3)
196
+
197
+ idx_dtype = self._get_index_dtype(self.coords, maxval=max(self.shape))
198
+ self.coords = tuple(np.asarray(idx, dtype=idx_dtype)
199
+ for idx in self.coords)
200
+ self.data = to_native(self.data)
201
+
202
+ if self.nnz > 0:
203
+ for i, idx in enumerate(self.coords):
204
+ if idx.max() >= self.shape[i]:
205
+ raise ValueError(f'axis {i} index {idx.max()} exceeds '
206
+ f'matrix dimension {self.shape[i]}')
207
+ if idx.min() < 0:
208
+ raise ValueError(f'negative axis {i} index: {idx.min()}')
209
+
210
+ def transpose(self, axes=None, copy=False):
211
+ if axes is None:
212
+ axes = range(self.ndim)[::-1]
213
+ elif isinstance(self, sparray):
214
+ if len(axes) != self.ndim:
215
+ raise ValueError("axes don't match matrix dimensions")
216
+ if len(set(axes)) != self.ndim:
217
+ raise ValueError("repeated axis in transpose")
218
+ elif axes != (1, 0):
219
+ raise ValueError("Sparse matrices do not support an 'axes' "
220
+ "parameter because swapping dimensions is the "
221
+ "only logical permutation.")
222
+
223
+ permuted_shape = tuple(self._shape[i] for i in axes)
224
+ permuted_coords = tuple(self.coords[i] for i in axes)
225
+ return self.__class__((self.data, permuted_coords),
226
+ shape=permuted_shape, copy=copy)
227
+
228
+ transpose.__doc__ = _spbase.transpose.__doc__
229
+
230
+ def resize(self, *shape) -> None:
231
+ is_array = isinstance(self, sparray)
232
+ shape = check_shape(shape, allow_1d=is_array)
233
+
234
+ # Check for added dimensions.
235
+ if len(shape) > self.ndim:
236
+ flat_coords = _ravel_coords(self.coords, self.shape)
237
+ max_size = math.prod(shape)
238
+ self.coords = np.unravel_index(flat_coords[:max_size], shape)
239
+ self.data = self.data[:max_size]
240
+ self._shape = shape
241
+ return
242
+
243
+ # Check for removed dimensions.
244
+ if len(shape) < self.ndim:
245
+ tmp_shape = (
246
+ self._shape[:len(shape) - 1] # Original shape without last axis
247
+ + (-1,) # Last axis is used to flatten the array
248
+ + (1,) * (self.ndim - len(shape)) # Pad with ones
249
+ )
250
+ tmp = self.reshape(tmp_shape)
251
+ self.coords = tmp.coords[:len(shape)]
252
+ self._shape = tmp.shape[:len(shape)]
253
+
254
+ # Handle truncation of existing dimensions.
255
+ is_truncating = any(old > new for old, new in zip(self.shape, shape))
256
+ if is_truncating:
257
+ mask = np.logical_and.reduce([
258
+ idx < size for idx, size in zip(self.coords, shape)
259
+ ])
260
+ if not mask.all():
261
+ self.coords = tuple(idx[mask] for idx in self.coords)
262
+ self.data = self.data[mask]
263
+
264
+ self._shape = shape
265
+
266
+ resize.__doc__ = _spbase.resize.__doc__
267
+
268
+ def toarray(self, order=None, out=None):
269
+ B = self._process_toarray_args(order, out)
270
+ fortran = int(B.flags.f_contiguous)
271
+ if not fortran and not B.flags.c_contiguous:
272
+ raise ValueError("Output array must be C or F contiguous")
273
+ if self.ndim > 2:
274
+ raise ValueError("Cannot densify higher-rank sparse array")
275
+ # This handles both 0D and 1D cases correctly regardless of the
276
+ # original shape.
277
+ M, N = self._shape_as_2d
278
+ coo_todense(M, N, self.nnz, self.row, self.col, self.data,
279
+ B.ravel('A'), fortran)
280
+ # Note: reshape() doesn't copy here, but does return a new array (view).
281
+ return B.reshape(self.shape)
282
+
283
+ toarray.__doc__ = _spbase.toarray.__doc__
284
+
285
+ def tocsc(self, copy=False):
286
+ """Convert this array/matrix to Compressed Sparse Column format
287
+
288
+ Duplicate entries will be summed together.
289
+
290
+ Examples
291
+ --------
292
+ >>> from numpy import array
293
+ >>> from scipy.sparse import coo_array
294
+ >>> row = array([0, 0, 1, 3, 1, 0, 0])
295
+ >>> col = array([0, 2, 1, 3, 1, 0, 0])
296
+ >>> data = array([1, 1, 1, 1, 1, 1, 1])
297
+ >>> A = coo_array((data, (row, col)), shape=(4, 4)).tocsc()
298
+ >>> A.toarray()
299
+ array([[3, 0, 1, 0],
300
+ [0, 2, 0, 0],
301
+ [0, 0, 0, 0],
302
+ [0, 0, 0, 1]])
303
+
304
+ """
305
+ if self.ndim != 2:
306
+ raise ValueError("Cannot convert a 1d sparse array to csc format")
307
+ if self.nnz == 0:
308
+ return self._csc_container(self.shape, dtype=self.dtype)
309
+ else:
310
+ from ._csc import csc_array
311
+ indptr, indices, data, shape = self._coo_to_compressed(csc_array._swap)
312
+
313
+ x = self._csc_container((data, indices, indptr), shape=shape)
314
+ if not self.has_canonical_format:
315
+ x.sum_duplicates()
316
+ return x
317
+
318
+ def tocsr(self, copy=False):
319
+ """Convert this array/matrix to Compressed Sparse Row format
320
+
321
+ Duplicate entries will be summed together.
322
+
323
+ Examples
324
+ --------
325
+ >>> from numpy import array
326
+ >>> from scipy.sparse import coo_array
327
+ >>> row = array([0, 0, 1, 3, 1, 0, 0])
328
+ >>> col = array([0, 2, 1, 3, 1, 0, 0])
329
+ >>> data = array([1, 1, 1, 1, 1, 1, 1])
330
+ >>> A = coo_array((data, (row, col)), shape=(4, 4)).tocsr()
331
+ >>> A.toarray()
332
+ array([[3, 0, 1, 0],
333
+ [0, 2, 0, 0],
334
+ [0, 0, 0, 0],
335
+ [0, 0, 0, 1]])
336
+
337
+ """
338
+ if self.ndim != 2:
339
+ raise ValueError("Cannot convert a 1d sparse array to csr format")
340
+ if self.nnz == 0:
341
+ return self._csr_container(self.shape, dtype=self.dtype)
342
+ else:
343
+ from ._csr import csr_array
344
+ indptr, indices, data, shape = self._coo_to_compressed(csr_array._swap)
345
+
346
+ x = self._csr_container((data, indices, indptr), shape=self.shape)
347
+ if not self.has_canonical_format:
348
+ x.sum_duplicates()
349
+ return x
350
+
351
+ def _coo_to_compressed(self, swap):
352
+ """convert (shape, coords, data) to (indptr, indices, data, shape)"""
353
+ M, N = swap(self.shape)
354
+ major, minor = swap(self.coords)
355
+ nnz = len(major)
356
+ # convert idx_dtype intc to int32 for pythran.
357
+ # tested in scipy/optimize/tests/test__numdiff.py::test_group_columns
358
+ idx_dtype = self._get_index_dtype(self.coords, maxval=max(self.nnz, N))
359
+ major = major.astype(idx_dtype, copy=False)
360
+ minor = minor.astype(idx_dtype, copy=False)
361
+
362
+ indptr = np.empty(M + 1, dtype=idx_dtype)
363
+ indices = np.empty_like(minor, dtype=idx_dtype)
364
+ data = np.empty_like(self.data, dtype=self.dtype)
365
+
366
+ coo_tocsr(M, N, nnz, major, minor, self.data, indptr, indices, data)
367
+ return indptr, indices, data, self.shape
368
+
369
+ def tocoo(self, copy=False):
370
+ if copy:
371
+ return self.copy()
372
+ else:
373
+ return self
374
+
375
+ tocoo.__doc__ = _spbase.tocoo.__doc__
376
+
377
+ def todia(self, copy=False):
378
+ if self.ndim != 2:
379
+ raise ValueError("Cannot convert a 1d sparse array to dia format")
380
+ self.sum_duplicates()
381
+ ks = self.col - self.row # the diagonal for each nonzero
382
+ diags, diag_idx = np.unique(ks, return_inverse=True)
383
+
384
+ if len(diags) > 100:
385
+ # probably undesired, should todia() have a maxdiags parameter?
386
+ warn("Constructing a DIA matrix with %d diagonals "
387
+ "is inefficient" % len(diags),
388
+ SparseEfficiencyWarning, stacklevel=2)
389
+
390
+ #initialize and fill in data array
391
+ if self.data.size == 0:
392
+ data = np.zeros((0, 0), dtype=self.dtype)
393
+ else:
394
+ data = np.zeros((len(diags), self.col.max()+1), dtype=self.dtype)
395
+ data[diag_idx, self.col] = self.data
396
+
397
+ return self._dia_container((data, diags), shape=self.shape)
398
+
399
+ todia.__doc__ = _spbase.todia.__doc__
400
+
401
+ def todok(self, copy=False):
402
+ self.sum_duplicates()
403
+ dok = self._dok_container(self.shape, dtype=self.dtype)
404
+ # ensure that 1d coordinates are not tuples
405
+ if self.ndim == 1:
406
+ coords = self.coords[0]
407
+ else:
408
+ coords = zip(*self.coords)
409
+
410
+ dok._dict = dict(zip(coords, self.data))
411
+ return dok
412
+
413
+ todok.__doc__ = _spbase.todok.__doc__
414
+
415
+ def diagonal(self, k=0):
416
+ if self.ndim != 2:
417
+ raise ValueError("diagonal requires two dimensions")
418
+ rows, cols = self.shape
419
+ if k <= -rows or k >= cols:
420
+ return np.empty(0, dtype=self.data.dtype)
421
+ diag = np.zeros(min(rows + min(k, 0), cols - max(k, 0)),
422
+ dtype=self.dtype)
423
+ diag_mask = (self.row + k) == self.col
424
+
425
+ if self.has_canonical_format:
426
+ row = self.row[diag_mask]
427
+ data = self.data[diag_mask]
428
+ else:
429
+ inds = tuple(idx[diag_mask] for idx in self.coords)
430
+ (row, _), data = self._sum_duplicates(inds, self.data[diag_mask])
431
+ diag[row + min(k, 0)] = data
432
+
433
+ return diag
434
+
435
+ diagonal.__doc__ = _data_matrix.diagonal.__doc__
436
+
437
+ def _setdiag(self, values, k):
438
+ if self.ndim != 2:
439
+ raise ValueError("setting a diagonal requires two dimensions")
440
+ M, N = self.shape
441
+ if values.ndim and not len(values):
442
+ return
443
+ idx_dtype = self.row.dtype
444
+
445
+ # Determine which triples to keep and where to put the new ones.
446
+ full_keep = self.col - self.row != k
447
+ if k < 0:
448
+ max_index = min(M+k, N)
449
+ if values.ndim:
450
+ max_index = min(max_index, len(values))
451
+ keep = np.logical_or(full_keep, self.col >= max_index)
452
+ new_row = np.arange(-k, -k + max_index, dtype=idx_dtype)
453
+ new_col = np.arange(max_index, dtype=idx_dtype)
454
+ else:
455
+ max_index = min(M, N-k)
456
+ if values.ndim:
457
+ max_index = min(max_index, len(values))
458
+ keep = np.logical_or(full_keep, self.row >= max_index)
459
+ new_row = np.arange(max_index, dtype=idx_dtype)
460
+ new_col = np.arange(k, k + max_index, dtype=idx_dtype)
461
+
462
+ # Define the array of data consisting of the entries to be added.
463
+ if values.ndim:
464
+ new_data = values[:max_index]
465
+ else:
466
+ new_data = np.empty(max_index, dtype=self.dtype)
467
+ new_data[:] = values
468
+
469
+ # Update the internal structure.
470
+ self.coords = (np.concatenate((self.row[keep], new_row)),
471
+ np.concatenate((self.col[keep], new_col)))
472
+ self.data = np.concatenate((self.data[keep], new_data))
473
+ self.has_canonical_format = False
474
+
475
+ # needed by _data_matrix
476
+ def _with_data(self, data, copy=True):
477
+ """Returns a matrix with the same sparsity structure as self,
478
+ but with different data. By default the index arrays are copied.
479
+ """
480
+ if copy:
481
+ coords = tuple(idx.copy() for idx in self.coords)
482
+ else:
483
+ coords = self.coords
484
+ return self.__class__((data, coords), shape=self.shape, dtype=data.dtype)
485
+
486
+ def sum_duplicates(self) -> None:
487
+ """Eliminate duplicate entries by adding them together
488
+
489
+ This is an *in place* operation
490
+ """
491
+ if self.has_canonical_format:
492
+ return
493
+ summed = self._sum_duplicates(self.coords, self.data)
494
+ self.coords, self.data = summed
495
+ self.has_canonical_format = True
496
+
497
+ def _sum_duplicates(self, coords, data):
498
+ # Assumes coords not in canonical format.
499
+ if len(data) == 0:
500
+ return coords, data
501
+ # Sort coords w.r.t. rows, then cols. This corresponds to C-order,
502
+ # which we rely on for argmin/argmax to return the first index in the
503
+ # same way that numpy does (in the case of ties).
504
+ order = np.lexsort(coords[::-1])
505
+ coords = tuple(idx[order] for idx in coords)
506
+ data = data[order]
507
+ unique_mask = np.logical_or.reduce([
508
+ idx[1:] != idx[:-1] for idx in coords
509
+ ])
510
+ unique_mask = np.append(True, unique_mask)
511
+ coords = tuple(idx[unique_mask] for idx in coords)
512
+ unique_inds, = np.nonzero(unique_mask)
513
+ data = np.add.reduceat(data, unique_inds, dtype=self.dtype)
514
+ return coords, data
515
+
516
+    def eliminate_zeros(self):
+        """Remove zero entries from the array/matrix
+
+        This is an *in place* operation
+        """
+        mask = self.data != 0
+        self.data = self.data[mask]
+        self.coords = tuple(idx[mask] for idx in self.coords)
+
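A hedged sketch of the two in-place clean-up methods above working together: sum_duplicates collapses repeated coordinates (possibly producing explicit zeros), and eliminate_zeros then drops them.

import numpy as np
from scipy.sparse import coo_array

# Two entries at (0, 0) that cancel to an explicit zero.
A = coo_array((np.array([2, -2, 5]),
               (np.array([0, 0, 1]), np.array([0, 0, 1]))), shape=(2, 2))
A.sum_duplicates()      # in place: the (0, 0) entries are added -> explicit 0
A.eliminate_zeros()     # in place: the explicit 0 is removed from data/coords
print(A.nnz)            # 1
print(A.toarray())      # [[0 0]
                        #  [0 5]]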
525
+    #######################
+    # Arithmetic handlers #
+    #######################
+
529
+    def _add_dense(self, other):
+        if other.shape != self.shape:
+            raise ValueError(f'Incompatible shapes ({self.shape} and {other.shape})')
+        dtype = upcast_char(self.dtype.char, other.dtype.char)
+        result = np.array(other, dtype=dtype, copy=True)
+        fortran = int(result.flags.f_contiguous)
+        M, N = self._shape_as_2d
+        coo_todense(M, N, self.nnz, self.row, self.col, self.data,
+                    result.ravel('A'), fortran)
+        return self._container(result, copy=False)
+
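For illustration, a small sketch of the dense-addition path above: adding a dense ndarray to a COO array dispatches to _add_dense and produces a dense result.

import numpy as np
from scipy.sparse import coo_array

A = coo_array((np.array([1, 2]), (np.array([0, 1]), np.array([1, 0]))), shape=(2, 2))
D = np.ones((2, 2))
print(A + D)   # dense output:
# [[1. 2.]
#  [3. 1.]]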
540
+    def _matmul_vector(self, other):
+        result_shape = self.shape[0] if self.ndim > 1 else 1
+        result = np.zeros(result_shape,
+                          dtype=upcast_char(self.dtype.char, other.dtype.char))
+
+        if self.ndim == 2:
+            col = self.col
+            row = self.row
+        elif self.ndim == 1:
+            col = self.coords[0]
+            row = np.zeros_like(col)
+        else:
+            raise NotImplementedError(
+                f"coo_matvec not implemented for ndim={self.ndim}")
+
+        coo_matvec(self.nnz, row, col, self.data, other, result)
+        # Array semantics return a scalar here, not a single-element array.
+        if isinstance(self, sparray) and result_shape == 1:
+            return result[0]
+        return result
+
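A minimal matrix-vector sketch (not part of the diff) of the handler above: `A @ x` routes through _matmul_vector, and with array semantics a length-one result comes back as a scalar per the final branch.

import numpy as np
from scipy.sparse import coo_array

A = coo_array((np.array([4, 5, 7]),
               (np.array([0, 1, 1]), np.array([0, 1, 2]))), shape=(2, 3))
x = np.array([1, 2, 3])
print(A @ x)   # [ 4 31]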
561
+    def _matmul_multivector(self, other):
+        result_dtype = upcast_char(self.dtype.char, other.dtype.char)
+        if self.ndim == 2:
+            result_shape = (other.shape[1], self.shape[0])
+            col = self.col
+            row = self.row
+        elif self.ndim == 1:
+            result_shape = (other.shape[1],)
+            col = self.coords[0]
+            row = np.zeros_like(col)
+        else:
+            raise NotImplementedError(
+                f"coo_matvec not implemented for ndim={self.ndim}")
+
+        result = np.zeros(result_shape, dtype=result_dtype)
+        for i, other_col in enumerate(other.T):
+            coo_matvec(self.nnz, row, col, self.data, other_col, result[i:i + 1])
+        return result.T.view(type=type(other))
+
+
581
+def _ravel_coords(coords, shape, order='C'):
+    """Like np.ravel_multi_index, but avoids some overflow issues."""
+    if len(coords) == 1:
+        return coords[0]
+    # Handle overflow as in https://github.com/scipy/scipy/pull/9132
+    if len(coords) == 2:
+        nrows, ncols = shape
+        row, col = coords
+        if order == 'C':
+            maxval = (ncols * max(0, nrows - 1) + max(0, ncols - 1))
+            idx_dtype = get_index_dtype(maxval=maxval)
+            return np.multiply(ncols, row, dtype=idx_dtype) + col
+        elif order == 'F':
+            maxval = (nrows * max(0, ncols - 1) + max(0, nrows - 1))
+            idx_dtype = get_index_dtype(maxval=maxval)
+            return np.multiply(nrows, col, dtype=idx_dtype) + row
+        else:
+            raise ValueError("'order' must be 'C' or 'F'")
+    return np.ravel_multi_index(coords, shape, order=order)
+
+
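A small numeric check (illustrative only) of what the helper above computes in the common 2-D C-order case: row-major flattening, flat = row * ncols + col, matching np.ravel_multi_index while choosing an index dtype wide enough for maxval.

import numpy as np

row = np.array([0, 1, 2])
col = np.array([3, 0, 2])
shape = (3, 4)
print(row * shape[1] + col)                     # [ 3  4 10]
print(np.ravel_multi_index((row, col), shape))  # [ 3  4 10]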
602
+def isspmatrix_coo(x):
+    """Is `x` of coo_matrix type?
+
+    Parameters
+    ----------
+    x
+        object to check for being a coo matrix
+
+    Returns
+    -------
+    bool
+        True if `x` is a coo matrix, False otherwise
+
+    Examples
+    --------
+    >>> from scipy.sparse import coo_array, coo_matrix, csr_matrix, isspmatrix_coo
+    >>> isspmatrix_coo(coo_matrix([[5]]))
+    True
+    >>> isspmatrix_coo(coo_array([[5]]))
+    False
+    >>> isspmatrix_coo(csr_matrix([[5]]))
+    False
+    """
+    return isinstance(x, coo_matrix)
+
+
628
+# This namespace class separates array from matrix with isinstance
+class coo_array(_coo_base, sparray):
+    """
+    A sparse array in COOrdinate format.
+
+    Also known as the 'ijv' or 'triplet' format.
+
+    This can be instantiated in several ways:
+        coo_array(D)
+            where D is an ndarray
+
+        coo_array(S)
+            with another sparse array or matrix S (equivalent to S.tocoo())
+
+        coo_array(shape, [dtype])
+            to construct an empty sparse array with shape `shape`
+            dtype is optional, defaulting to dtype='d'.
+
+        coo_array((data, coords), [shape])
+            to construct from existing data and index arrays:
+                1. data[:]       the entries of the sparse array, in any order
+                2. coords[i][:]  the axis-i coordinates of the data entries
+
+            Where ``A[coords] = data``, and coords is a tuple of index arrays.
+            When shape is not specified, it is inferred from the index arrays.
+
+    Attributes
+    ----------
+    dtype : dtype
+        Data type of the sparse array
+    shape : tuple of integers
+        Shape of the sparse array
+    ndim : int
+        Number of dimensions of the sparse array
+    nnz
+    size
+    data
+        COO format data array of the sparse array
+    coords
+        COO format tuple of index arrays
+    has_canonical_format : bool
+        Whether the matrix has sorted coordinates and no duplicates
+    format
+    T
+
+    Notes
+    -----
+
+    Sparse arrays can be used in arithmetic operations: they support
+    addition, subtraction, multiplication, division, and matrix power.
+
+    Advantages of the COO format
+        - facilitates fast conversion among sparse formats
+        - permits duplicate entries (see example)
+        - very fast conversion to and from CSR/CSC formats
+
+    Disadvantages of the COO format
+        - does not directly support:
+            + arithmetic operations
+            + slicing
+
+    Intended Usage
+        - COO is a fast format for constructing sparse arrays
+        - Once a COO array has been constructed, convert to CSR or
+          CSC format for fast arithmetic and matrix vector operations
+        - By default when converting to CSR or CSC format, duplicate (i,j)
+          entries will be summed together. This facilitates efficient
+          construction of finite element matrices and the like. (see example)
+
+    Canonical format
+        - Entries and coordinates sorted by row, then column.
+        - There are no duplicate entries (i.e. duplicate (i,j) locations)
+        - Data arrays MAY have explicit zeros.
+
+    Examples
+    --------
+
+    >>> # Constructing an empty sparse array
+    >>> import numpy as np
+    >>> from scipy.sparse import coo_array
+    >>> coo_array((3, 4), dtype=np.int8).toarray()
+    array([[0, 0, 0, 0],
+           [0, 0, 0, 0],
+           [0, 0, 0, 0]], dtype=int8)
+
+    >>> # Constructing a sparse array using ijv format
+    >>> row = np.array([0, 3, 1, 0])
+    >>> col = np.array([0, 3, 1, 2])
+    >>> data = np.array([4, 5, 7, 9])
+    >>> coo_array((data, (row, col)), shape=(4, 4)).toarray()
+    array([[4, 0, 9, 0],
+           [0, 7, 0, 0],
+           [0, 0, 0, 0],
+           [0, 0, 0, 5]])
+
+    >>> # Constructing a sparse array with duplicate coordinates
+    >>> row = np.array([0, 0, 1, 3, 1, 0, 0])
+    >>> col = np.array([0, 2, 1, 3, 1, 0, 0])
+    >>> data = np.array([1, 1, 1, 1, 1, 1, 1])
+    >>> coo = coo_array((data, (row, col)), shape=(4, 4))
+    >>> # Duplicate coordinates are maintained until implicitly or explicitly summed
+    >>> np.max(coo.data)
+    1
+    >>> coo.toarray()
+    array([[3, 0, 1, 0],
+           [0, 2, 0, 0],
+           [0, 0, 0, 0],
+           [0, 0, 0, 1]])
+
+    """
+
+
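Following the "Intended Usage" notes in the docstring above, a short sketch (not part of the committed file) of the construct-in-COO-then-convert workflow; duplicate coordinates are summed during the CSR conversion.

import numpy as np
from scipy.sparse import coo_array

row = np.array([0, 0, 1, 1])
col = np.array([0, 0, 2, 2])
data = np.array([1.0, 2.0, 3.0, 4.0])
A = coo_array((data, (row, col)), shape=(2, 3))
B = A.tocsr()          # duplicates at (0, 0) and (1, 2) are summed here
print(B.toarray())
# [[3. 0. 0.]
#  [0. 0. 7.]]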
740
+class coo_matrix(spmatrix, _coo_base):
+    """
+    A sparse matrix in COOrdinate format.
+
+    Also known as the 'ijv' or 'triplet' format.
+
+    This can be instantiated in several ways:
+        coo_matrix(D)
+            where D is a 2-D ndarray
+
+        coo_matrix(S)
+            with another sparse array or matrix S (equivalent to S.tocoo())
+
+        coo_matrix((M, N), [dtype])
+            to construct an empty matrix with shape (M, N)
+            dtype is optional, defaulting to dtype='d'.
+
+        coo_matrix((data, (i, j)), [shape=(M, N)])
+            to construct from three arrays:
+                1. data[:]  the entries of the matrix, in any order
+                2. i[:]     the row indices of the matrix entries
+                3. j[:]     the column indices of the matrix entries
+
+            Where ``A[i[k], j[k]] = data[k]``. When shape is not
+            specified, it is inferred from the index arrays
+
+    Attributes
+    ----------
+    dtype : dtype
+        Data type of the matrix
+    shape : 2-tuple
+        Shape of the matrix
+    ndim : int
+        Number of dimensions (this is always 2)
+    nnz
+    size
+    data
+        COO format data array of the matrix
+    row
+        COO format row index array of the matrix
+    col
+        COO format column index array of the matrix
+    has_canonical_format : bool
+        Whether the matrix has sorted indices and no duplicates
+    format
+    T
+
+    Notes
+    -----
+
+    Sparse matrices can be used in arithmetic operations: they support
+    addition, subtraction, multiplication, division, and matrix power.
+
+    Advantages of the COO format
+        - facilitates fast conversion among sparse formats
+        - permits duplicate entries (see example)
+        - very fast conversion to and from CSR/CSC formats
+
+    Disadvantages of the COO format
+        - does not directly support:
+            + arithmetic operations
+            + slicing
+
+    Intended Usage
+        - COO is a fast format for constructing sparse matrices
+        - Once a COO matrix has been constructed, convert to CSR or
+          CSC format for fast arithmetic and matrix vector operations
+        - By default when converting to CSR or CSC format, duplicate (i,j)
+          entries will be summed together. This facilitates efficient
+          construction of finite element matrices and the like. (see example)
+
+    Canonical format
+        - Entries and coordinates sorted by row, then column.
+        - There are no duplicate entries (i.e. duplicate (i,j) locations)
+        - Data arrays MAY have explicit zeros.
+
+    Examples
+    --------
+
+    >>> # Constructing an empty matrix
+    >>> import numpy as np
+    >>> from scipy.sparse import coo_matrix
+    >>> coo_matrix((3, 4), dtype=np.int8).toarray()
+    array([[0, 0, 0, 0],
+           [0, 0, 0, 0],
+           [0, 0, 0, 0]], dtype=int8)
+
+    >>> # Constructing a matrix using ijv format
+    >>> row = np.array([0, 3, 1, 0])
+    >>> col = np.array([0, 3, 1, 2])
+    >>> data = np.array([4, 5, 7, 9])
+    >>> coo_matrix((data, (row, col)), shape=(4, 4)).toarray()
+    array([[4, 0, 9, 0],
+           [0, 7, 0, 0],
+           [0, 0, 0, 0],
+           [0, 0, 0, 5]])
+
+    >>> # Constructing a matrix with duplicate coordinates
+    >>> row = np.array([0, 0, 1, 3, 1, 0, 0])
+    >>> col = np.array([0, 2, 1, 3, 1, 0, 0])
+    >>> data = np.array([1, 1, 1, 1, 1, 1, 1])
+    >>> coo = coo_matrix((data, (row, col)), shape=(4, 4))
+    >>> # Duplicate coordinates are maintained until implicitly or explicitly summed
+    >>> np.max(coo.data)
+    1
+    >>> coo.toarray()
+    array([[3, 0, 1, 0],
+           [0, 2, 0, 0],
+           [0, 0, 0, 0],
+           [0, 0, 0, 1]])
+
+    """
+
853
+    def __setstate__(self, state):
+        if 'coords' not in state:
+            # For retro-compatibility with the previous attributes
+            # storing nnz coordinates for 2D COO matrix.
+            state['coords'] = (state.pop('row'), state.pop('col'))
+        self.__dict__.update(state)
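An illustrative round-trip (not part of the diff) showing why __setstate__ above normalizes old pickles: earlier scipy versions stored separate row/col attributes, while current objects store a coords tuple, and both unpickle to the same structure.

import pickle
import numpy as np
from scipy.sparse import coo_matrix

A = coo_matrix((np.array([1, 2]), (np.array([0, 1]), np.array([1, 0]))), shape=(2, 2))
B = pickle.loads(pickle.dumps(A))   # current-format state already carries 'coords'
print(np.array_equal(A.toarray(), B.toarray()))  # True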
llmeval-env/lib/python3.10/site-packages/scipy/sparse/_csparsetools.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (823 kB). View file