applied-ai-018 commited on
Commit
110ba22
·
verified ·
1 Parent(s): 04dfba6

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__init__.py +324 -0
  2. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_base.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_bsr.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_compressed.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_construct.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_coo.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_csc.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_csr.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_data.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_dok.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_extract.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_index.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_lil.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_matrix.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_matrix_io.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_spfuncs.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/compressed.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/construct.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/coo.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/csc.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/data.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/dia.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/dok.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/lil.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/sparsetools.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/spfuncs.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/scipy/sparse/_bsr.py +855 -0
  28. env-llmeval/lib/python3.10/site-packages/scipy/sparse/_compressed.py +1367 -0
  29. env-llmeval/lib/python3.10/site-packages/scipy/sparse/_construct.py +1401 -0
  30. env-llmeval/lib/python3.10/site-packages/scipy/sparse/_csparsetools.cpython-310-x86_64-linux-gnu.so +0 -0
  31. env-llmeval/lib/python3.10/site-packages/scipy/sparse/_csr.py +491 -0
  32. env-llmeval/lib/python3.10/site-packages/scipy/sparse/_data.py +506 -0
  33. env-llmeval/lib/python3.10/site-packages/scipy/sparse/_dia.py +563 -0
  34. env-llmeval/lib/python3.10/site-packages/scipy/sparse/_dok.py +672 -0
  35. env-llmeval/lib/python3.10/site-packages/scipy/sparse/_index.py +392 -0
  36. env-llmeval/lib/python3.10/site-packages/scipy/sparse/_matrix.py +113 -0
  37. env-llmeval/lib/python3.10/site-packages/scipy/sparse/_matrix_io.py +167 -0
  38. env-llmeval/lib/python3.10/site-packages/scipy/sparse/_sputils.py +451 -0
  39. env-llmeval/lib/python3.10/site-packages/scipy/sparse/base.py +33 -0
  40. env-llmeval/lib/python3.10/site-packages/scipy/sparse/bsr.py +36 -0
  41. env-llmeval/lib/python3.10/site-packages/scipy/sparse/compressed.py +43 -0
  42. env-llmeval/lib/python3.10/site-packages/scipy/sparse/coo.py +37 -0
  43. env-llmeval/lib/python3.10/site-packages/scipy/sparse/csc.py +25 -0
  44. env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/__init__.py +208 -0
  45. env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/__pycache__/__init__.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/__pycache__/_laplacian.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/__pycache__/_validation.cpython-310.pyc +0 -0
  48. env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/_flow.cpython-310-x86_64-linux-gnu.so +0 -0
  49. env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/_laplacian.py +562 -0
  50. env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/_matching.cpython-310-x86_64-linux-gnu.so +0 -0
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__init__.py ADDED
@@ -0,0 +1,324 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ =====================================
3
+ Sparse matrices (:mod:`scipy.sparse`)
4
+ =====================================
5
+
6
+ .. currentmodule:: scipy.sparse
7
+
8
+ .. toctree::
9
+ :hidden:
10
+
11
+ sparse.csgraph
12
+ sparse.linalg
13
+
14
+ SciPy 2-D sparse array package for numeric data.
15
+
16
+ .. note::
17
+
18
+ This package is switching to an array interface, compatible with
19
+ NumPy arrays, from the older matrix interface. We recommend that
20
+ you use the array objects (`bsr_array`, `coo_array`, etc.) for
21
+ all new work.
22
+
23
+ When using the array interface, please note that:
24
+
25
+ - ``x * y`` no longer performs matrix multiplication, but
26
+ element-wise multiplication (just like with NumPy arrays). To
27
+ make code work with both arrays and matrices, use ``x @ y`` for
28
+ matrix multiplication.
29
+ - Operations such as `sum`, that used to produce dense matrices, now
30
+ produce arrays, whose multiplication behavior differs similarly.
31
+ - Sparse arrays currently must be two-dimensional. This also means
32
+ that all *slicing* operations on these objects must produce
33
+ two-dimensional results, or they will result in an error. This
34
+ will be addressed in a future version.
35
+
36
+ The construction utilities (`eye`, `kron`, `random`, `diags`, etc.)
37
+ have not yet been ported, but their results can be wrapped into arrays::
38
+
39
+ A = csr_array(eye(3))
40
+
41
+ Contents
42
+ ========
43
+
44
+ Sparse array classes
45
+ --------------------
46
+
47
+ .. autosummary::
48
+ :toctree: generated/
49
+
50
+ bsr_array - Block Sparse Row array
51
+ coo_array - A sparse array in COOrdinate format
52
+ csc_array - Compressed Sparse Column array
53
+ csr_array - Compressed Sparse Row array
54
+ dia_array - Sparse array with DIAgonal storage
55
+ dok_array - Dictionary Of Keys based sparse array
56
+ lil_array - Row-based list of lists sparse array
57
+ sparray - Sparse array base class
58
+
59
+ Sparse matrix classes
60
+ ---------------------
61
+
62
+ .. autosummary::
63
+ :toctree: generated/
64
+
65
+ bsr_matrix - Block Sparse Row matrix
66
+ coo_matrix - A sparse matrix in COOrdinate format
67
+ csc_matrix - Compressed Sparse Column matrix
68
+ csr_matrix - Compressed Sparse Row matrix
69
+ dia_matrix - Sparse matrix with DIAgonal storage
70
+ dok_matrix - Dictionary Of Keys based sparse matrix
71
+ lil_matrix - Row-based list of lists sparse matrix
72
+ spmatrix - Sparse matrix base class
73
+
74
+ Functions
75
+ ---------
76
+
77
+ Building sparse arrays:
78
+
79
+ .. autosummary::
80
+ :toctree: generated/
81
+
82
+ diags_array - Return a sparse array from diagonals
83
+ eye_array - Sparse MxN array whose k-th diagonal is all ones
84
+ random_array - Random values in a given shape array
85
+ block_array - Build a sparse array from sub-blocks
86
+
87
+ Building sparse matrices:
88
+
89
+ .. autosummary::
90
+ :toctree: generated/
91
+
92
+ eye - Sparse MxN matrix whose k-th diagonal is all ones
93
+ identity - Identity matrix in sparse matrix format
94
+ diags - Return a sparse matrix from diagonals
95
+ spdiags - Return a sparse matrix from diagonals
96
+ bmat - Build a sparse matrix from sparse sub-blocks
97
+ random - Random values in a given shape matrix
98
+ rand - Random values in a given shape matrix (old interface)
99
+
100
+ Building larger structures from smaller (array or matrix)
101
+
102
+ .. autosummary::
103
+ :toctree: generated/
104
+
105
+ kron - kronecker product of two sparse matrices
106
+ kronsum - kronecker sum of sparse matrices
107
+ block_diag - Build a block diagonal sparse matrix
108
+ tril - Lower triangular portion of a matrix in sparse format
109
+ triu - Upper triangular portion of a matrix in sparse format
110
+ hstack - Stack sparse matrices horizontally (column wise)
111
+ vstack - Stack sparse matrices vertically (row wise)
112
+
113
+ Save and load sparse matrices:
114
+
115
+ .. autosummary::
116
+ :toctree: generated/
117
+
118
+ save_npz - Save a sparse matrix/array to a file using ``.npz`` format.
119
+ load_npz - Load a sparse matrix/array from a file using ``.npz`` format.
120
+
121
+ Sparse tools:
122
+
123
+ .. autosummary::
124
+ :toctree: generated/
125
+
126
+ find
127
+
128
+ Identifying sparse arrays:
129
+
130
+ - use `isinstance(A, sp.sparse.sparray)` to check whether an array or matrix.
131
+ - use `A.format == 'csr'` to check the sparse format
132
+
133
+ Identifying sparse matrices:
134
+
135
+ .. autosummary::
136
+ :toctree: generated/
137
+
138
+ issparse
139
+ isspmatrix
140
+ isspmatrix_csc
141
+ isspmatrix_csr
142
+ isspmatrix_bsr
143
+ isspmatrix_lil
144
+ isspmatrix_dok
145
+ isspmatrix_coo
146
+ isspmatrix_dia
147
+
148
+ Submodules
149
+ ----------
150
+
151
+ .. autosummary::
152
+
153
+ csgraph - Compressed sparse graph routines
154
+ linalg - sparse linear algebra routines
155
+
156
+ Exceptions
157
+ ----------
158
+
159
+ .. autosummary::
160
+ :toctree: generated/
161
+
162
+ SparseEfficiencyWarning
163
+ SparseWarning
164
+
165
+
166
+ Usage information
167
+ =================
168
+
169
+ There are seven available sparse array types:
170
+
171
+ 1. `csc_array`: Compressed Sparse Column format
172
+ 2. `csr_array`: Compressed Sparse Row format
173
+ 3. `bsr_array`: Block Sparse Row format
174
+ 4. `lil_array`: List of Lists format
175
+ 5. `dok_array`: Dictionary of Keys format
176
+ 6. `coo_array`: COOrdinate format (aka IJV, triplet format)
177
+ 7. `dia_array`: DIAgonal format
178
+
179
+ To construct an array efficiently, use either `dok_array` or `lil_array`.
180
+ The `lil_array` class supports basic slicing and fancy indexing with a
181
+ similar syntax to NumPy arrays. As illustrated below, the COO format
182
+ may also be used to efficiently construct arrays. Despite their
183
+ similarity to NumPy arrays, it is **strongly discouraged** to use NumPy
184
+ functions directly on these arrays because NumPy may not properly convert
185
+ them for computations, leading to unexpected (and incorrect) results. If you
186
+ do want to apply a NumPy function to these arrays, first check if SciPy has
187
+ its own implementation for the given sparse array class, or **convert the
188
+ sparse array to a NumPy array** (e.g., using the ``toarray`` method of the
189
+ class) first before applying the method.
190
+
191
+ To perform manipulations such as multiplication or inversion, first
192
+ convert the array to either CSC or CSR format. The `lil_array` format is
193
+ row-based, so conversion to CSR is efficient, whereas conversion to CSC
194
+ is less so.
195
+
196
+ All conversions among the CSR, CSC, and COO formats are efficient,
197
+ linear-time operations.
198
+
199
+ Matrix vector product
200
+ ---------------------
201
+ To do a vector product between a sparse array and a vector simply use
202
+ the array ``dot`` method, as described in its docstring:
203
+
204
+ >>> import numpy as np
205
+ >>> from scipy.sparse import csr_array
206
+ >>> A = csr_array([[1, 2, 0], [0, 0, 3], [4, 0, 5]])
207
+ >>> v = np.array([1, 0, -1])
208
+ >>> A.dot(v)
209
+ array([ 1, -3, -1], dtype=int64)
210
+
211
+ .. warning:: As of NumPy 1.7, ``np.dot`` is not aware of sparse arrays,
212
+ therefore using it will result on unexpected results or errors.
213
+ The corresponding dense array should be obtained first instead:
214
+
215
+ >>> np.dot(A.toarray(), v)
216
+ array([ 1, -3, -1], dtype=int64)
217
+
218
+ but then all the performance advantages would be lost.
219
+
220
+ The CSR format is especially suitable for fast matrix vector products.
221
+
222
+ Example 1
223
+ ---------
224
+ Construct a 1000x1000 `lil_array` and add some values to it:
225
+
226
+ >>> from scipy.sparse import lil_array
227
+ >>> from scipy.sparse.linalg import spsolve
228
+ >>> from numpy.linalg import solve, norm
229
+ >>> from numpy.random import rand
230
+
231
+ >>> A = lil_array((1000, 1000))
232
+ >>> A[0, :100] = rand(100)
233
+ >>> A[1, 100:200] = A[0, :100]
234
+ >>> A.setdiag(rand(1000))
235
+
236
+ Now convert it to CSR format and solve A x = b for x:
237
+
238
+ >>> A = A.tocsr()
239
+ >>> b = rand(1000)
240
+ >>> x = spsolve(A, b)
241
+
242
+ Convert it to a dense array and solve, and check that the result
243
+ is the same:
244
+
245
+ >>> x_ = solve(A.toarray(), b)
246
+
247
+ Now we can compute norm of the error with:
248
+
249
+ >>> err = norm(x-x_)
250
+ >>> err < 1e-10
251
+ True
252
+
253
+ It should be small :)
254
+
255
+
256
+ Example 2
257
+ ---------
258
+
259
+ Construct an array in COO format:
260
+
261
+ >>> from scipy import sparse
262
+ >>> from numpy import array
263
+ >>> I = array([0,3,1,0])
264
+ >>> J = array([0,3,1,2])
265
+ >>> V = array([4,5,7,9])
266
+ >>> A = sparse.coo_array((V,(I,J)),shape=(4,4))
267
+
268
+ Notice that the indices do not need to be sorted.
269
+
270
+ Duplicate (i,j) entries are summed when converting to CSR or CSC.
271
+
272
+ >>> I = array([0,0,1,3,1,0,0])
273
+ >>> J = array([0,2,1,3,1,0,0])
274
+ >>> V = array([1,1,1,1,1,1,1])
275
+ >>> B = sparse.coo_array((V,(I,J)),shape=(4,4)).tocsr()
276
+
277
+ This is useful for constructing finite-element stiffness and mass matrices.
278
+
279
+ Further details
280
+ ---------------
281
+
282
+ CSR column indices are not necessarily sorted. Likewise for CSC row
283
+ indices. Use the ``.sorted_indices()`` and ``.sort_indices()`` methods when
284
+ sorted indices are required (e.g., when passing data to other libraries).
285
+
286
+ """
287
+
288
+ # Original code by Travis Oliphant.
289
+ # Modified and extended by Ed Schofield, Robert Cimrman,
290
+ # Nathan Bell, and Jake Vanderplas.
291
+
292
+ import warnings as _warnings
293
+
294
+ from ._base import *
295
+ from ._csr import *
296
+ from ._csc import *
297
+ from ._lil import *
298
+ from ._dok import *
299
+ from ._coo import *
300
+ from ._dia import *
301
+ from ._bsr import *
302
+ from ._construct import *
303
+ from ._extract import *
304
+ from ._matrix import spmatrix
305
+ from ._matrix_io import *
306
+
307
+ # For backward compatibility with v0.19.
308
+ from . import csgraph
309
+
310
+ # Deprecated namespaces, to be removed in v2.0.0
311
+ from . import (
312
+ base, bsr, compressed, construct, coo, csc, csr, data, dia, dok, extract,
313
+ lil, sparsetools, sputils
314
+ )
315
+
316
+ __all__ = [s for s in dir() if not s.startswith('_')]
317
+
318
+ # Filter PendingDeprecationWarning for np.matrix introduced with numpy 1.15
319
+ msg = 'the matrix subclass is not the recommended way'
320
+ _warnings.filterwarnings('ignore', message=msg)
321
+
322
+ from scipy._lib._testutils import PytestTester
323
+ test = PytestTester(__name__)
324
+ del PytestTester
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_base.cpython-310.pyc ADDED
Binary file (46.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_bsr.cpython-310.pyc ADDED
Binary file (22.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_compressed.cpython-310.pyc ADDED
Binary file (33.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_construct.cpython-310.pyc ADDED
Binary file (42.6 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_coo.cpython-310.pyc ADDED
Binary file (27 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_csc.cpython-310.pyc ADDED
Binary file (11.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_csr.cpython-310.pyc ADDED
Binary file (14.7 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_data.cpython-310.pyc ADDED
Binary file (15.6 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_dok.cpython-310.pyc ADDED
Binary file (22.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_extract.cpython-310.pyc ADDED
Binary file (5.01 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_index.cpython-310.pyc ADDED
Binary file (11 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_lil.cpython-310.pyc ADDED
Binary file (18.7 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_matrix.cpython-310.pyc ADDED
Binary file (4.22 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_matrix_io.cpython-310.pyc ADDED
Binary file (5.36 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_spfuncs.cpython-310.pyc ADDED
Binary file (1.82 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/compressed.cpython-310.pyc ADDED
Binary file (947 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/construct.cpython-310.pyc ADDED
Binary file (856 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/coo.cpython-310.pyc ADDED
Binary file (811 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/csc.cpython-310.pyc ADDED
Binary file (648 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/data.cpython-310.pyc ADDED
Binary file (624 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/dia.cpython-310.pyc ADDED
Binary file (704 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/dok.cpython-310.pyc ADDED
Binary file (730 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/lil.cpython-310.pyc ADDED
Binary file (737 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/sparsetools.cpython-310.pyc ADDED
Binary file (1.78 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/spfuncs.cpython-310.pyc ADDED
Binary file (644 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/_bsr.py ADDED
@@ -0,0 +1,855 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Compressed Block Sparse Row format"""
2
+
3
+ __docformat__ = "restructuredtext en"
4
+
5
+ __all__ = ['bsr_array', 'bsr_matrix', 'isspmatrix_bsr']
6
+
7
+ from warnings import warn
8
+
9
+ import numpy as np
10
+
11
+ from scipy._lib._util import copy_if_needed
12
+ from ._matrix import spmatrix
13
+ from ._data import _data_matrix, _minmax_mixin
14
+ from ._compressed import _cs_matrix
15
+ from ._base import issparse, _formats, _spbase, sparray
16
+ from ._sputils import (isshape, getdtype, getdata, to_native, upcast,
17
+ check_shape)
18
+ from . import _sparsetools
19
+ from ._sparsetools import (bsr_matvec, bsr_matvecs, csr_matmat_maxnnz,
20
+ bsr_matmat, bsr_transpose, bsr_sort_indices,
21
+ bsr_tocsr)
22
+
23
+
24
+ class _bsr_base(_cs_matrix, _minmax_mixin):
25
+ _format = 'bsr'
26
+
27
+ def __init__(self, arg1, shape=None, dtype=None, copy=False, blocksize=None):
28
+ _data_matrix.__init__(self)
29
+
30
+ if issparse(arg1):
31
+ if arg1.format == self.format and copy:
32
+ arg1 = arg1.copy()
33
+ else:
34
+ arg1 = arg1.tobsr(blocksize=blocksize)
35
+ self.indptr, self.indices, self.data, self._shape = (
36
+ arg1.indptr, arg1.indices, arg1.data, arg1._shape
37
+ )
38
+
39
+ elif isinstance(arg1,tuple):
40
+ if isshape(arg1):
41
+ # it's a tuple of matrix dimensions (M,N)
42
+ self._shape = check_shape(arg1)
43
+ M,N = self.shape
44
+ # process blocksize
45
+ if blocksize is None:
46
+ blocksize = (1,1)
47
+ else:
48
+ if not isshape(blocksize):
49
+ raise ValueError('invalid blocksize=%s' % blocksize)
50
+ blocksize = tuple(blocksize)
51
+ self.data = np.zeros((0,) + blocksize, getdtype(dtype, default=float))
52
+
53
+ R,C = blocksize
54
+ if (M % R) != 0 or (N % C) != 0:
55
+ raise ValueError('shape must be multiple of blocksize')
56
+
57
+ # Select index dtype large enough to pass array and
58
+ # scalar parameters to sparsetools
59
+ idx_dtype = self._get_index_dtype(maxval=max(M//R, N//C, R, C))
60
+ self.indices = np.zeros(0, dtype=idx_dtype)
61
+ self.indptr = np.zeros(M//R + 1, dtype=idx_dtype)
62
+
63
+ elif len(arg1) == 2:
64
+ # (data,(row,col)) format
65
+ coo = self._coo_container(arg1, dtype=dtype, shape=shape)
66
+ bsr = coo.tobsr(blocksize=blocksize)
67
+ self.indptr, self.indices, self.data, self._shape = (
68
+ bsr.indptr, bsr.indices, bsr.data, bsr._shape
69
+ )
70
+
71
+ elif len(arg1) == 3:
72
+ # (data,indices,indptr) format
73
+ (data, indices, indptr) = arg1
74
+
75
+ # Select index dtype large enough to pass array and
76
+ # scalar parameters to sparsetools
77
+ maxval = 1
78
+ if shape is not None:
79
+ maxval = max(shape)
80
+ if blocksize is not None:
81
+ maxval = max(maxval, max(blocksize))
82
+ idx_dtype = self._get_index_dtype((indices, indptr), maxval=maxval,
83
+ check_contents=True)
84
+ if not copy:
85
+ copy = copy_if_needed
86
+ self.indices = np.array(indices, copy=copy, dtype=idx_dtype)
87
+ self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype)
88
+ self.data = getdata(data, copy=copy, dtype=dtype)
89
+ if self.data.ndim != 3:
90
+ raise ValueError(
91
+ f'BSR data must be 3-dimensional, got shape={self.data.shape}'
92
+ )
93
+ if blocksize is not None:
94
+ if not isshape(blocksize):
95
+ raise ValueError(f'invalid blocksize={blocksize}')
96
+ if tuple(blocksize) != self.data.shape[1:]:
97
+ raise ValueError('mismatching blocksize={} vs {}'.format(
98
+ blocksize, self.data.shape[1:]))
99
+ else:
100
+ raise ValueError('unrecognized bsr_array constructor usage')
101
+ else:
102
+ # must be dense
103
+ try:
104
+ arg1 = np.asarray(arg1)
105
+ except Exception as e:
106
+ raise ValueError("unrecognized form for"
107
+ " %s_matrix constructor" % self.format) from e
108
+ arg1 = self._coo_container(
109
+ arg1, dtype=dtype
110
+ ).tobsr(blocksize=blocksize)
111
+ self.indptr, self.indices, self.data, self._shape = (
112
+ arg1.indptr, arg1.indices, arg1.data, arg1._shape
113
+ )
114
+
115
+ if shape is not None:
116
+ self._shape = check_shape(shape)
117
+ else:
118
+ if self.shape is None:
119
+ # shape not already set, try to infer dimensions
120
+ try:
121
+ M = len(self.indptr) - 1
122
+ N = self.indices.max() + 1
123
+ except Exception as e:
124
+ raise ValueError('unable to infer matrix dimensions') from e
125
+ else:
126
+ R,C = self.blocksize
127
+ self._shape = check_shape((M*R,N*C))
128
+
129
+ if self.shape is None:
130
+ if shape is None:
131
+ # TODO infer shape here
132
+ raise ValueError('need to infer shape')
133
+ else:
134
+ self._shape = check_shape(shape)
135
+
136
+ if dtype is not None:
137
+ self.data = self.data.astype(dtype, copy=False)
138
+
139
+ self.check_format(full_check=False)
140
+
141
+ def check_format(self, full_check=True):
142
+ """Check whether the array/matrix respects the BSR format.
143
+
144
+ Parameters
145
+ ----------
146
+ full_check : bool, optional
147
+ If `True`, run rigorous check, scanning arrays for valid values.
148
+ Note that activating those check might copy arrays for casting,
149
+ modifying indices and index pointers' inplace.
150
+ If `False`, run basic checks on attributes. O(1) operations.
151
+ Default is `True`.
152
+ """
153
+ M,N = self.shape
154
+ R,C = self.blocksize
155
+
156
+ # index arrays should have integer data types
157
+ if self.indptr.dtype.kind != 'i':
158
+ warn(f"indptr array has non-integer dtype ({self.indptr.dtype.name})",
159
+ stacklevel=2)
160
+ if self.indices.dtype.kind != 'i':
161
+ warn(f"indices array has non-integer dtype ({self.indices.dtype.name})",
162
+ stacklevel=2)
163
+
164
+ # check array shapes
165
+ if self.indices.ndim != 1 or self.indptr.ndim != 1:
166
+ raise ValueError("indices, and indptr should be 1-D")
167
+ if self.data.ndim != 3:
168
+ raise ValueError("data should be 3-D")
169
+
170
+ # check index pointer
171
+ if (len(self.indptr) != M//R + 1):
172
+ raise ValueError("index pointer size (%d) should be (%d)" %
173
+ (len(self.indptr), M//R + 1))
174
+ if (self.indptr[0] != 0):
175
+ raise ValueError("index pointer should start with 0")
176
+
177
+ # check index and data arrays
178
+ if (len(self.indices) != len(self.data)):
179
+ raise ValueError("indices and data should have the same size")
180
+ if (self.indptr[-1] > len(self.indices)):
181
+ raise ValueError("Last value of index pointer should be less than "
182
+ "the size of index and data arrays")
183
+
184
+ self.prune()
185
+
186
+ if full_check:
187
+ # check format validity (more expensive)
188
+ if self.nnz > 0:
189
+ if self.indices.max() >= N//C:
190
+ raise ValueError("column index values must be < %d (now max %d)"
191
+ % (N//C, self.indices.max()))
192
+ if self.indices.min() < 0:
193
+ raise ValueError("column index values must be >= 0")
194
+ if np.diff(self.indptr).min() < 0:
195
+ raise ValueError("index pointer values must form a "
196
+ "non-decreasing sequence")
197
+
198
+ idx_dtype = self._get_index_dtype((self.indices, self.indptr))
199
+ self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
200
+ self.indices = np.asarray(self.indices, dtype=idx_dtype)
201
+ self.data = to_native(self.data)
202
+ # if not self.has_sorted_indices():
203
+ # warn('Indices were not in sorted order. Sorting indices.')
204
+ # self.sort_indices(check_first=False)
205
+
206
+ @property
207
+ def blocksize(self) -> tuple:
208
+ """Block size of the matrix."""
209
+ return self.data.shape[1:]
210
+
211
+ def _getnnz(self, axis=None):
212
+ if axis is not None:
213
+ raise NotImplementedError("_getnnz over an axis is not implemented "
214
+ "for BSR format")
215
+ R,C = self.blocksize
216
+ return int(self.indptr[-1] * R * C)
217
+
218
+ _getnnz.__doc__ = _spbase._getnnz.__doc__
219
+
220
+ def __repr__(self):
221
+ _, fmt = _formats[self.format]
222
+ sparse_cls = 'array' if isinstance(self, sparray) else 'matrix'
223
+ shape_str = 'x'.join(str(x) for x in self.shape)
224
+ blksz = 'x'.join(str(x) for x in self.blocksize)
225
+ return (
226
+ f"<{shape_str} sparse {sparse_cls} of type '{self.dtype.type}'\n"
227
+ f"\twith {self.nnz} stored elements (blocksize = {blksz}) in {fmt} format>"
228
+ )
229
+
230
+ def diagonal(self, k=0):
231
+ rows, cols = self.shape
232
+ if k <= -rows or k >= cols:
233
+ return np.empty(0, dtype=self.data.dtype)
234
+ R, C = self.blocksize
235
+ y = np.zeros(min(rows + min(k, 0), cols - max(k, 0)),
236
+ dtype=upcast(self.dtype))
237
+ _sparsetools.bsr_diagonal(k, rows // R, cols // C, R, C,
238
+ self.indptr, self.indices,
239
+ np.ravel(self.data), y)
240
+ return y
241
+
242
+ diagonal.__doc__ = _spbase.diagonal.__doc__
243
+
244
+ ##########################
245
+ # NotImplemented methods #
246
+ ##########################
247
+
248
+ def __getitem__(self,key):
249
+ raise NotImplementedError
250
+
251
+ def __setitem__(self,key,val):
252
+ raise NotImplementedError
253
+
254
+ ######################
255
+ # Arithmetic methods #
256
+ ######################
257
+
258
+ def _add_dense(self, other):
259
+ return self.tocoo(copy=False)._add_dense(other)
260
+
261
+ def _matmul_vector(self, other):
262
+ M,N = self.shape
263
+ R,C = self.blocksize
264
+
265
+ result = np.zeros(self.shape[0], dtype=upcast(self.dtype, other.dtype))
266
+
267
+ bsr_matvec(M//R, N//C, R, C,
268
+ self.indptr, self.indices, self.data.ravel(),
269
+ other, result)
270
+
271
+ return result
272
+
273
+ def _matmul_multivector(self,other):
274
+ R,C = self.blocksize
275
+ M,N = self.shape
276
+ n_vecs = other.shape[1] # number of column vectors
277
+
278
+ result = np.zeros((M,n_vecs), dtype=upcast(self.dtype,other.dtype))
279
+
280
+ bsr_matvecs(M//R, N//C, n_vecs, R, C,
281
+ self.indptr, self.indices, self.data.ravel(),
282
+ other.ravel(), result.ravel())
283
+
284
+ return result
285
+
286
def _matmul_sparse(self, other):
    """Sparse @ sparse product, computed block-wise via bsr_matmat."""
    M, K1 = self.shape
    K2, N = other.shape

    R,n = self.blocksize

    # convert to this format
    if other.format == "bsr":
        C = other.blocksize[1]
    else:
        C = 1

    if other.format == "csr" and n == 1:
        other = other.tobsr(blocksize=(n,C), copy=False)  # lightweight conversion
    else:
        other = other.tobsr(blocksize=(n,C))

    # First pass: count nonzero blocks in the product so the output
    # arrays (and their index dtype) can be sized up front.
    idx_dtype = self._get_index_dtype((self.indptr, self.indices,
                                       other.indptr, other.indices))

    bnnz = csr_matmat_maxnnz(M//R, N//C,
                             self.indptr.astype(idx_dtype),
                             self.indices.astype(idx_dtype),
                             other.indptr.astype(idx_dtype),
                             other.indices.astype(idx_dtype))

    # Re-select the index dtype now that the true block count is known.
    idx_dtype = self._get_index_dtype((self.indptr, self.indices,
                                       other.indptr, other.indices),
                                      maxval=bnnz)
    indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
    indices = np.empty(bnnz, dtype=idx_dtype)
    data = np.empty(R*C*bnnz, dtype=upcast(self.dtype,other.dtype))

    # Second pass: compute the actual product into the preallocated arrays.
    bsr_matmat(bnnz, M//R, N//C, R, C, n,
               self.indptr.astype(idx_dtype),
               self.indices.astype(idx_dtype),
               np.ravel(self.data),
               other.indptr.astype(idx_dtype),
               other.indices.astype(idx_dtype),
               np.ravel(other.data),
               indptr,
               indices,
               data)

    data = data.reshape(-1,R,C)

    # TODO eliminate zeros

    return self._bsr_container(
        (data, indices, indptr), shape=(M, N), blocksize=(R, C)
    )
337
+
338
+ ######################
339
+ # Conversion methods #
340
+ ######################
341
+
342
def tobsr(self, blocksize=None, copy=False):
    """Convert this array/matrix into Block Sparse Row Format.

    With copy=False, the data/indices may be shared between this
    array/matrix and the resultant bsr_array/bsr_matrix.

    If blocksize=(R, C) is provided, it will be used for determining
    block size of the bsr_array/bsr_matrix.
    """
    # A different block size requires re-blocking through CSR.
    if blocksize is not None and blocksize != self.blocksize:
        return self.tocsr().tobsr(blocksize=blocksize)
    return self.copy() if copy else self
357
+
358
def tocsr(self, copy=False):
    """Expand the block structure into plain CSR via bsr_tocsr."""
    n_rows, n_cols = self.shape
    brow, bcol = self.blocksize
    num_nonzero = self.nnz
    idx_dtype = self._get_index_dtype((self.indptr, self.indices),
                                      maxval=max(num_nonzero, n_cols))

    # Pre-allocate the CSR triplet; the C routine fills it in.
    csr_indptr = np.empty(n_rows + 1, dtype=idx_dtype)
    csr_indices = np.empty(num_nonzero, dtype=idx_dtype)
    csr_data = np.empty(num_nonzero, dtype=upcast(self.dtype))

    bsr_tocsr(n_rows // brow,  # n_brow
              n_cols // bcol,  # n_bcol
              brow, bcol,
              self.indptr.astype(idx_dtype, copy=False),
              self.indices.astype(idx_dtype, copy=False),
              self.data,
              csr_indptr,
              csr_indices,
              csr_data)
    return self._csr_container((csr_data, csr_indices, csr_indptr),
                               shape=self.shape)
378
+
379
+ tocsr.__doc__ = _spbase.tocsr.__doc__
380
+
381
def tocsc(self, copy=False):
    """Convert to CSC by way of a shared-data CSR intermediate."""
    as_csr = self.tocsr(copy=False)
    return as_csr.tocsc(copy=copy)
383
+
384
+ tocsc.__doc__ = _spbase.tocsc.__doc__
385
+
386
def tocoo(self, copy=True):
    """Convert this array/matrix to COOrdinate format.

    When copy=False the data array will be shared between
    this array/matrix and the resultant coo_array/coo_matrix.
    """

    M,N = self.shape
    R,C = self.blocksize

    # Number of stored blocks per block row.
    indptr_diff = np.diff(self.indptr)
    if indptr_diff.dtype.itemsize > np.dtype(np.intp).itemsize:
        # Check for potential overflow
        indptr_diff_limited = indptr_diff.astype(np.intp)
        if np.any(indptr_diff_limited != indptr_diff):
            raise ValueError("Matrix too big to convert")
        indptr_diff = indptr_diff_limited

    idx_dtype = self._get_index_dtype(maxval=max(M, N))
    # Expand each block-row base index to one value per stored entry,
    # then add the within-block row offsets (0..R-1) to every block.
    row = (R * np.arange(M//R, dtype=idx_dtype)).repeat(indptr_diff)
    row = row.repeat(R*C).reshape(-1,R,C)
    row += np.tile(np.arange(R, dtype=idx_dtype).reshape(-1,1), (1,C))
    row = row.reshape(-1)

    # Same expansion for columns: block-column base plus offsets 0..C-1.
    col = ((C * self.indices).astype(idx_dtype, copy=False)
           .repeat(R*C).reshape(-1,R,C))
    col += np.tile(np.arange(C, dtype=idx_dtype), (R,1))
    col = col.reshape(-1)

    data = self.data.reshape(-1)

    if copy:
        data = data.copy()

    return self._coo_container(
        (data, (row, col)), shape=self.shape
    )
423
+
424
def toarray(self, order=None, out=None):
    """Materialize as a dense array via the zero-copy COO conversion."""
    coo_view = self.tocoo(copy=False)
    return coo_view.toarray(order=order, out=out)
426
+
427
+ toarray.__doc__ = _spbase.toarray.__doc__
428
+
429
def transpose(self, axes=None, copy=False):
    if axes is not None and axes != (1, 0):
        raise ValueError("Sparse matrices do not support "
                         "an 'axes' parameter because swapping "
                         "dimensions is the only logical permutation.")

    R, C = self.blocksize
    M, N = self.shape
    NBLK = self.nnz//(R*C)  # number of stored blocks

    if self.nnz == 0:
        # Empty matrix: just build the transposed shape, with the
        # block dimensions swapped as well.
        return self._bsr_container((N, M), blocksize=(C, R),
                                   dtype=self.dtype, copy=copy)

    # Output structure: N//C block rows, each block transposed to C x R.
    indptr = np.empty(N//C + 1, dtype=self.indptr.dtype)
    indices = np.empty(NBLK, dtype=self.indices.dtype)
    data = np.empty((NBLK, C, R), dtype=self.data.dtype)

    bsr_transpose(M//R, N//C, R, C,
                  self.indptr, self.indices, self.data.ravel(),
                  indptr, indices, data.ravel())

    return self._bsr_container((data, indices, indptr),
                               shape=(N, M), copy=copy)
453
+
454
+ transpose.__doc__ = _spbase.transpose.__doc__
455
+
456
+ ##############################################################
457
+ # methods that examine or modify the internal data structure #
458
+ ##############################################################
459
+
460
def eliminate_zeros(self):
    """Remove zero elements in-place."""

    if not self.nnz:
        return  # nothing to do

    R,C = self.blocksize
    M,N = self.shape

    # Per-block count of nonzero entries; a zero count marks an
    # all-zero block that can be dropped.
    mask = (self.data != 0).reshape(-1,R*C).sum(axis=1)  # nonzero blocks

    nonzero_blocks = mask.nonzero()[0]

    # Compact the surviving blocks to the front of the data array.
    self.data[:len(nonzero_blocks)] = self.data[nonzero_blocks]

    # modifies self.indptr and self.indices *in place*
    _sparsetools.csr_eliminate_zeros(M//R, N//C, self.indptr,
                                     self.indices, mask)
    # Drop the now-unused tail storage.
    self.prune()
479
+
480
def sum_duplicates(self):
    """Eliminate duplicate array/matrix entries by adding them together

    This is an *in place* operation
    """
    if self.has_canonical_format:
        return
    # Sorting first guarantees duplicates are adjacent within each row.
    self.sort_indices()
    R, C = self.blocksize
    M, N = self.shape

    # port of _sparsetools.csr_sum_duplicates
    n_row = M // R
    nnz = 0        # write position for the compacted entries
    row_end = 0
    for i in range(n_row):
        jj = row_end
        row_end = self.indptr[i+1]
        while jj < row_end:
            j = self.indices[jj]
            x = self.data[jj]
            jj += 1
            # Accumulate the run of entries sharing column index j.
            while jj < row_end and self.indices[jj] == j:
                x += self.data[jj]
                jj += 1
            self.indices[nnz] = j
            self.data[nnz] = x
            nnz += 1
        self.indptr[i+1] = nnz

    self.prune()  # nnz may have changed
    self.has_canonical_format = True
512
+
513
def sort_indices(self):
    """Sort the indices of this array/matrix *in place*
    """
    # Already sorted: nothing to do.
    if self.has_sorted_indices:
        return

    brow, bcol = self.blocksize
    n_rows, n_cols = self.shape

    # The C routine reorders indices and the matching blocks together.
    bsr_sort_indices(n_rows // brow, n_cols // bcol, brow, bcol,
                     self.indptr, self.indices, self.data.ravel())
    self.has_sorted_indices = True
525
+
526
def prune(self):
    """Remove empty space after all non-zero elements.
    """
    brow, _ = self.blocksize
    n_rows, _ = self.shape

    # Sanity-check the structure before slicing anything off.
    if len(self.indptr) != n_rows // brow + 1:
        raise ValueError("index pointer has invalid length")

    block_count = self.indptr[-1]
    if len(self.indices) < block_count:
        raise ValueError("indices array has too few elements")
    if len(self.data) < block_count:
        raise ValueError("data array has too few elements")

    # Trim trailing, unused storage.
    self.data = self.data[:block_count]
    self.indices = self.indices[:block_count]
545
+
546
+ # utility functions
547
def _binopt(self, other, op, in_shape=None, out_shape=None):
    """Apply the binary operation fn to two sparse matrices."""

    # Ideally we'd take the GCDs of the blocksize dimensions
    # and explode self and other to match.
    other = self.__class__(other, blocksize=self.blocksize)

    # e.g. bsr_plus_bsr, etc.
    fn = getattr(_sparsetools, self.format + op + self.format)

    R,C = self.blocksize

    # Worst-case result size: every block of both operands survives.
    max_bnnz = len(self.data) + len(other.data)
    idx_dtype = self._get_index_dtype((self.indptr, self.indices,
                                       other.indptr, other.indices),
                                      maxval=max_bnnz)
    indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
    indices = np.empty(max_bnnz, dtype=idx_dtype)

    # Comparison ops produce boolean data; everything else upcasts.
    bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']
    if op in bool_ops:
        data = np.empty(R*C*max_bnnz, dtype=np.bool_)
    else:
        data = np.empty(R*C*max_bnnz, dtype=upcast(self.dtype,other.dtype))

    fn(self.shape[0]//R, self.shape[1]//C, R, C,
       self.indptr.astype(idx_dtype),
       self.indices.astype(idx_dtype),
       self.data,
       other.indptr.astype(idx_dtype),
       other.indices.astype(idx_dtype),
       np.ravel(other.data),
       indptr,
       indices,
       data)

    # Trim to the block count the kernel actually produced.
    actual_bnnz = indptr[-1]
    indices = indices[:actual_bnnz]
    data = data[:R*C*actual_bnnz]

    # Copy when the result is much smaller than the allocation so the
    # oversized buffers can be freed.
    if actual_bnnz < max_bnnz/2:
        indices = indices.copy()
        data = data.copy()

    data = data.reshape(-1,R,C)

    return self.__class__((data, indices, indptr), shape=self.shape)
594
+
595
+ # needed by _data_matrix
596
+ def _with_data(self,data,copy=True):
597
+ """Returns a matrix with the same sparsity structure as self,
598
+ but with different data. By default the structure arrays
599
+ (i.e. .indptr and .indices) are copied.
600
+ """
601
+ if copy:
602
+ return self.__class__((data,self.indices.copy(),self.indptr.copy()),
603
+ shape=self.shape,dtype=data.dtype)
604
+ else:
605
+ return self.__class__((data,self.indices,self.indptr),
606
+ shape=self.shape,dtype=data.dtype)
607
+
608
+ # # these functions are used by the parent class
609
+ # # to remove redundancy between bsc_matrix and bsr_matrix
610
+ # def _swap(self,x):
611
+ # """swap the members of x if this is a column-oriented matrix
612
+ # """
613
+ # return (x[0],x[1])
614
+
615
+
616
def isspmatrix_bsr(x):
    """Is `x` of a bsr_matrix type?

    Parameters
    ----------
    x
        object to check for being a bsr matrix

    Returns
    -------
    bool
        True if `x` is a bsr matrix, False otherwise

    Examples
    --------
    >>> from scipy.sparse import bsr_array, bsr_matrix, csr_matrix, isspmatrix_bsr
    >>> isspmatrix_bsr(bsr_matrix([[5]]))
    True
    >>> isspmatrix_bsr(bsr_array([[5]]))
    False
    >>> isspmatrix_bsr(csr_matrix([[5]]))
    False
    """
    # Strict spmatrix check: bsr_array instances return False (see doctest).
    return isinstance(x, bsr_matrix)
640
+
641
+
642
+ # This namespace class separates array from matrix with isinstance
643
+ class bsr_array(_bsr_base, sparray):
644
+ """
645
+ Block Sparse Row format sparse array.
646
+
647
+ This can be instantiated in several ways:
648
+ bsr_array(D, [blocksize=(R,C)])
649
+ where D is a 2-D ndarray.
650
+
651
+ bsr_array(S, [blocksize=(R,C)])
652
+ with another sparse array or matrix S (equivalent to S.tobsr())
653
+
654
+ bsr_array((M, N), [blocksize=(R,C), dtype])
655
+ to construct an empty sparse array with shape (M, N)
656
+ dtype is optional, defaulting to dtype='d'.
657
+
658
+ bsr_array((data, ij), [blocksize=(R,C), shape=(M, N)])
659
+ where ``data`` and ``ij`` satisfy ``a[ij[0, k], ij[1, k]] = data[k]``
660
+
661
+ bsr_array((data, indices, indptr), [shape=(M, N)])
662
+ is the standard BSR representation where the block column
663
+ indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]``
664
+ and their corresponding block values are stored in
665
+ ``data[ indptr[i]: indptr[i+1] ]``. If the shape parameter is not
666
+ supplied, the array dimensions are inferred from the index arrays.
667
+
668
+ Attributes
669
+ ----------
670
+ dtype : dtype
671
+ Data type of the array
672
+ shape : 2-tuple
673
+ Shape of the array
674
+ ndim : int
675
+ Number of dimensions (this is always 2)
676
+ nnz
677
+ size
678
+ data
679
+ BSR format data array of the array
680
+ indices
681
+ BSR format index array of the array
682
+ indptr
683
+ BSR format index pointer array of the array
684
+ blocksize
685
+ Block size
686
+ has_sorted_indices : bool
687
+ Whether indices are sorted
688
+ has_canonical_format : bool
689
+ Whether the array has sorted indices and no duplicates
690
+
691
+ Notes
692
+ -----
693
+ Sparse arrays can be used in arithmetic operations: they support
694
+ addition, subtraction, multiplication, division, and matrix power.
695
+
696
+ **Summary of BSR format**
697
+
698
+ The Block Sparse Row (BSR) format is very similar to the Compressed
699
+ Sparse Row (CSR) format. BSR is appropriate for sparse matrices with dense
700
+ sub matrices like the last example below. Such sparse block matrices often
701
+ arise in vector-valued finite element discretizations. In such cases, BSR is
702
+ considerably more efficient than CSR and CSC for many sparse arithmetic
703
+ operations.
704
+
705
+ **Blocksize**
706
+
707
+ The blocksize (R,C) must evenly divide the shape of the sparse array (M,N).
708
+ That is, R and C must satisfy the relationship ``M % R = 0`` and
709
+ ``N % C = 0``.
710
+
711
+ If no blocksize is specified, a simple heuristic is applied to determine
712
+ an appropriate blocksize.
713
+
714
+ **Canonical Format**
715
+
716
+ In canonical format, there are no duplicate blocks and indices are sorted
717
+ per row.
718
+
719
+ Examples
720
+ --------
721
+ >>> import numpy as np
722
+ >>> from scipy.sparse import bsr_array
723
+ >>> bsr_array((3, 4), dtype=np.int8).toarray()
724
+ array([[0, 0, 0, 0],
725
+ [0, 0, 0, 0],
726
+ [0, 0, 0, 0]], dtype=int8)
727
+
728
+ >>> row = np.array([0, 0, 1, 2, 2, 2])
729
+ >>> col = np.array([0, 2, 2, 0, 1, 2])
730
+ >>> data = np.array([1, 2, 3 ,4, 5, 6])
731
+ >>> bsr_array((data, (row, col)), shape=(3, 3)).toarray()
732
+ array([[1, 0, 2],
733
+ [0, 0, 3],
734
+ [4, 5, 6]])
735
+
736
+ >>> indptr = np.array([0, 2, 3, 6])
737
+ >>> indices = np.array([0, 2, 2, 0, 1, 2])
738
+ >>> data = np.array([1, 2, 3, 4, 5, 6]).repeat(4).reshape(6, 2, 2)
739
+ >>> bsr_array((data,indices,indptr), shape=(6, 6)).toarray()
740
+ array([[1, 1, 0, 0, 2, 2],
741
+ [1, 1, 0, 0, 2, 2],
742
+ [0, 0, 0, 0, 3, 3],
743
+ [0, 0, 0, 0, 3, 3],
744
+ [4, 4, 5, 5, 6, 6],
745
+ [4, 4, 5, 5, 6, 6]])
746
+
747
+ """
748
+
749
+
750
+ class bsr_matrix(spmatrix, _bsr_base):
751
+ """
752
+ Block Sparse Row format sparse matrix.
753
+
754
+ This can be instantiated in several ways:
755
+ bsr_matrix(D, [blocksize=(R,C)])
756
+ where D is a 2-D ndarray.
757
+
758
+ bsr_matrix(S, [blocksize=(R,C)])
759
+ with another sparse array or matrix S (equivalent to S.tobsr())
760
+
761
+ bsr_matrix((M, N), [blocksize=(R,C), dtype])
762
+ to construct an empty sparse matrix with shape (M, N)
763
+ dtype is optional, defaulting to dtype='d'.
764
+
765
+ bsr_matrix((data, ij), [blocksize=(R,C), shape=(M, N)])
766
+ where ``data`` and ``ij`` satisfy ``a[ij[0, k], ij[1, k]] = data[k]``
767
+
768
+ bsr_matrix((data, indices, indptr), [shape=(M, N)])
769
+ is the standard BSR representation where the block column
770
+ indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]``
771
+ and their corresponding block values are stored in
772
+ ``data[ indptr[i]: indptr[i+1] ]``. If the shape parameter is not
773
+ supplied, the matrix dimensions are inferred from the index arrays.
774
+
775
+ Attributes
776
+ ----------
777
+ dtype : dtype
778
+ Data type of the matrix
779
+ shape : 2-tuple
780
+ Shape of the matrix
781
+ ndim : int
782
+ Number of dimensions (this is always 2)
783
+ nnz
784
+ size
785
+ data
786
+ BSR format data array of the matrix
787
+ indices
788
+ BSR format index array of the matrix
789
+ indptr
790
+ BSR format index pointer array of the matrix
791
+ blocksize
792
+ Block size
793
+ has_sorted_indices : bool
794
+ Whether indices are sorted
795
+ has_canonical_format : bool
796
+ Whether the matrix has sorted indices and no duplicates
797
+
798
+ Notes
799
+ -----
800
+ Sparse matrices can be used in arithmetic operations: they support
801
+ addition, subtraction, multiplication, division, and matrix power.
802
+
803
+ **Summary of BSR format**
804
+
805
+ The Block Sparse Row (BSR) format is very similar to the Compressed
806
+ Sparse Row (CSR) format. BSR is appropriate for sparse matrices with dense
807
+ sub matrices like the last example below. Such sparse block matrices often
808
+ arise in vector-valued finite element discretizations. In such cases, BSR is
809
+ considerably more efficient than CSR and CSC for many sparse arithmetic
810
+ operations.
811
+
812
+ **Blocksize**
813
+
814
+ The blocksize (R,C) must evenly divide the shape of the sparse matrix (M,N).
815
+ That is, R and C must satisfy the relationship ``M % R = 0`` and
816
+ ``N % C = 0``.
817
+
818
+ If no blocksize is specified, a simple heuristic is applied to determine
819
+ an appropriate blocksize.
820
+
821
+ **Canonical Format**
822
+
823
+ In canonical format, there are no duplicate blocks and indices are sorted
824
+ per row.
825
+
826
+ Examples
827
+ --------
828
+ >>> import numpy as np
829
+ >>> from scipy.sparse import bsr_matrix
830
+ >>> bsr_matrix((3, 4), dtype=np.int8).toarray()
831
+ array([[0, 0, 0, 0],
832
+ [0, 0, 0, 0],
833
+ [0, 0, 0, 0]], dtype=int8)
834
+
835
+ >>> row = np.array([0, 0, 1, 2, 2, 2])
836
+ >>> col = np.array([0, 2, 2, 0, 1, 2])
837
+ >>> data = np.array([1, 2, 3 ,4, 5, 6])
838
+ >>> bsr_matrix((data, (row, col)), shape=(3, 3)).toarray()
839
+ array([[1, 0, 2],
840
+ [0, 0, 3],
841
+ [4, 5, 6]])
842
+
843
+ >>> indptr = np.array([0, 2, 3, 6])
844
+ >>> indices = np.array([0, 2, 2, 0, 1, 2])
845
+ >>> data = np.array([1, 2, 3, 4, 5, 6]).repeat(4).reshape(6, 2, 2)
846
+ >>> bsr_matrix((data,indices,indptr), shape=(6, 6)).toarray()
847
+ array([[1, 1, 0, 0, 2, 2],
848
+ [1, 1, 0, 0, 2, 2],
849
+ [0, 0, 0, 0, 3, 3],
850
+ [0, 0, 0, 0, 3, 3],
851
+ [4, 4, 5, 5, 6, 6],
852
+ [4, 4, 5, 5, 6, 6]])
853
+
854
+ """
855
+
env-llmeval/lib/python3.10/site-packages/scipy/sparse/_compressed.py ADDED
@@ -0,0 +1,1367 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Base class for sparse matrix formats using compressed storage."""
2
+ __all__ = []
3
+
4
+ from warnings import warn
5
+ import operator
6
+
7
+ import numpy as np
8
+ from scipy._lib._util import _prune_array, copy_if_needed
9
+
10
+ from ._base import _spbase, issparse, SparseEfficiencyWarning
11
+ from ._data import _data_matrix, _minmax_mixin
12
+ from . import _sparsetools
13
+ from ._sparsetools import (get_csr_submatrix, csr_sample_offsets, csr_todense,
14
+ csr_sample_values, csr_row_index, csr_row_slice,
15
+ csr_column_index1, csr_column_index2)
16
+ from ._index import IndexMixin
17
+ from ._sputils import (upcast, upcast_char, to_native, isdense, isshape,
18
+ getdtype, isscalarlike, isintlike, downcast_intp_index,
19
+ get_sum_dtype, check_shape, is_pydata_spmatrix)
20
+
21
+
22
+ class _cs_matrix(_data_matrix, _minmax_mixin, IndexMixin):
23
+ """
24
+ base array/matrix class for compressed row- and column-oriented arrays/matrices
25
+ """
26
+
27
def __init__(self, arg1, shape=None, dtype=None, copy=False):
    """Build a compressed (CSR/CSC) array/matrix from any supported input.

    `arg1` may be another sparse object, a shape tuple ``(M, N)``,
    a ``(data, ij)`` pair, a ``(data, indices, indptr)`` triple, or a
    dense array-like.
    """
    _data_matrix.__init__(self)

    if issparse(arg1):
        if arg1.format == self.format and copy:
            arg1 = arg1.copy()
        else:
            # Format conversion produces fresh arrays as needed.
            arg1 = arg1.asformat(self.format)
        self.indptr, self.indices, self.data, self._shape = (
            arg1.indptr, arg1.indices, arg1.data, arg1._shape
        )

    elif isinstance(arg1, tuple):
        if isshape(arg1):
            # It's a tuple of matrix dimensions (M, N)
            # create empty matrix
            self._shape = check_shape(arg1)
            M, N = self.shape
            # Select index dtype large enough to pass array and
            # scalar parameters to sparsetools
            idx_dtype = self._get_index_dtype(maxval=max(M, N))
            self.data = np.zeros(0, getdtype(dtype, default=float))
            self.indices = np.zeros(0, idx_dtype)
            self.indptr = np.zeros(self._swap((M, N))[0] + 1,
                                   dtype=idx_dtype)
        else:
            if len(arg1) == 2:
                # (data, ij) format
                coo = self._coo_container(arg1, shape=shape, dtype=dtype)
                arrays = coo._coo_to_compressed(self._swap)
                self.indptr, self.indices, self.data, self._shape = arrays
            elif len(arg1) == 3:
                # (data, indices, indptr) format
                (data, indices, indptr) = arg1

                # Select index dtype large enough to pass array and
                # scalar parameters to sparsetools
                maxval = None
                if shape is not None:
                    maxval = max(shape)
                idx_dtype = self._get_index_dtype((indices, indptr),
                                                  maxval=maxval,
                                                  check_contents=True)

                if not copy:
                    copy = copy_if_needed
                self.indices = np.array(indices, copy=copy, dtype=idx_dtype)
                self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype)
                self.data = np.array(data, copy=copy, dtype=dtype)
            else:
                raise ValueError(f"unrecognized {self.format}_matrix "
                                 "constructor usage")

    else:
        # must be dense
        try:
            arg1 = np.asarray(arg1)
        except Exception as e:
            msg = f"unrecognized {self.format}_matrix constructor usage"
            raise ValueError(msg) from e
        coo = self._coo_container(arg1, dtype=dtype)
        arrays = coo._coo_to_compressed(self._swap)
        self.indptr, self.indices, self.data, self._shape = arrays

    # Read matrix dimensions given, if any
    if shape is not None:
        self._shape = check_shape(shape)
    else:
        if self.shape is None:
            # shape not already set, try to infer dimensions
            try:
                major_dim = len(self.indptr) - 1
                minor_dim = self.indices.max() + 1
            except Exception as e:
                raise ValueError('unable to infer matrix dimensions') from e
            else:
                self._shape = check_shape(self._swap((major_dim, minor_dim)))

    if dtype is not None:
        self.data = self.data.astype(dtype, copy=False)

    # Cheap structural validation only; full checks are opt-in.
    self.check_format(full_check=False)
109
+
110
+ def _getnnz(self, axis=None):
111
+ if axis is None:
112
+ return int(self.indptr[-1])
113
+ else:
114
+ if axis < 0:
115
+ axis += 2
116
+ axis, _ = self._swap((axis, 1 - axis))
117
+ _, N = self._swap(self.shape)
118
+ if axis == 0:
119
+ return np.bincount(downcast_intp_index(self.indices),
120
+ minlength=N)
121
+ elif axis == 1:
122
+ return np.diff(self.indptr)
123
+ raise ValueError('axis out of bounds')
124
+
125
+ _getnnz.__doc__ = _spbase._getnnz.__doc__
126
+
127
def check_format(self, full_check=True):
    """Check whether the array/matrix respects the CSR or CSC format.

    Parameters
    ----------
    full_check : bool, optional
        If `True`, run rigorous check, scanning arrays for valid values.
        Note that activating those check might copy arrays for casting,
        modifying indices and index pointers' inplace.
        If `False`, run basic checks on attributes. O(1) operations.
        Default is `True`.
    """
    # use _swap to determine proper bounds
    major_name, minor_name = self._swap(('row', 'column'))
    major_dim, minor_dim = self._swap(self.shape)

    # index arrays should have integer data types
    if self.indptr.dtype.kind != 'i':
        warn(f"indptr array has non-integer dtype ({self.indptr.dtype.name})",
             stacklevel=3)
    if self.indices.dtype.kind != 'i':
        warn(f"indices array has non-integer dtype ({self.indices.dtype.name})",
             stacklevel=3)

    # check array shapes
    for x in [self.data.ndim, self.indices.ndim, self.indptr.ndim]:
        if x != 1:
            raise ValueError('data, indices, and indptr should be 1-D')

    # check index pointer
    if (len(self.indptr) != major_dim + 1):
        raise ValueError("index pointer size ({}) should be ({})"
                         "".format(len(self.indptr), major_dim + 1))
    if (self.indptr[0] != 0):
        raise ValueError("index pointer should start with 0")

    # check index and data arrays
    if (len(self.indices) != len(self.data)):
        raise ValueError("indices and data should have the same size")
    if (self.indptr[-1] > len(self.indices)):
        raise ValueError("Last value of index pointer should be less than "
                         "the size of index and data arrays")

    # Drop any unused trailing storage before deeper checks.
    self.prune()

    if full_check:
        # check format validity (more expensive)
        if self.nnz > 0:
            if self.indices.max() >= minor_dim:
                raise ValueError(f"{minor_name} index values must be < {minor_dim}")
            if self.indices.min() < 0:
                raise ValueError(f"{minor_name} index values must be >= 0")
            if np.diff(self.indptr).min() < 0:
                raise ValueError("index pointer values must form a "
                                 "non-decreasing sequence")

        # Normalize index arrays to a common dtype and data to native
        # byte order (may replace the arrays in place).
        idx_dtype = self._get_index_dtype((self.indptr, self.indices))
        self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
        self.indices = np.asarray(self.indices, dtype=idx_dtype)
        self.data = to_native(self.data)

    # if not self.has_sorted_indices():
    #     warn('Indices were not in sorted order. Sorting indices.')
    #     self.sort_indices()
    #     assert(self.has_sorted_indices())
    # TODO check for duplicates?
193
+
194
+ #######################
195
+ # Boolean comparisons #
196
+ #######################
197
+
198
+ def _scalar_binopt(self, other, op):
199
+ """Scalar version of self._binopt, for cases in which no new nonzeros
200
+ are added. Produces a new sparse array in canonical form.
201
+ """
202
+ self.sum_duplicates()
203
+ res = self._with_data(op(self.data, other), copy=True)
204
+ res.eliminate_zeros()
205
+ return res
206
+
207
def __eq__(self, other):
    """Elementwise equality, returned as a sparse boolean array/matrix."""
    # Scalar other.
    if isscalarlike(other):
        if np.isnan(other):
            # nan compares unequal to everything: an all-False result.
            return self.__class__(self.shape, dtype=np.bool_)

        if other == 0:
            warn("Comparing a sparse matrix with 0 using == is inefficient"
                 ", try using != instead.", SparseEfficiencyWarning,
                 stacklevel=3)
            # Compute != (sparse-friendly) and invert it.
            all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
            inv = self._scalar_binopt(other, operator.ne)
            return all_true - inv
        else:
            return self._scalar_binopt(other, operator.eq)
    # Dense other.
    elif isdense(other):
        return self.todense() == other
    # Pydata sparse other.
    elif is_pydata_spmatrix(other):
        return NotImplemented
    # Sparse other.
    elif issparse(other):
        warn("Comparing sparse matrices using == is inefficient, try using"
             " != instead.", SparseEfficiencyWarning, stacklevel=3)
        # TODO sparse broadcasting
        if self.shape != other.shape:
            return False
        elif self.format != other.format:
            other = other.asformat(self.format)
        res = self._binopt(other, '_ne_')
        all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
        return all_true - res
    else:
        return NotImplemented
242
+
243
def __ne__(self, other):
    """Elementwise 'not equal', returned as a sparse boolean result.

    Scalars, dense arrays, pydata-sparse, and scipy-sparse operands are
    each handled on their own path; anything else returns NotImplemented.
    """
    # Scalar other.
    if isscalarlike(other):
        if np.isnan(other):
            # nan != x is True for every x: an all-True result.
            warn("Comparing a sparse matrix with nan using != is"
                 " inefficient", SparseEfficiencyWarning, stacklevel=3)
            all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
            return all_true
        elif other != 0:
            warn("Comparing a sparse matrix with a nonzero scalar using !="
                 " is inefficient, try using == instead.",
                 SparseEfficiencyWarning, stacklevel=3)
            # Build the boolean all-true matrix directly, consistent with
            # __eq__/_inequality; this avoids allocating a dense float
            # intermediate that is immediately cast to bool.
            all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
            inv = self._scalar_binopt(other, operator.eq)
            return all_true - inv
        else:
            return self._scalar_binopt(other, operator.ne)
    # Dense other.
    elif isdense(other):
        return self.todense() != other
    # Pydata sparse other.
    elif is_pydata_spmatrix(other):
        return NotImplemented
    # Sparse other.
    elif issparse(other):
        # TODO sparse broadcasting
        if self.shape != other.shape:
            return True
        elif self.format != other.format:
            other = other.asformat(self.format)
        return self._binopt(other, '_ne_')
    else:
        return NotImplemented
276
+
277
def _inequality(self, other, op, op_name, bad_scalar_msg):
    """Shared implementation behind __lt__/__gt__/__le__/__ge__."""
    # Scalar other.
    if isscalarlike(other):
        if 0 == other and op_name in ('_le_', '_ge_'):
            raise NotImplementedError(" >= and <= don't work with 0.")
        elif op(0, other):
            # Implicit zeros would all compare True: densify the scalar.
            warn(bad_scalar_msg, SparseEfficiencyWarning, stacklevel=3)
            other_arr = np.empty(self.shape, dtype=np.result_type(other))
            other_arr.fill(other)
            other_arr = self.__class__(other_arr)
            return self._binopt(other_arr, op_name)
        else:
            return self._scalar_binopt(other, op)
    # Dense other.
    elif isdense(other):
        return op(self.todense(), other)
    # Sparse other.
    elif issparse(other):
        # TODO sparse broadcasting
        if self.shape != other.shape:
            raise ValueError("inconsistent shapes")
        elif self.format != other.format:
            other = other.asformat(self.format)
        if op_name not in ('_ge_', '_le_'):
            return self._binopt(other, op_name)

        # >= and <= are computed as the inverse of the strict comparison.
        warn("Comparing sparse matrices using >= and <= is inefficient, "
             "using <, >, or !=, instead.",
             SparseEfficiencyWarning, stacklevel=3)
        all_true = self.__class__(np.ones(self.shape, dtype=np.bool_))
        res = self._binopt(other, '_gt_' if op_name == '_le_' else '_lt_')
        return all_true - res
    else:
        return NotImplemented
311
+
312
def __lt__(self, other):
    # Elementwise <; the shared logic lives in _inequality.
    return self._inequality(other, operator.lt, '_lt_',
                            "Comparing a sparse matrix with a scalar "
                            "greater than zero using < is inefficient, "
                            "try using >= instead.")

def __gt__(self, other):
    # Elementwise >.
    return self._inequality(other, operator.gt, '_gt_',
                            "Comparing a sparse matrix with a scalar "
                            "less than zero using > is inefficient, "
                            "try using <= instead.")

def __le__(self, other):
    # Elementwise <=.
    return self._inequality(other, operator.le, '_le_',
                            "Comparing a sparse matrix with a scalar "
                            "greater than zero using <= is inefficient, "
                            "try using > instead.")

def __ge__(self, other):
    # Elementwise >=.
    return self._inequality(other, operator.ge, '_ge_',
                            "Comparing a sparse matrix with a scalar "
                            "less than zero using >= is inefficient, "
                            "try using < instead.")
335
+
336
+ #################################
337
+ # Arithmetic operator overrides #
338
+ #################################
339
+
340
    def _add_dense(self, other):
        """Add a dense array: copy `other` and scatter our nonzeros into it.

        Returns a dense container (matrix/array) of the upcast dtype.
        """
        if other.shape != self.shape:
            raise ValueError(f'Incompatible shapes ({self.shape} and {other.shape})')
        dtype = upcast_char(self.dtype.char, other.dtype.char)
        # Pick the memory order matching this format ('C' for CSR, 'F' for CSC)
        # so csr_todense can walk the output contiguously.
        order = self._swap('CF')[0]
        result = np.array(other, dtype=dtype, order=order, copy=True)
        M, N = self._swap(self.shape)
        # csr_todense needs a C-contiguous target; the transpose of an
        # F-ordered array provides that view without copying.
        y = result if result.flags.c_contiguous else result.T
        csr_todense(M, N, self.indptr, self.indices, self.data, y)
        return self._container(result, copy=False)
350
+
351
    def _add_sparse(self, other):
        # Sparse + sparse via the C '_plus_' kernel (see _binopt).
        return self._binopt(other, '_plus_')

    def _sub_sparse(self, other):
        # Sparse - sparse via the C '_minus_' kernel (see _binopt).
        return self._binopt(other, '_minus_')
356
+
357
    def multiply(self, other):
        """Point-wise multiplication by another array/matrix, vector, or
        scalar.

        Supports broadcasting of 2-d row/column vectors (sparse or dense)
        against this matrix; the result is sparse except when `other` is a
        dense array of higher dimension.
        """
        # Scalar multiplication.
        if isscalarlike(other):
            return self._mul_scalar(other)
        # Sparse matrix or vector.
        if issparse(other):
            if self.shape == other.shape:
                other = self.__class__(other)
                return self._binopt(other, '_elmul_')
            if other.ndim == 1:
                raise TypeError("broadcast from a 1d array not yet supported")
            # Single element.
            elif other.shape == (1, 1):
                return self._mul_scalar(other.toarray()[0, 0])
            elif self.shape == (1, 1):
                return other._mul_scalar(self.toarray()[0, 0])
            # A row times a column.
            elif self.shape[1] == 1 and other.shape[0] == 1:
                return self._matmul_sparse(other.tocsc())
            elif self.shape[0] == 1 and other.shape[1] == 1:
                return other._matmul_sparse(self.tocsc())
            # Row vector times matrix. other is a row.
            # Broadcasting a vector is implemented as multiplication by a
            # diagonal matrix built from the vector's values.
            elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:
                other = self._dia_container(
                    (other.toarray().ravel(), [0]),
                    shape=(other.shape[1], other.shape[1])
                )
                return self._matmul_sparse(other)
            # self is a row.
            elif self.shape[0] == 1 and self.shape[1] == other.shape[1]:
                copy = self._dia_container(
                    (self.toarray().ravel(), [0]),
                    shape=(self.shape[1], self.shape[1])
                )
                return other._matmul_sparse(copy)
            # Column vector times matrix. other is a column.
            elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:
                other = self._dia_container(
                    (other.toarray().ravel(), [0]),
                    shape=(other.shape[0], other.shape[0])
                )
                return other._matmul_sparse(self)
            # self is a column.
            elif self.shape[1] == 1 and self.shape[0] == other.shape[0]:
                copy = self._dia_container(
                    (self.toarray().ravel(), [0]),
                    shape=(self.shape[0], self.shape[0])
                )
                return copy._matmul_sparse(other)
            else:
                raise ValueError("inconsistent shapes")

        # Assume other is a dense matrix/array, which produces a single-item
        # object array if other isn't convertible to ndarray.
        other = np.atleast_2d(other)

        if other.ndim != 2:
            return np.multiply(self.toarray(), other)
        # Single element / wrapped object.
        if other.size == 1:
            if other.dtype == np.object_:
                # 'other' not convertible to ndarray.
                return NotImplemented
            return self._mul_scalar(other.flat[0])
        # Fast case for trivial sparse matrix.
        elif self.shape == (1, 1):
            return np.multiply(self.toarray()[0, 0], other)

        # General dense case: work in COO so stored entries can be addressed
        # by their (row, col) coordinates.
        ret = self.tocoo()
        # Matching shapes.
        if self.shape == other.shape:
            data = np.multiply(ret.data, other[ret.row, ret.col])
        # Sparse row vector times...
        elif self.shape[0] == 1:
            if other.shape[1] == 1:  # Dense column vector.
                data = np.multiply(ret.data, other)
            elif other.shape[1] == self.shape[1]:  # Dense matrix.
                data = np.multiply(ret.data, other[:, ret.col])
            else:
                raise ValueError("inconsistent shapes")
            # Tile the sparse row's coordinates over every output row.
            row = np.repeat(np.arange(other.shape[0]), len(ret.row))
            col = np.tile(ret.col, other.shape[0])
            return self._coo_container(
                (data.view(np.ndarray).ravel(), (row, col)),
                shape=(other.shape[0], self.shape[1]),
                copy=False
            )
        # Sparse column vector times...
        elif self.shape[1] == 1:
            if other.shape[0] == 1:  # Dense row vector.
                data = np.multiply(ret.data[:, None], other)
            elif other.shape[0] == self.shape[0]:  # Dense matrix.
                data = np.multiply(ret.data[:, None], other[ret.row])
            else:
                raise ValueError("inconsistent shapes")
            # Tile the sparse column's coordinates over every output column.
            row = np.repeat(ret.row, other.shape[1])
            col = np.tile(np.arange(other.shape[1]), len(ret.col))
            return self._coo_container(
                (data.view(np.ndarray).ravel(), (row, col)),
                shape=(self.shape[0], other.shape[1]),
                copy=False
            )
        # Sparse matrix times dense row vector.
        elif other.shape[0] == 1 and self.shape[1] == other.shape[1]:
            data = np.multiply(ret.data, other[:, ret.col].ravel())
        # Sparse matrix times dense column vector.
        elif other.shape[1] == 1 and self.shape[0] == other.shape[0]:
            data = np.multiply(ret.data, other[ret.row].ravel())
        else:
            raise ValueError("inconsistent shapes")
        ret.data = data.view(np.ndarray).ravel()
        return ret
472
+
473
+ ###########################
474
+ # Multiplication handlers #
475
+ ###########################
476
+
477
    def _matmul_vector(self, other):
        """Matrix-vector product with a 1-d dense `other` via the C kernel."""
        M, N = self.shape

        # output array
        result = np.zeros(M, dtype=upcast_char(self.dtype.char,
                                               other.dtype.char))

        # csr_matvec or csc_matvec
        fn = getattr(_sparsetools, self.format + '_matvec')
        fn(M, N, self.indptr, self.indices, self.data, other, result)

        return result
489
+
490
    def _matmul_multivector(self, other):
        """Matrix product with a dense 2-d `other` (several column vectors)."""
        M, N = self.shape
        n_vecs = other.shape[1]  # number of column vectors

        result = np.zeros((M, n_vecs),
                          dtype=upcast_char(self.dtype.char, other.dtype.char))

        # csr_matvecs or csc_matvecs; the kernel works on flat buffers,
        # hence the ravel() of both operand and output.
        fn = getattr(_sparsetools, self.format + '_matvecs')
        fn(M, N, n_vecs, self.indptr, self.indices, self.data,
           other.ravel(), result.ravel())

        return result
503
+
504
    def _matmul_sparse(self, other):
        """Sparse @ sparse product in two C passes.

        Pass 1 sizes the result (nnz upper bound); pass 2 fills it. The
        index dtype is recomputed after pass 1 so large products get a wide
        enough indptr/indices type.
        """
        M, K1 = self.shape
        K2, N = other.shape

        major_axis = self._swap((M, N))[0]
        other = self.__class__(other)  # convert to this format

        idx_dtype = self._get_index_dtype((self.indptr, self.indices,
                                           other.indptr, other.indices))

        # Pass 1: compute nnz of the product.
        fn = getattr(_sparsetools, self.format + '_matmat_maxnnz')
        nnz = fn(M, N,
                 np.asarray(self.indptr, dtype=idx_dtype),
                 np.asarray(self.indices, dtype=idx_dtype),
                 np.asarray(other.indptr, dtype=idx_dtype),
                 np.asarray(other.indices, dtype=idx_dtype))

        # Re-pick index dtype now that the true nnz bound is known.
        idx_dtype = self._get_index_dtype((self.indptr, self.indices,
                                           other.indptr, other.indices),
                                          maxval=nnz)

        indptr = np.empty(major_axis + 1, dtype=idx_dtype)
        indices = np.empty(nnz, dtype=idx_dtype)
        data = np.empty(nnz, dtype=upcast(self.dtype, other.dtype))

        # Pass 2: compute the product into the preallocated arrays.
        fn = getattr(_sparsetools, self.format + '_matmat')
        fn(M, N, np.asarray(self.indptr, dtype=idx_dtype),
           np.asarray(self.indices, dtype=idx_dtype),
           self.data,
           np.asarray(other.indptr, dtype=idx_dtype),
           np.asarray(other.indices, dtype=idx_dtype),
           other.data,
           indptr, indices, data)

        return self.__class__((data, indices, indptr), shape=(M, N))
539
+
540
    def diagonal(self, k=0):
        # Extract the k-th diagonal with the format's C helper.
        rows, cols = self.shape
        if k <= -rows or k >= cols:
            # Diagonal entirely outside the matrix.
            return np.empty(0, dtype=self.data.dtype)
        fn = getattr(_sparsetools, self.format + "_diagonal")
        # Length of the k-th diagonal for an M x N matrix.
        y = np.empty(min(rows + min(k, 0), cols - max(k, 0)),
                     dtype=upcast(self.dtype))
        fn(k, self.shape[0], self.shape[1], self.indptr, self.indices,
           self.data, y)
        return y

    diagonal.__doc__ = _spbase.diagonal.__doc__
552
+
553
+ #####################
554
+ # Other binary ops #
555
+ #####################
556
+
557
    def _maximum_minimum(self, other, npop, op_name, dense_check):
        """Shared implementation for maximum() and minimum().

        Parameters
        ----------
        npop : callable
            The numpy ufunc (np.maximum or np.minimum).
        op_name : str
            Name of the C binop kernel ('_maximum_' or '_minimum_').
        dense_check : callable
            Predicate on a scalar: True when the result would densify
            (e.g. maximum with a positive scalar).
        """
        if isscalarlike(other):
            if dense_check(other):
                # Implicit zeros are replaced by the scalar -> dense result.
                warn("Taking maximum (minimum) with > 0 (< 0) number results"
                     " to a dense matrix.", SparseEfficiencyWarning,
                     stacklevel=3)
                other_arr = np.empty(self.shape, dtype=np.asarray(other).dtype)
                other_arr.fill(other)
                other_arr = self.__class__(other_arr)
                return self._binopt(other_arr, op_name)
            else:
                # Zeros are unaffected; apply the ufunc to stored data only.
                self.sum_duplicates()
                new_data = npop(self.data, np.asarray(other))
                mat = self.__class__((new_data, self.indices, self.indptr),
                                     dtype=new_data.dtype, shape=self.shape)
                return mat
        elif isdense(other):
            return npop(self.todense(), other)
        elif issparse(other):
            return self._binopt(other, op_name)
        else:
            raise ValueError("Operands not compatible.")
579
+
580
+ def maximum(self, other):
581
+ return self._maximum_minimum(other, np.maximum,
582
+ '_maximum_', lambda x: np.asarray(x) > 0)
583
+
584
+ maximum.__doc__ = _spbase.maximum.__doc__
585
+
586
+ def minimum(self, other):
587
+ return self._maximum_minimum(other, np.minimum,
588
+ '_minimum_', lambda x: np.asarray(x) < 0)
589
+
590
+ minimum.__doc__ = _spbase.minimum.__doc__
591
+
592
+ #####################
593
+ # Reduce operations #
594
+ #####################
595
+
596
    def sum(self, axis=None, dtype=None, out=None):
        """Sum the array/matrix over the given axis. If the axis is None, sum
        over both rows and columns, returning a scalar.
        """
        # The _spbase base class already does axis=0 and axis=1 efficiently
        # so we only do the case axis=None here
        if (not hasattr(self, 'blocksize') and
                axis in self._swap(((1, -1), (0, 2)))[0]):
            # faster than multiplication for large minor axis in CSC/CSR
            res_dtype = get_sum_dtype(self.dtype)
            ret = np.zeros(len(self.indptr) - 1, dtype=res_dtype)

            # Reduce each major row/column of nonzeros with np.add.
            major_index, value = self._minor_reduce(np.add)
            ret[major_index] = value
            ret = self._ascontainer(ret)
            if axis % 2 == 1:
                ret = ret.T

            if out is not None and out.shape != ret.shape:
                raise ValueError('dimensions do not match')

            # sum over no axes: a no-op that honors dtype/out handling.
            return ret.sum(axis=(), dtype=dtype, out=out)
        # _spbase will handle the remaining situations when axis
        # is in {None, -1, 0, 1}
        else:
            return _spbase.sum(self, axis=axis, dtype=dtype, out=out)

    sum.__doc__ = _spbase.sum.__doc__
624
+
625
    def _minor_reduce(self, ufunc, data=None):
        """Reduce nonzeros with a ufunc over the minor axis when non-empty

        Can be applied to a function of self.data by supplying data parameter.

        Warning: this does not call sum_duplicates()

        Returns
        -------
        major_index : array of ints
            Major indices where nonzero

        value : array of self.dtype
            Reduce result for nonzeros in each major_index
        """
        if data is None:
            data = self.data
        # Rows/columns with at least one stored entry (nonzero indptr step).
        major_index = np.flatnonzero(np.diff(self.indptr))
        # reduceat reduces each segment [indptr[i], indptr[i+1]) at once.
        value = ufunc.reduceat(data,
                               downcast_intp_index(self.indptr[major_index]))
        return major_index, value
646
+
647
+ #######################
648
+ # Getting and Setting #
649
+ #######################
650
+
651
+ def _get_intXint(self, row, col):
652
+ M, N = self._swap(self.shape)
653
+ major, minor = self._swap((row, col))
654
+ indptr, indices, data = get_csr_submatrix(
655
+ M, N, self.indptr, self.indices, self.data,
656
+ major, major + 1, minor, minor + 1)
657
+ return data.sum(dtype=self.dtype)
658
+
659
+ def _get_sliceXslice(self, row, col):
660
+ major, minor = self._swap((row, col))
661
+ if major.step in (1, None) and minor.step in (1, None):
662
+ return self._get_submatrix(major, minor, copy=True)
663
+ return self._major_slice(major)._minor_slice(minor)
664
+
665
    def _get_arrayXarray(self, row, col):
        # inner indexing: value at each paired (row[i], col[i]) position.
        idx_dtype = self.indices.dtype
        M, N = self._swap(self.shape)
        major, minor = self._swap((row, col))
        major = np.asarray(major, dtype=idx_dtype)
        minor = np.asarray(minor, dtype=idx_dtype)

        # C helper samples each (major, minor) pair into val.
        val = np.empty(major.size, dtype=self.dtype)
        csr_sample_values(M, N, self.indptr, self.indices, self.data,
                          major.size, major.ravel(), minor.ravel(), val)
        if major.ndim == 1:
            return self._ascontainer(val)
        # 2-d index arrays produce a sparse result of the same shape.
        return self.__class__(val.reshape(major.shape))
679
+
680
+ def _get_columnXarray(self, row, col):
681
+ # outer indexing
682
+ major, minor = self._swap((row, col))
683
+ return self._major_index_fancy(major)._minor_index_fancy(minor)
684
+
685
    def _major_index_fancy(self, idx):
        """Index along the major axis where idx is an array of ints.

        Builds a new matrix whose major rows/columns are self's rows/columns
        selected (and possibly repeated/reordered) by `idx`.
        """
        idx_dtype = self._get_index_dtype((self.indptr, self.indices))
        indices = np.asarray(idx, dtype=idx_dtype).ravel()

        _, N = self._swap(self.shape)
        M = len(indices)
        new_shape = self._swap((M, N))
        if M == 0:
            return self.__class__(new_shape, dtype=self.dtype)

        # nnz of each selected row/column, then its cumulative sum = indptr.
        row_nnz = (self.indptr[indices + 1] - self.indptr[indices]).astype(idx_dtype)

        res_indptr = np.zeros(M+1, dtype=idx_dtype)
        np.cumsum(row_nnz, out=res_indptr[1:])

        nnz = res_indptr[-1]
        res_indices = np.empty(nnz, dtype=idx_dtype)
        res_data = np.empty(nnz, dtype=self.dtype)
        # C helper copies the selected rows' indices/data in order.
        csr_row_index(
            M,
            indices,
            self.indptr.astype(idx_dtype, copy=False),
            self.indices.astype(idx_dtype, copy=False),
            self.data,
            res_indices,
            res_data
        )

        return self.__class__((res_data, res_indices, res_indptr),
                              shape=new_shape, copy=False)
717
+
718
    def _major_slice(self, idx, copy=False):
        """Index along the major axis where idx is a slice object.
        """
        if idx == slice(None):
            return self.copy() if copy else self

        M, N = self._swap(self.shape)
        start, stop, step = idx.indices(M)
        M = len(range(start, stop, step))
        new_shape = self._swap((M, N))
        if M == 0:
            return self.__class__(new_shape, dtype=self.dtype)

        # Work out what slices are needed for `row_nnz`
        # start,stop can be -1, only if step is negative
        start0, stop0 = start, stop
        if stop == -1 and start >= 0:
            # A stop of -1 from a negative step must become None, otherwise
            # indptr[...:-1] would drop the last element instead of none.
            stop0 = None
        start1, stop1 = start + 1, stop + 1

        # Per-selected-row nnz = indptr[i+1] - indptr[i] along the slice.
        row_nnz = self.indptr[start1:stop1:step] - \
            self.indptr[start0:stop0:step]
        idx_dtype = self.indices.dtype
        res_indptr = np.zeros(M+1, dtype=idx_dtype)
        np.cumsum(row_nnz, out=res_indptr[1:])

        if step == 1:
            # Contiguous selection: a single slice of indices/data suffices.
            all_idx = slice(self.indptr[start], self.indptr[stop])
            res_indices = np.array(self.indices[all_idx], copy=copy)
            res_data = np.array(self.data[all_idx], copy=copy)
        else:
            nnz = res_indptr[-1]
            res_indices = np.empty(nnz, dtype=idx_dtype)
            res_data = np.empty(nnz, dtype=self.dtype)
            csr_row_slice(start, stop, step, self.indptr, self.indices,
                          self.data, res_indices, res_data)

        return self.__class__((res_data, res_indices, res_indptr),
                              shape=new_shape, copy=False)
757
+
758
    def _minor_index_fancy(self, idx):
        """Index along the minor axis where idx is an array of ints.

        Two C passes: count entries per output row (new indptr), then copy
        the selected indices/data.
        """
        idx_dtype = self._get_index_dtype((self.indices, self.indptr))
        indices = self.indices.astype(idx_dtype, copy=False)
        indptr = self.indptr.astype(idx_dtype, copy=False)

        idx = np.asarray(idx, dtype=idx_dtype).ravel()

        M, N = self._swap(self.shape)
        k = len(idx)
        new_shape = self._swap((M, k))
        if k == 0:
            return self.__class__(new_shape, dtype=self.dtype)

        # pass 1: count idx entries and compute new indptr
        col_offsets = np.zeros(N, dtype=idx_dtype)
        res_indptr = np.empty_like(self.indptr, dtype=idx_dtype)
        csr_column_index1(
            k,
            idx,
            M,
            N,
            indptr,
            indices,
            col_offsets,
            res_indptr,
        )

        # pass 2: copy indices/data for selected idxs
        # col_order maps sorted-column positions back to requested order.
        col_order = np.argsort(idx).astype(idx_dtype, copy=False)
        nnz = res_indptr[-1]
        res_indices = np.empty(nnz, dtype=idx_dtype)
        res_data = np.empty(nnz, dtype=self.dtype)
        csr_column_index2(col_order, col_offsets, len(self.indices),
                          indices, self.data, res_indices, res_data)
        return self.__class__((res_data, res_indices, res_indptr),
                              shape=new_shape, copy=False)
796
+
797
    def _minor_slice(self, idx, copy=False):
        """Index along the minor axis where idx is a slice object.
        """
        if idx == slice(None):
            return self.copy() if copy else self

        M, N = self._swap(self.shape)
        start, stop, step = idx.indices(N)
        N = len(range(start, stop, step))
        if N == 0:
            return self.__class__(self._swap((M, N)), dtype=self.dtype)
        if step == 1:
            # Contiguous minor slice has a dedicated fast path.
            return self._get_submatrix(minor=idx, copy=copy)
        # TODO: don't fall back to fancy indexing here
        return self._minor_index_fancy(np.arange(start, stop, step))
812
+
813
    def _get_submatrix(self, major=None, minor=None, copy=False):
        """Return a submatrix of this matrix.

        major, minor: None, int, or slice with step 1

        Returns a new matrix of this class covering rows [i0, i1) and
        columns [j0, j1) in major/minor coordinates.
        """
        M, N = self._swap(self.shape)
        i0, i1 = _process_slice(major, M)
        j0, j1 = _process_slice(minor, N)

        if i0 == 0 and j0 == 0 and i1 == M and j1 == N:
            # Full-extent selection: no extraction needed.
            return self.copy() if copy else self

        indptr, indices, data = get_csr_submatrix(
            M, N, self.indptr, self.indices, self.data, i0, i1, j0, j1)

        shape = self._swap((i1 - i0, j1 - j0))
        return self.__class__((data, indices, indptr), shape=shape,
                              dtype=self.dtype, copy=False)
831
+
832
+ def _set_intXint(self, row, col, x):
833
+ i, j = self._swap((row, col))
834
+ self._set_many(i, j, x)
835
+
836
+ def _set_arrayXarray(self, row, col, x):
837
+ i, j = self._swap((row, col))
838
+ self._set_many(i, j, x)
839
+
840
    def _set_arrayXarray_sparse(self, row, col, x):
        """Assign a sparse matrix x at the (row, col) index arrays.

        Broadcasts x along an axis of size 1 when the index arrays span
        more than one row/column.
        """
        # clear entries that will be overwritten
        self._zero_many(*self._swap((row, col)))

        M, N = row.shape  # matches col.shape
        broadcast_row = M != 1 and x.shape[0] == 1
        broadcast_col = N != 1 and x.shape[1] == 1
        r, c = x.row, x.col

        x = np.asarray(x.data, dtype=self.dtype)
        if x.size == 0:
            # x has no stored entries; zeroing above already did the work.
            return

        if broadcast_row:
            r = np.repeat(np.arange(M), len(r))
            c = np.tile(c, M)
            x = np.tile(x, M)
        if broadcast_col:
            r = np.repeat(r, N)
            c = np.tile(np.arange(N), len(c))
            x = np.repeat(x, N)
        # only assign entries in the new sparsity structure
        i, j = self._swap((row[r, c], col[r, c]))
        self._set_many(i, j, x)
864
+
865
    def _setdiag(self, values, k):
        """Set the k-th diagonal to `values` (scalar broadcast or 1-d array).

        Chooses between in-place insertion and a COO round-trip depending on
        how many new entries must be created.
        """
        if 0 in self.shape:
            return

        M, N = self.shape
        broadcast = (values.ndim == 0)

        # Compute the (i, j) coordinates of the k-th diagonal, clipped to
        # the matrix bounds and (when not broadcasting) to len(values).
        if k < 0:
            if broadcast:
                max_index = min(M + k, N)
            else:
                max_index = min(M + k, N, len(values))
            i = np.arange(-k, max_index - k, dtype=self.indices.dtype)
            j = np.arange(max_index, dtype=self.indices.dtype)

        else:
            if broadcast:
                max_index = min(M, N - k)
            else:
                max_index = min(M, N - k, len(values))
            i = np.arange(max_index, dtype=self.indices.dtype)
            j = np.arange(k, k + max_index, dtype=self.indices.dtype)

        if not broadcast:
            values = values[:len(i)]

        x = np.atleast_1d(np.asarray(values, dtype=self.dtype)).ravel()
        if x.squeeze().shape != i.squeeze().shape:
            x = np.broadcast_to(x, i.shape)
        if x.size == 0:
            return

        M, N = self._swap((M, N))
        i, j = self._swap((i, j))
        n_samples = x.size
        offsets = np.empty(n_samples, dtype=self.indices.dtype)
        # offsets[s] = position of (i[s], j[s]) in .data, or -1 if absent.
        ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
                                 i, j, offsets)
        if ret == 1:
            # rinse and repeat: duplicates prevented offset resolution.
            self.sum_duplicates()
            csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
                               i, j, offsets)
        if -1 not in offsets:
            # only affects existing non-zero cells
            self.data[offsets] = x
            return

        mask = (offsets <= -1)
        # Boundary between csc and convert to coo
        # The value 0.001 is justified in gh-19962#issuecomment-1920499678
        if mask.sum() < self.nnz * 0.001:
            # create new entries
            i = i[mask]
            j = j[mask]
            self._insert_many(i, j, x[mask])
            # replace existing entries
            mask = ~mask
            self.data[offsets[mask]] = x[mask]
        else:
            # convert to coo for _set_diag
            coo = self.tocoo()
            coo._setdiag(values, k)
            arrays = coo._coo_to_compressed(self._swap)
            self.indptr, self.indices, self.data, _ = arrays
930
+
931
    def _prepare_indices(self, i, j):
        """Validate and normalize (major, minor) index arrays.

        Returns 1-d index arrays of the matrix's index dtype plus the
        major/minor dimensions; raises IndexError when out of bounds.
        """
        M, N = self._swap(self.shape)

        def check_bounds(indices, bound):
            idx = indices.max()
            if idx >= bound:
                raise IndexError('index (%d) out of range (>= %d)' %
                                 (idx, bound))
            idx = indices.min()
            if idx < -bound:
                raise IndexError('index (%d) out of range (< -%d)' %
                                 (idx, bound))

        i = np.atleast_1d(np.asarray(i, dtype=self.indices.dtype)).ravel()
        j = np.atleast_1d(np.asarray(j, dtype=self.indices.dtype)).ravel()
        check_bounds(i, M)
        check_bounds(j, N)
        return i, j, M, N
949
+
950
    def _set_many(self, i, j, x):
        """Sets value at each (i, j) to x

        Here (i,j) index major and minor respectively, and must not contain
        duplicate entries.
        """
        i, j, M, N = self._prepare_indices(i, j)
        x = np.atleast_1d(np.asarray(x, dtype=self.dtype)).ravel()

        n_samples = x.size
        offsets = np.empty(n_samples, dtype=self.indices.dtype)
        # offsets[s] = position of (i[s], j[s]) in .data, or -1 if absent.
        ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
                                 i, j, offsets)
        if ret == 1:
            # rinse and repeat: duplicates prevented offset resolution.
            self.sum_duplicates()
            csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
                               i, j, offsets)

        if -1 not in offsets:
            # only affects existing non-zero cells
            self.data[offsets] = x
            return

        else:
            warn("Changing the sparsity structure of a {}_matrix is expensive."
                 " lil_matrix is more efficient.".format(self.format),
                 SparseEfficiencyWarning, stacklevel=3)
            # replace where possible
            mask = offsets > -1
            self.data[offsets[mask]] = x[mask]
            # only insertions remain
            mask = ~mask
            i = i[mask]
            i[i < 0] += M
            j = j[mask]
            j[j < 0] += N
            self._insert_many(i, j, x[mask])
988
+
989
    def _zero_many(self, i, j):
        """Sets value at each (i, j) to zero, preserving sparsity structure.

        Here (i,j) index major and minor respectively.
        """
        i, j, M, N = self._prepare_indices(i, j)

        n_samples = len(i)
        offsets = np.empty(n_samples, dtype=self.indices.dtype)
        # offsets[s] = position of (i[s], j[s]) in .data, or -1 if absent.
        ret = csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
                                 i, j, offsets)
        if ret == 1:
            # rinse and repeat: duplicates prevented offset resolution.
            self.sum_duplicates()
            csr_sample_offsets(M, N, self.indptr, self.indices, n_samples,
                               i, j, offsets)

        # only assign zeros to the existing sparsity structure
        self.data[offsets[offsets > -1]] = 0
1008
+
1009
    def _insert_many(self, i, j, x):
        """Inserts new nonzero at each (i, j) with value x

        Here (i,j) index major and minor respectively.
        i, j and x must be non-empty, 1d arrays.
        Inserts each major group (e.g. all entries per row) at a time.
        Maintains has_sorted_indices property.
        Modifies i, j, x in place.
        """
        order = np.argsort(i, kind='mergesort')  # stable for duplicates
        i = i.take(order, mode='clip')
        j = j.take(order, mode='clip')
        x = x.take(order, mode='clip')

        do_sort = self.has_sorted_indices

        # Update index data type
        idx_dtype = self._get_index_dtype((self.indices, self.indptr),
                                          maxval=(self.indptr[-1] + x.size))
        self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
        self.indices = np.asarray(self.indices, dtype=idx_dtype)
        i = np.asarray(i, dtype=idx_dtype)
        j = np.asarray(j, dtype=idx_dtype)

        # Collate old and new in chunks by major index
        indices_parts = []
        data_parts = []
        # ui = unique major indices getting insertions;
        # ui_indptr = first position of each group in the sorted i.
        ui, ui_indptr = np.unique(i, return_index=True)
        ui_indptr = np.append(ui_indptr, len(j))
        new_nnzs = np.diff(ui_indptr)
        prev = 0
        for c, (ii, js, je) in enumerate(zip(ui, ui_indptr, ui_indptr[1:])):
            # old entries
            start = self.indptr[prev]
            stop = self.indptr[ii]
            indices_parts.append(self.indices[start:stop])
            data_parts.append(self.data[start:stop])

            # handle duplicate j: keep last setting
            # (np.unique on the reversed slice keeps the last occurrence)
            uj, uj_indptr = np.unique(j[js:je][::-1], return_index=True)
            if len(uj) == je - js:
                indices_parts.append(j[js:je])
                data_parts.append(x[js:je])
            else:
                indices_parts.append(j[js:je][::-1][uj_indptr])
                data_parts.append(x[js:je][::-1][uj_indptr])
                new_nnzs[c] = len(uj)

            prev = ii

        # remaining old entries
        start = self.indptr[ii]
        indices_parts.append(self.indices[start:])
        data_parts.append(self.data[start:])

        # update attributes
        self.indices = np.concatenate(indices_parts)
        self.data = np.concatenate(data_parts)
        # Rebuild indptr: old per-row counts plus insertions, cumsummed.
        nnzs = np.empty(self.indptr.shape, dtype=idx_dtype)
        nnzs[0] = idx_dtype(0)
        indptr_diff = np.diff(self.indptr)
        indptr_diff[ui] += new_nnzs
        nnzs[1:] = indptr_diff
        self.indptr = np.cumsum(nnzs, out=nnzs)

        if do_sort:
            # TODO: only sort where necessary
            self.has_sorted_indices = False
            self.sort_indices()

        self.check_format(full_check=False)
1080
+
1081
+ ######################
1082
+ # Conversion methods #
1083
+ ######################
1084
+
1085
    def tocoo(self, copy=True):
        # Expand the compressed major pointers into explicit coordinates.
        major_dim, minor_dim = self._swap(self.shape)
        minor_indices = self.indices
        major_indices = np.empty(len(minor_indices), dtype=self.indices.dtype)
        # expandptr writes the major index of every stored entry.
        _sparsetools.expandptr(major_dim, self.indptr, major_indices)
        coords = self._swap((major_indices, minor_indices))

        return self._coo_container(
            (self.data, coords), self.shape, copy=copy, dtype=self.dtype
        )

    tocoo.__doc__ = _spbase.tocoo.__doc__
1097
+
1098
    def toarray(self, order=None, out=None):
        if out is None and order is None:
            # Default to the memory order natural for this format.
            order = self._swap('cf')[0]
        out = self._process_toarray_args(order, out)
        if not (out.flags.c_contiguous or out.flags.f_contiguous):
            raise ValueError('Output array must be C or F contiguous')
        # align ideal order with output array order
        if out.flags.c_contiguous:
            x = self.tocsr()
            y = out
        else:
            x = self.tocsc()
            y = out.T  # transpose of F-ordered out is C-contiguous
        M, N = x._swap(x.shape)
        csr_todense(M, N, x.indptr, x.indices, x.data, y)
        return out

    toarray.__doc__ = _spbase.toarray.__doc__
1116
+
1117
+ ##############################################################
1118
+ # methods that examine or modify the internal data structure #
1119
+ ##############################################################
1120
+
1121
    def eliminate_zeros(self):
        """Remove zero entries from the array/matrix

        This is an *in place* operation.
        """
        M, N = self._swap(self.shape)
        # C helper compacts indices/data, leaving stale tail entries.
        _sparsetools.csr_eliminate_zeros(M, N, self.indptr, self.indices,
                                         self.data)
        self.prune()  # nnz may have changed
1130
+
1131
    @property
    def has_canonical_format(self) -> bool:
        """Whether the array/matrix has sorted indices and no duplicates

        Returns
            - True: if the above applies
            - False: otherwise

        has_canonical_format implies has_sorted_indices, so if the latter flag
        is False, so will the former be; if the former is found True, the
        latter flag is also set.
        """
        # first check to see if result was cached
        if not getattr(self, '_has_sorted_indices', True):
            # not sorted => not canonical
            self._has_canonical_format = False
        elif not hasattr(self, '_has_canonical_format'):
            # Compute once via the C check and cache through the setter
            # (which also sets has_sorted_indices when True).
            self.has_canonical_format = bool(
                _sparsetools.csr_has_canonical_format(
                    len(self.indptr) - 1, self.indptr, self.indices)
            )
        return self._has_canonical_format
1153
+
1154
    @has_canonical_format.setter
    def has_canonical_format(self, val: bool):
        # Cache the flag; canonical format implies sorted indices.
        self._has_canonical_format = bool(val)
        if val:
            self.has_sorted_indices = True
1159
+
1160
    def sum_duplicates(self):
        """Eliminate duplicate entries by adding them together

        This is an *in place* operation.
        """
        if self.has_canonical_format:
            return
        # Duplicates must be adjacent for the C helper to merge them.
        self.sort_indices()

        M, N = self._swap(self.shape)
        _sparsetools.csr_sum_duplicates(M, N, self.indptr, self.indices,
                                        self.data)

        self.prune()  # nnz may have changed
        self.has_canonical_format = True
1175
+
1176
    @property
    def has_sorted_indices(self) -> bool:
        """Whether the indices are sorted

        Returns
            - True: if the indices of the array/matrix are in sorted order
            - False: otherwise
        """
        # first check to see if result was cached
        if not hasattr(self, '_has_sorted_indices'):
            # Compute once with the C check and cache the answer.
            self._has_sorted_indices = bool(
                _sparsetools.csr_has_sorted_indices(
                    len(self.indptr) - 1, self.indptr, self.indices)
            )
        return self._has_sorted_indices
1191
+
1192
    @has_sorted_indices.setter
    def has_sorted_indices(self, val: bool):
        # Cache the flag; sort_indices() sets it True after sorting.
        self._has_sorted_indices = bool(val)
1195
+
1196
+
1197
+ def sorted_indices(self):
1198
+ """Return a copy of this array/matrix with sorted indices
1199
+ """
1200
+ A = self.copy()
1201
+ A.sort_indices()
1202
+ return A
1203
+
1204
+ # an alternative that has linear complexity is the following
1205
+ # although the previous option is typically faster
1206
+ # return self.toother().toother()
1207
+
1208
    def sort_indices(self):
        """Sort the indices of this array/matrix *in place*
        """
        # No-op when the cached flag says indices are already sorted.
        if not self.has_sorted_indices:
            _sparsetools.csr_sort_indices(len(self.indptr) - 1, self.indptr,
                                          self.indices, self.data)
            self.has_sorted_indices = True
1216
+
1217
    def prune(self):
        """Remove empty space after all non-zero elements.

        Truncates .indices and .data to exactly nnz elements after sanity
        checking the structure arrays.
        """
        major_dim = self._swap(self.shape)[0]

        if len(self.indptr) != major_dim + 1:
            raise ValueError('index pointer has invalid length')
        if len(self.indices) < self.nnz:
            raise ValueError('indices array has fewer than nnz elements')
        if len(self.data) < self.nnz:
            raise ValueError('data array has fewer than nnz elements')

        self.indices = _prune_array(self.indices[:self.nnz])
        self.data = _prune_array(self.data[:self.nnz])
1231
+
1232
    def resize(self, *shape):
        shape = check_shape(shape)
        if hasattr(self, 'blocksize'):
            # BSR: resize in units of whole blocks.
            bm, bn = self.blocksize
            new_M, rm = divmod(shape[0], bm)
            new_N, rn = divmod(shape[1], bn)
            if rm or rn:
                raise ValueError("shape must be divisible into {} blocks. "
                                 "Got {}".format(self.blocksize, shape))
            M, N = self.shape[0] // bm, self.shape[1] // bn
        else:
            new_M, new_N = self._swap(shape)
            M, N = self._swap(self.shape)

        if new_M < M:
            # Shrinking the major axis: drop trailing rows' entries.
            self.indices = self.indices[:self.indptr[new_M]]
            self.data = self.data[:self.indptr[new_M]]
            self.indptr = self.indptr[:new_M + 1]
        elif new_M > M:
            # Growing the major axis: pad indptr with empty rows.
            self.indptr = np.resize(self.indptr, new_M + 1)
            self.indptr[M + 1:].fill(self.indptr[M])

        if new_N < N:
            # Shrinking the minor axis: mask out out-of-range entries and
            # rebuild indptr from the surviving per-row counts.
            mask = self.indices < new_N
            if not np.all(mask):
                self.indices = self.indices[mask]
                self.data = self.data[mask]
                major_index, val = self._minor_reduce(np.add, mask)
                self.indptr.fill(0)
                self.indptr[1:][major_index] = val
                np.cumsum(self.indptr, out=self.indptr)

        self._shape = shape

    resize.__doc__ = _spbase.resize.__doc__
1267
+
1268
+ ###################
1269
+ # utility methods #
1270
+ ###################
1271
+
1272
+ # needed by _data_matrix
1273
+ def _with_data(self, data, copy=True):
1274
+ """Returns a matrix with the same sparsity structure as self,
1275
+ but with different data. By default the structure arrays
1276
+ (i.e. .indptr and .indices) are copied.
1277
+ """
1278
+ if copy:
1279
+ return self.__class__((data, self.indices.copy(),
1280
+ self.indptr.copy()),
1281
+ shape=self.shape,
1282
+ dtype=data.dtype)
1283
+ else:
1284
+ return self.__class__((data, self.indices, self.indptr),
1285
+ shape=self.shape, dtype=data.dtype)
1286
+
1287
    def _binopt(self, other, op):
        """apply the binary operation fn to two sparse matrices."""
        # Coerce the operand to our own format (csr/csc) first.
        other = self.__class__(other)

        # e.g. csr_plus_csr, csr_minus_csr, etc.
        fn = getattr(_sparsetools, self.format + op + self.format)

        # Worst case output size: the two sparsity patterns are disjoint.
        maxnnz = self.nnz + other.nnz
        idx_dtype = self._get_index_dtype((self.indptr, self.indices,
                                           other.indptr, other.indices),
                                          maxval=maxnnz)
        indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
        indices = np.empty(maxnnz, dtype=idx_dtype)

        # Comparison ops produce boolean data regardless of operand dtypes.
        bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']
        if op in bool_ops:
            data = np.empty(maxnnz, dtype=np.bool_)
        else:
            data = np.empty(maxnnz, dtype=upcast(self.dtype, other.dtype))

        # The C routine fills indptr/indices/data in place.
        fn(self.shape[0], self.shape[1],
           np.asarray(self.indptr, dtype=idx_dtype),
           np.asarray(self.indices, dtype=idx_dtype),
           self.data,
           np.asarray(other.indptr, dtype=idx_dtype),
           np.asarray(other.indices, dtype=idx_dtype),
           other.data,
           indptr, indices, data)

        A = self.__class__((data, indices, indptr), shape=self.shape)
        # Trim the over-allocated arrays down to the actual nnz.
        A.prune()

        return A
1320
+
1321
    def _divide_sparse(self, other):
        """
        Divide this matrix by a second sparse matrix.
        """
        if other.shape != self.shape:
            raise ValueError('inconsistent shapes')

        # Elementwise division over the union of both sparsity patterns.
        r = self._binopt(other, '_eldiv_')

        if np.issubdtype(r.dtype, np.inexact):
            # Eldiv leaves entries outside the combined sparsity
            # pattern empty, so they must be filled manually.
            # Everything outside of other's sparsity is NaN, and everything
            # inside it is either zero or defined by eldiv.
            out = np.empty(self.shape, dtype=self.dtype)
            out.fill(np.nan)
            row, col = other.nonzero()
            out[row, col] = 0
            r = r.tocoo()
            out[r.row, r.col] = r.data
            # Result is dense; wrap it in the matching container type.
            out = self._container(out)
        else:
            # integers types go with nan <-> 0
            out = r

        return out
1347
+
1348
+
1349
+ def _process_slice(sl, num):
1350
+ if sl is None:
1351
+ i0, i1 = 0, num
1352
+ elif isinstance(sl, slice):
1353
+ i0, i1, stride = sl.indices(num)
1354
+ if stride != 1:
1355
+ raise ValueError('slicing with step != 1 not supported')
1356
+ i0 = min(i0, i1) # give an empty slice when i0 > i1
1357
+ elif isintlike(sl):
1358
+ if sl < 0:
1359
+ sl += num
1360
+ i0, i1 = sl, sl + 1
1361
+ if i0 < 0 or i1 > num:
1362
+ raise IndexError('index out of bounds: 0 <= %d < %d <= %d' %
1363
+ (i0, i1, num))
1364
+ else:
1365
+ raise TypeError('expected slice or scalar')
1366
+
1367
+ return i0, i1
env-llmeval/lib/python3.10/site-packages/scipy/sparse/_construct.py ADDED
@@ -0,0 +1,1401 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Functions to construct sparse matrices and arrays
2
+ """
3
+
4
+ __docformat__ = "restructuredtext en"
5
+
6
+ __all__ = ['spdiags', 'eye', 'identity', 'kron', 'kronsum',
7
+ 'hstack', 'vstack', 'bmat', 'rand', 'random', 'diags', 'block_diag',
8
+ 'diags_array', 'block_array', 'eye_array', 'random_array']
9
+
10
+ import numbers
11
+ import math
12
+ import numpy as np
13
+
14
+ from scipy._lib._util import check_random_state, rng_integers
15
+ from ._sputils import upcast, get_index_dtype, isscalarlike
16
+
17
+ from ._sparsetools import csr_hstack
18
+ from ._bsr import bsr_matrix, bsr_array
19
+ from ._coo import coo_matrix, coo_array
20
+ from ._csc import csc_matrix, csc_array
21
+ from ._csr import csr_matrix, csr_array
22
+ from ._dia import dia_matrix, dia_array
23
+
24
+ from ._base import issparse, sparray
25
+
26
+
27
+ def spdiags(data, diags, m=None, n=None, format=None):
28
+ """
29
+ Return a sparse matrix from diagonals.
30
+
31
+ Parameters
32
+ ----------
33
+ data : array_like
34
+ Matrix diagonals stored row-wise
35
+ diags : sequence of int or an int
36
+ Diagonals to set:
37
+
38
+ * k = 0 the main diagonal
39
+ * k > 0 the kth upper diagonal
40
+ * k < 0 the kth lower diagonal
41
+ m, n : int, tuple, optional
42
+ Shape of the result. If `n` is None and `m` is a given tuple,
43
+ the shape is this tuple. If omitted, the matrix is square and
44
+ its shape is len(data[0]).
45
+ format : str, optional
46
+ Format of the result. By default (format=None) an appropriate sparse
47
+ matrix format is returned. This choice is subject to change.
48
+
49
+ .. warning::
50
+
51
+ This function returns a sparse matrix -- not a sparse array.
52
+ You are encouraged to use ``diags_array`` to take advantage
53
+ of the sparse array functionality.
54
+
55
+ See Also
56
+ --------
57
+ diags_array : more convenient form of this function
58
+ diags : matrix version of diags_array
59
+ dia_matrix : the sparse DIAgonal format.
60
+
61
+ Examples
62
+ --------
63
+ >>> import numpy as np
64
+ >>> from scipy.sparse import spdiags
65
+ >>> data = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]])
66
+ >>> diags = np.array([0, -1, 2])
67
+ >>> spdiags(data, diags, 4, 4).toarray()
68
+ array([[1, 0, 3, 0],
69
+ [1, 2, 0, 4],
70
+ [0, 2, 3, 0],
71
+ [0, 0, 3, 4]])
72
+
73
+ """
74
+ if m is None and n is None:
75
+ m = n = len(data[0])
76
+ elif n is None:
77
+ m, n = m
78
+ return dia_matrix((data, diags), shape=(m, n)).asformat(format)
79
+
80
+
81
def diags_array(diagonals, /, *, offsets=0, shape=None, format=None, dtype=None):
    """
    Construct a sparse array from diagonals.

    Parameters
    ----------
    diagonals : sequence of array_like
        Sequence of arrays containing the array diagonals,
        corresponding to `offsets`.
    offsets : sequence of int or an int, optional
        Diagonals to set:
        - k = 0 the main diagonal (default)
        - k > 0 the kth upper diagonal
        - k < 0 the kth lower diagonal
    shape : tuple of int, optional
        Shape of the result. If omitted, a square array large enough
        to contain the diagonals is returned.
    format : {"dia", "csr", "csc", "lil", ...}, optional
        Matrix format of the result. By default (format=None) an
        appropriate sparse array format is returned. This choice is
        subject to change.
    dtype : dtype, optional
        Data type of the array.

    Notes
    -----
    The result from `diags_array` is the sparse equivalent of::

        np.diag(diagonals[0], offsets[0])
        + ...
        + np.diag(diagonals[k], offsets[k])

    Repeated diagonal offsets are disallowed.

    .. versionadded:: 1.11

    Examples
    --------
    >>> from scipy.sparse import diags_array
    >>> diagonals = [[1, 2, 3, 4], [1, 2, 3], [1, 2]]
    >>> diags_array(diagonals, offsets=[0, -1, 2]).toarray()
    array([[1, 0, 1, 0],
           [1, 2, 0, 2],
           [0, 2, 3, 0],
           [0, 0, 3, 4]])

    Broadcasting of scalars is supported (but shape needs to be
    specified):

    >>> diags_array([1, -2, 1], offsets=[-1, 0, 1], shape=(4, 4)).toarray()
    array([[-2.,  1.,  0.,  0.],
           [ 1., -2.,  1.,  0.],
           [ 0.,  1., -2.,  1.],
           [ 0.,  0.,  1., -2.]])


    If only one diagonal is wanted (as in `numpy.diag`), the following
    works as well:

    >>> diags_array([1, 2, 3], offsets=1).toarray()
    array([[ 0.,  1.,  0.,  0.],
           [ 0.,  0.,  2.,  0.],
           [ 0.,  0.,  0.,  3.],
           [ 0.,  0.,  0.,  0.]])
    """
    # if offsets is not a sequence, assume that there's only one diagonal
    if isscalarlike(offsets):
        # now check that there's actually only one diagonal
        if len(diagonals) == 0 or isscalarlike(diagonals[0]):
            diagonals = [np.atleast_1d(diagonals)]
        else:
            raise ValueError("Different number of diagonals and offsets.")
    else:
        diagonals = list(map(np.atleast_1d, diagonals))

    offsets = np.atleast_1d(offsets)

    # Basic check
    if len(diagonals) != len(offsets):
        raise ValueError("Different number of diagonals and offsets.")

    # Determine shape, if omitted
    if shape is None:
        # Square, just large enough to hold the first diagonal at its offset.
        m = len(diagonals[0]) + abs(int(offsets[0]))
        shape = (m, m)

    # Determine data type, if omitted
    if dtype is None:
        dtype = np.common_type(*diagonals)

    # Construct data array
    m, n = shape

    # Row width of the DIA data array: enough for the widest stored
    # diagonal, including the leading padding DIA requires for k > 0.
    M = max([min(m + offset, n - offset) + max(0, offset)
             for offset in offsets])
    M = max(0, M)
    data_arr = np.zeros((len(offsets), M), dtype=dtype)

    K = min(m, n)

    for j, diagonal in enumerate(diagonals):
        offset = offsets[j]
        # DIA stores diagonal k starting at column max(0, k).
        k = max(0, offset)
        length = min(m + offset, n - offset, K)
        if length < 0:
            raise ValueError("Offset %d (index %d) out of bounds" % (offset, j))
        try:
            # A length-1 diagonal broadcasts across the whole diagonal here.
            data_arr[j, k:k+length] = diagonal[...,:length]
        except ValueError as e:
            if len(diagonal) != length and len(diagonal) != 1:
                raise ValueError(
                    "Diagonal length (index %d: %d at offset %d) does not "
                    "agree with array size (%d, %d)." % (
                    j, len(diagonal), offset, m, n)) from e
            raise

    return dia_array((data_arr, offsets), shape=(m, n)).asformat(format)
198
+
199
+
200
+ def diags(diagonals, offsets=0, shape=None, format=None, dtype=None):
201
+ """
202
+ Construct a sparse matrix from diagonals.
203
+
204
+ .. warning::
205
+
206
+ This function returns a sparse matrix -- not a sparse array.
207
+ You are encouraged to use ``diags_array`` to take advantage
208
+ of the sparse array functionality.
209
+
210
+ Parameters
211
+ ----------
212
+ diagonals : sequence of array_like
213
+ Sequence of arrays containing the matrix diagonals,
214
+ corresponding to `offsets`.
215
+ offsets : sequence of int or an int, optional
216
+ Diagonals to set:
217
+ - k = 0 the main diagonal (default)
218
+ - k > 0 the kth upper diagonal
219
+ - k < 0 the kth lower diagonal
220
+ shape : tuple of int, optional
221
+ Shape of the result. If omitted, a square matrix large enough
222
+ to contain the diagonals is returned.
223
+ format : {"dia", "csr", "csc", "lil", ...}, optional
224
+ Matrix format of the result. By default (format=None) an
225
+ appropriate sparse matrix format is returned. This choice is
226
+ subject to change.
227
+ dtype : dtype, optional
228
+ Data type of the matrix.
229
+
230
+ See Also
231
+ --------
232
+ spdiags : construct matrix from diagonals
233
+ diags_array : construct sparse array instead of sparse matrix
234
+
235
+ Notes
236
+ -----
237
+ This function differs from `spdiags` in the way it handles
238
+ off-diagonals.
239
+
240
+ The result from `diags` is the sparse equivalent of::
241
+
242
+ np.diag(diagonals[0], offsets[0])
243
+ + ...
244
+ + np.diag(diagonals[k], offsets[k])
245
+
246
+ Repeated diagonal offsets are disallowed.
247
+
248
+ .. versionadded:: 0.11
249
+
250
+ Examples
251
+ --------
252
+ >>> from scipy.sparse import diags
253
+ >>> diagonals = [[1, 2, 3, 4], [1, 2, 3], [1, 2]]
254
+ >>> diags(diagonals, [0, -1, 2]).toarray()
255
+ array([[1, 0, 1, 0],
256
+ [1, 2, 0, 2],
257
+ [0, 2, 3, 0],
258
+ [0, 0, 3, 4]])
259
+
260
+ Broadcasting of scalars is supported (but shape needs to be
261
+ specified):
262
+
263
+ >>> diags([1, -2, 1], [-1, 0, 1], shape=(4, 4)).toarray()
264
+ array([[-2., 1., 0., 0.],
265
+ [ 1., -2., 1., 0.],
266
+ [ 0., 1., -2., 1.],
267
+ [ 0., 0., 1., -2.]])
268
+
269
+
270
+ If only one diagonal is wanted (as in `numpy.diag`), the following
271
+ works as well:
272
+
273
+ >>> diags([1, 2, 3], 1).toarray()
274
+ array([[ 0., 1., 0., 0.],
275
+ [ 0., 0., 2., 0.],
276
+ [ 0., 0., 0., 3.],
277
+ [ 0., 0., 0., 0.]])
278
+ """
279
+ A = diags_array(diagonals, offsets=offsets, shape=shape, dtype=dtype)
280
+ return dia_matrix(A).asformat(format)
281
+
282
+
283
def identity(n, dtype='d', format=None):
    """Identity matrix in sparse format

    Returns an identity matrix with shape (n,n) using a given
    sparse format and dtype. This differs from `eye_array` in
    that it has a square shape with ones only on the main diagonal.
    It is thus the multiplicative identity. `eye_array` allows
    rectangular shapes and the diagonal can be offset from the main one.

    .. warning::

        This function returns a sparse matrix -- not a sparse array.
        You are encouraged to use ``eye_array`` to take advantage
        of the sparse array functionality.

    Parameters
    ----------
    n : int
        Shape of the identity matrix.
    dtype : dtype, optional
        Data type of the matrix
    format : str, optional
        Sparse format of the result, e.g., format="csr", etc.

    Examples
    --------
    >>> import scipy as sp
    >>> sp.sparse.identity(3).toarray()
    array([[ 1.,  0.,  0.],
           [ 0.,  1.,  0.],
           [ 0.,  0.,  1.]])
    >>> sp.sparse.identity(3, dtype='int8', format='dia')
    <3x3 sparse matrix of type '<class 'numpy.int8'>'
        with 3 stored elements (1 diagonals) in DIAgonal format>
    >>> sp.sparse.eye_array(3, dtype='int8', format='dia')
    <3x3 sparse array of type '<class 'numpy.int8'>'
        with 3 stored elements (1 diagonals) in DIAgonal format>

    """
    # A square n x n eye with offset 0 is exactly the identity.
    return eye(n, n, dtype=dtype, format=format)
323
+
324
+
325
def eye_array(m, n=None, *, k=0, dtype=float, format=None):
    """Identity matrix in sparse array format

    Return a sparse array with ones on diagonal.
    Specifically a sparse array (m x n) where the kth diagonal
    is all ones and everything else is zeros.

    Parameters
    ----------
    m : int or tuple of ints
        Number of rows requested.
    n : int, optional
        Number of columns. Default: `m`.
    k : int, optional
        Diagonal to place ones on. Default: 0 (main diagonal).
    dtype : dtype, optional
        Data type of the array
    format : str, optional (default: "dia")
        Sparse format of the result, e.g., format="csr", etc.

    Examples
    --------
    >>> import numpy as np
    >>> import scipy as sp
    >>> sp.sparse.eye_array(3).toarray()
    array([[ 1.,  0.,  0.],
           [ 0.,  1.,  0.],
           [ 0.,  0.,  1.]])
    >>> sp.sparse.eye_array(3, dtype=np.int8)
    <3x3 sparse array of type '<class 'numpy.int8'>'
        with 3 stored elements (1 diagonals) in DIAgonal format>

    """
    # TODO: fold this wrapper into _eye() once spmatrix is removed.
    return _eye(m, n, k, dtype, format)
360
+
361
+
362
def _eye(m, n, k, dtype, format, as_sparray=True):
    # Shared implementation behind eye_array() and eye(); as_sparray
    # selects sparse-array vs (deprecated) sparse-matrix containers.
    if as_sparray:
        csr_sparse = csr_array
        csc_sparse = csc_array
        coo_sparse = coo_array
        diags_sparse = diags_array
    else:
        csr_sparse = csr_matrix
        csc_sparse = csc_matrix
        coo_sparse = coo_matrix
        diags_sparse = diags

    # Default to a square result.
    if n is None:
        n = m
    m, n = int(m), int(n)

    if m == n and k == 0:
        # fast branch for special formats: build the structure arrays
        # directly instead of going through the DIA constructor.
        if format in ['csr', 'csc']:
            idx_dtype = get_index_dtype(maxval=n)
            indptr = np.arange(n+1, dtype=idx_dtype)
            indices = np.arange(n, dtype=idx_dtype)
            data = np.ones(n, dtype=dtype)
            cls = {'csr': csr_sparse, 'csc': csc_sparse}[format]
            return cls((data, indices, indptr), (n, n))

        elif format == 'coo':
            idx_dtype = get_index_dtype(maxval=n)
            row = np.arange(n, dtype=idx_dtype)
            col = np.arange(n, dtype=idx_dtype)
            data = np.ones(n, dtype=dtype)
            return coo_sparse((data, (row, col)), (n, n))

    # General case: a single diagonal of ones at offset k; its length is
    # clipped to the rectangle (and to zero for fully out-of-range k).
    data = np.ones((1, max(0, min(m + k, n))), dtype=dtype)
    return diags_sparse(data, offsets=[k], shape=(m, n), dtype=dtype).asformat(format)
397
+
398
+
399
def eye(m, n=None, k=0, dtype=float, format=None):
    """Sparse matrix with ones on diagonal

    Returns a sparse matrix (m x n) where the kth diagonal
    is all ones and everything else is zeros.

    Parameters
    ----------
    m : int
        Number of rows in the matrix.
    n : int, optional
        Number of columns. Default: `m`.
    k : int, optional
        Diagonal to place ones on. Default: 0 (main diagonal).
    dtype : dtype, optional
        Data type of the matrix.
    format : str, optional
        Sparse format of the result, e.g., format="csr", etc.

    .. warning::

        This function returns a sparse matrix -- not a sparse array.
        You are encouraged to use ``eye_array`` to take advantage
        of the sparse array functionality.

    Examples
    --------
    >>> import numpy as np
    >>> import scipy as sp
    >>> sp.sparse.eye(3).toarray()
    array([[ 1.,  0.,  0.],
           [ 0.,  1.,  0.],
           [ 0.,  0.,  1.]])
    >>> sp.sparse.eye(3, dtype=np.int8)
    <3x3 sparse matrix of type '<class 'numpy.int8'>'
        with 3 stored elements (1 diagonals) in DIAgonal format>

    """
    # as_sparray=False requests the legacy spmatrix containers.
    return _eye(m, n, k, dtype, format, as_sparray=False)
438
+
439
+
440
def kron(A, B, format=None):
    """kronecker product of sparse matrices A and B

    Parameters
    ----------
    A : sparse or dense matrix
        first matrix of the product
    B : sparse or dense matrix
        second matrix of the product
    format : str, optional (default: 'bsr' or 'coo')
        format of the result (e.g. "csr")
        If None, choose 'bsr' for relatively dense array and 'coo' for others

    Returns
    -------
    kronecker product in a sparse format.
    Returns a sparse matrix unless either A or B is a
    sparse array in which case returns a sparse array.

    Examples
    --------
    >>> import numpy as np
    >>> import scipy as sp
    >>> A = sp.sparse.csr_array(np.array([[0, 2], [5, 0]]))
    >>> B = sp.sparse.csr_array(np.array([[1, 2], [3, 4]]))
    >>> sp.sparse.kron(A, B).toarray()
    array([[ 0,  0,  2,  4],
           [ 0,  0,  6,  8],
           [ 5, 10,  0,  0],
           [15, 20,  0,  0]])

    >>> sp.sparse.kron(A, [[1, 2], [3, 4]]).toarray()
    array([[ 0,  0,  2,  4],
           [ 0,  0,  6,  8],
           [ 5, 10,  0,  0],
           [15, 20,  0,  0]])

    """
    # TODO: delete next 10 lines and replace _sparse with _array when spmatrix removed
    if isinstance(A, sparray) or isinstance(B, sparray):
        # convert to local variables
        bsr_sparse = bsr_array
        csr_sparse = csr_array
        coo_sparse = coo_array
    else:  # use spmatrix
        bsr_sparse = bsr_matrix
        csr_sparse = csr_matrix
        coo_sparse = coo_matrix

    B = coo_sparse(B)

    # B is fairly dense, use BSR (each entry of A becomes one dense block)
    if (format is None or format == "bsr") and 2*B.nnz >= B.shape[0] * B.shape[1]:
        # copy=True: A's data is reused below and must not be shared.
        A = csr_sparse(A,copy=True)
        output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])

        if A.nnz == 0 or B.nnz == 0:
            # kronecker product is the zero matrix
            return coo_sparse(output_shape).asformat(format)

        B = B.toarray()
        # One dense copy of B per stored entry of A, scaled elementwise.
        data = A.data.repeat(B.size).reshape(-1,B.shape[0],B.shape[1])
        data = data * B

        return bsr_sparse((data,A.indices,A.indptr), shape=output_shape)
    else:
        # use COO
        A = coo_sparse(A)
        output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])

        if A.nnz == 0 or B.nnz == 0:
            # kronecker product is the zero matrix
            return coo_sparse(output_shape).asformat(format)

        # expand entries of a into blocks
        row = A.row.repeat(B.nnz)
        col = A.col.repeat(B.nnz)
        data = A.data.repeat(B.nnz)

        # Widen indices before scaling if the output indices could
        # overflow int32.
        if max(A.shape[0]*B.shape[0], A.shape[1]*B.shape[1]) > np.iinfo('int32').max:
            row = row.astype(np.int64)
            col = col.astype(np.int64)

        row *= B.shape[0]
        col *= B.shape[1]

        # increment block indices (broadcast B's indices across each block)
        row,col = row.reshape(-1,B.nnz),col.reshape(-1,B.nnz)
        row += B.row
        col += B.col
        row,col = row.reshape(-1),col.reshape(-1)

        # compute block entries
        data = data.reshape(-1,B.nnz) * B.data
        data = data.reshape(-1)

        return coo_sparse((data,(row,col)), shape=output_shape).asformat(format)
537
+
538
+
539
+ def kronsum(A, B, format=None):
540
+ """kronecker sum of square sparse matrices A and B
541
+
542
+ Kronecker sum of two sparse matrices is a sum of two Kronecker
543
+ products kron(I_n,A) + kron(B,I_m) where A has shape (m,m)
544
+ and B has shape (n,n) and I_m and I_n are identity matrices
545
+ of shape (m,m) and (n,n), respectively.
546
+
547
+ Parameters
548
+ ----------
549
+ A
550
+ square matrix
551
+ B
552
+ square matrix
553
+ format : str
554
+ format of the result (e.g. "csr")
555
+
556
+ Returns
557
+ -------
558
+ kronecker sum in a sparse matrix format
559
+
560
+ """
561
+ # TODO: delete next 8 lines and replace _sparse with _array when spmatrix removed
562
+ if isinstance(A, sparray) or isinstance(B, sparray):
563
+ # convert to local variables
564
+ coo_sparse = coo_array
565
+ identity_sparse = eye_array
566
+ else:
567
+ coo_sparse = coo_matrix
568
+ identity_sparse = identity
569
+
570
+ A = coo_sparse(A)
571
+ B = coo_sparse(B)
572
+
573
+ if A.shape[0] != A.shape[1]:
574
+ raise ValueError('A is not square')
575
+
576
+ if B.shape[0] != B.shape[1]:
577
+ raise ValueError('B is not square')
578
+
579
+ dtype = upcast(A.dtype, B.dtype)
580
+
581
+ I_n = identity_sparse(A.shape[0], dtype=dtype)
582
+ I_m = identity_sparse(B.shape[0], dtype=dtype)
583
+ L = kron(I_m, A, format='coo')
584
+ R = kron(B, I_n, format='coo')
585
+
586
+ return (L + R).asformat(format)
587
+
588
+
589
def _compressed_sparse_stack(blocks, axis, return_spmatrix):
    """
    Stacking fast path for CSR/CSC matrices or arrays
    (i) vstack for CSR, (ii) hstack for CSC.
    """
    other_axis = 1 if axis == 0 else 0
    data = np.concatenate([b.data for b in blocks])
    # All blocks must share the same size along the non-stacked axis.
    constant_dim = blocks[0].shape[other_axis]
    idx_dtype = get_index_dtype(arrays=[b.indptr for b in blocks],
                                maxval=max(data.size, constant_dim))
    indices = np.empty(data.size, dtype=idx_dtype)
    indptr = np.empty(sum(b.shape[axis] for b in blocks) + 1, dtype=idx_dtype)
    last_indptr = idx_dtype(0)
    sum_dim = 0
    sum_indices = 0
    for b in blocks:
        if b.shape[other_axis] != constant_dim:
            raise ValueError(f'incompatible dimensions for axis {other_axis}')
        indices[sum_indices:sum_indices+b.indices.size] = b.indices
        sum_indices += b.indices.size
        # Each block's indptr is appended shifted by the running nnz total.
        idxs = slice(sum_dim, sum_dim + b.shape[axis])
        indptr[idxs] = b.indptr[:-1]
        indptr[idxs] += last_indptr
        sum_dim += b.shape[axis]
        last_indptr += b.indptr[-1]
    indptr[-1] = last_indptr
    # TODO remove this if-structure when sparse matrices removed
    if return_spmatrix:
        if axis == 0:
            return csr_matrix((data, indices, indptr),
                              shape=(sum_dim, constant_dim))
        else:
            return csc_matrix((data, indices, indptr),
                              shape=(constant_dim, sum_dim))

    if axis == 0:
        return csr_array((data, indices, indptr),
                         shape=(sum_dim, constant_dim))
    else:
        return csc_array((data, indices, indptr),
                         shape=(constant_dim, sum_dim))
630
+
631
+
632
def _stack_along_minor_axis(blocks, axis):
    """
    Stacking fast path for CSR/CSC matrices along the minor axis
    (i) hstack for CSR, (ii) vstack for CSC.
    """
    n_blocks = len(blocks)
    if n_blocks == 0:
        raise ValueError('Missing block matrices')

    if n_blocks == 1:
        return blocks[0]

    # check for incompatible dimensions
    other_axis = 1 if axis == 0 else 0
    other_axis_dims = {b.shape[other_axis] for b in blocks}
    if len(other_axis_dims) > 1:
        raise ValueError(f'Mismatching dimensions along axis {other_axis}: '
                         f'{other_axis_dims}')
    constant_dim, = other_axis_dims

    # Do the stacking
    indptr_list = [b.indptr for b in blocks]
    data_cat = np.concatenate([b.data for b in blocks])

    # Need to check if any indices/indptr, would be too large post-
    # concatenation for np.int32:
    # - The max value of indices is the output array's stacking-axis length - 1
    # - The max value in indptr is the number of non-zero entries. This is
    #   exceedingly unlikely to require int64, but is checked out of an
    #   abundance of caution.
    sum_dim = sum(b.shape[axis] for b in blocks)
    nnz = sum(len(b.indices) for b in blocks)
    idx_dtype = get_index_dtype(maxval=max(sum_dim - 1, nnz))
    stack_dim_cat = np.array([b.shape[axis] for b in blocks], dtype=idx_dtype)
    if data_cat.size > 0:
        indptr_cat = np.concatenate(indptr_list).astype(idx_dtype)
        indices_cat = (np.concatenate([b.indices for b in blocks])
                       .astype(idx_dtype))
        indptr = np.empty(constant_dim + 1, dtype=idx_dtype)
        indices = np.empty_like(indices_cat)
        data = np.empty_like(data_cat)
        # The interleaving itself is done by the C routine csr_hstack.
        csr_hstack(n_blocks, constant_dim, stack_dim_cat,
                   indptr_cat, indices_cat, data_cat,
                   indptr, indices, data)
    else:
        # All blocks empty: result has an all-zero indptr.
        indptr = np.zeros(constant_dim + 1, dtype=idx_dtype)
        indices = np.empty(0, dtype=idx_dtype)
        data = np.empty(0, dtype=data_cat.dtype)

    # _csc_container/_csr_container pick array vs matrix to match the inputs.
    if axis == 0:
        return blocks[0]._csc_container((data, indices, indptr),
                                        shape=(sum_dim, constant_dim))
    else:
        return blocks[0]._csr_container((data, indices, indptr),
                                        shape=(constant_dim, sum_dim))
687
+
688
+
689
def hstack(blocks, format=None, dtype=None):
    """
    Stack sparse matrices horizontally (column wise)

    Parameters
    ----------
    blocks
        sequence of sparse matrices with compatible shapes
    format : str
        sparse format of the result (e.g., "csr")
        by default an appropriate sparse matrix format is returned.
        This choice is subject to change.
    dtype : dtype, optional
        The data-type of the output matrix. If not given, the dtype is
        determined from that of `blocks`.

    Returns
    -------
    new_array : sparse matrix or array
        If any block in blocks is a sparse array, return a sparse array.
        Otherwise return a sparse matrix.

        If you want a sparse array built from blocks that are not sparse
        arrays, use `block(hstack(blocks))` or convert one block
        e.g. `blocks[0] = csr_array(blocks[0])`.

    See Also
    --------
    vstack : stack sparse matrices vertically (row wise)

    Examples
    --------
    >>> from scipy.sparse import coo_matrix, hstack
    >>> A = coo_matrix([[1, 2], [3, 4]])
    >>> B = coo_matrix([[5], [6]])
    >>> hstack([A,B]).toarray()
    array([[1, 2, 5],
           [3, 4, 6]])

    """
    # A horizontal stack is a 1-row block grid; return a sparse array
    # only when at least one input block already is one.
    grid = np.asarray(blocks, dtype='object')
    has_sparray = any(isinstance(b, sparray) for b in grid.flat)
    if has_sparray:
        return _block([grid], format, dtype)
    return _block([grid], format, dtype, return_spmatrix=True)
734
+
735
+
736
def vstack(blocks, format=None, dtype=None):
    """
    Stack sparse arrays vertically (row wise)

    Parameters
    ----------
    blocks
        sequence of sparse arrays with compatible shapes
    format : str, optional
        sparse format of the result (e.g., "csr")
        by default an appropriate sparse array format is returned.
        This choice is subject to change.
    dtype : dtype, optional
        The data-type of the output array. If not given, the dtype is
        determined from that of `blocks`.

    Returns
    -------
    new_array : sparse matrix or array
        If any block in blocks is a sparse array, return a sparse array.
        Otherwise return a sparse matrix.

        If you want a sparse array built from blocks that are not sparse
        arrays, use `block(vstack(blocks))` or convert one block
        e.g. `blocks[0] = csr_array(blocks[0])`.

    See Also
    --------
    hstack : stack sparse matrices horizontally (column wise)

    Examples
    --------
    >>> from scipy.sparse import coo_array, vstack
    >>> A = coo_array([[1, 2], [3, 4]])
    >>> B = coo_array([[5, 6]])
    >>> vstack([A, B]).toarray()
    array([[1, 2],
           [3, 4],
           [5, 6]])

    """
    # A vertical stack is a one-column block grid; return a sparse array
    # only when at least one input block already is one.
    grid = np.asarray(blocks, dtype='object')
    rows = [[b] for b in grid]
    if any(isinstance(b, sparray) for b in grid.flat):
        return _block(rows, format, dtype)
    return _block(rows, format, dtype, return_spmatrix=True)
782
+
783
+
784
+ def bmat(blocks, format=None, dtype=None):
785
+ """
786
+ Build a sparse array or matrix from sparse sub-blocks
787
+
788
+ Note: `block_array` is preferred over `bmat`. They are the same function
789
+ except that `bmat` can return a deprecated sparse matrix.
790
+ `bmat` returns a coo_matrix if none of the inputs are a sparse array.
791
+
792
+ .. warning::
793
+
794
+ This function returns a sparse matrix -- not a sparse array.
795
+ You are encouraged to use ``block_array`` to take advantage
796
+ of the sparse array functionality.
797
+
798
+ Parameters
799
+ ----------
800
+ blocks : array_like
801
+ Grid of sparse matrices with compatible shapes.
802
+ An entry of None implies an all-zero matrix.
803
+ format : {'bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'}, optional
804
+ The sparse format of the result (e.g. "csr"). By default an
805
+ appropriate sparse matrix format is returned.
806
+ This choice is subject to change.
807
+ dtype : dtype, optional
808
+ The data-type of the output matrix. If not given, the dtype is
809
+ determined from that of `blocks`.
810
+
811
+ Returns
812
+ -------
813
+ bmat : sparse matrix or array
814
+ If any block in blocks is a sparse array, return a sparse array.
815
+ Otherwise return a sparse matrix.
816
+
817
+ If you want a sparse array built from blocks that are not sparse
818
+ arrays, use `block_array()`.
819
+
820
+ See Also
821
+ --------
822
+ block_array
823
+
824
+ Examples
825
+ --------
826
+ >>> from scipy.sparse import coo_array, bmat
827
+ >>> A = coo_array([[1, 2], [3, 4]])
828
+ >>> B = coo_array([[5], [6]])
829
+ >>> C = coo_array([[7]])
830
+ >>> bmat([[A, B], [None, C]]).toarray()
831
+ array([[1, 2, 5],
832
+ [3, 4, 6],
833
+ [0, 0, 7]])
834
+
835
+ >>> bmat([[A, None], [None, C]]).toarray()
836
+ array([[1, 2, 0],
837
+ [3, 4, 0],
838
+ [0, 0, 7]])
839
+
840
+ """
841
+ blocks = np.asarray(blocks, dtype='object')
842
+ if any(isinstance(b, sparray) for b in blocks.flat):
843
+ return _block(blocks, format, dtype)
844
+ else:
845
+ return _block(blocks, format, dtype, return_spmatrix=True)
846
+
847
+
848
+ def block_array(blocks, *, format=None, dtype=None):
849
+ """
850
+ Build a sparse array from sparse sub-blocks
851
+
852
+ Parameters
853
+ ----------
854
+ blocks : array_like
855
+ Grid of sparse arrays with compatible shapes.
856
+ An entry of None implies an all-zero array.
857
+ format : {'bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil'}, optional
858
+ The sparse format of the result (e.g. "csr"). By default an
859
+ appropriate sparse array format is returned.
860
+ This choice is subject to change.
861
+ dtype : dtype, optional
862
+ The data-type of the output array. If not given, the dtype is
863
+ determined from that of `blocks`.
864
+
865
+ Returns
866
+ -------
867
+ block : sparse array
868
+
869
+ See Also
870
+ --------
871
+ block_diag : specify blocks along the main diagonals
872
+ diags : specify (possibly offset) diagonals
873
+
874
+ Examples
875
+ --------
876
+ >>> from scipy.sparse import coo_array, block_array
877
+ >>> A = coo_array([[1, 2], [3, 4]])
878
+ >>> B = coo_array([[5], [6]])
879
+ >>> C = coo_array([[7]])
880
+ >>> block_array([[A, B], [None, C]]).toarray()
881
+ array([[1, 2, 5],
882
+ [3, 4, 6],
883
+ [0, 0, 7]])
884
+
885
+ >>> block_array([[A, None], [None, C]]).toarray()
886
+ array([[1, 2, 0],
887
+ [3, 4, 0],
888
+ [0, 0, 7]])
889
+
890
+ """
891
+ return _block(blocks, format, dtype)
892
+
893
+
894
def _block(blocks, format, dtype, return_spmatrix=False):
    """Assemble a sparse array/matrix from a 2-D grid of blocks.

    Shared implementation behind `bmat`, `block_array`, `hstack` and
    `vstack`.  ``None`` entries in `blocks` stand for all-zero blocks.
    When ``return_spmatrix`` is True, a (legacy) sparse matrix is
    returned instead of a sparse array.
    """
    blocks = np.asarray(blocks, dtype='object')

    if blocks.ndim != 2:
        raise ValueError('blocks must be 2-D')

    M,N = blocks.shape

    # check for fast path cases
    # If every block already shares the requested compressed format (or no
    # format was requested), stack the compressed arrays directly and skip
    # the generic COO assembly below.
    if (format in (None, 'csr') and
        all(issparse(b) and b.format == 'csr' for b in blocks.flat)
    ):
        if N > 1:
            # stack along columns (axis 1): must have shape (M, 1)
            blocks = [[_stack_along_minor_axis(blocks[b, :], 1)] for b in range(M)]
            blocks = np.asarray(blocks, dtype='object')

        # stack along rows (axis 0):
        A = _compressed_sparse_stack(blocks[:, 0], 0, return_spmatrix)
        if dtype is not None:
            A = A.astype(dtype)
        return A
    elif (format in (None, 'csc') and
          all(issparse(b) and b.format == 'csc' for b in blocks.flat)
    ):
        if M > 1:
            # stack along rows (axis 0): must have shape (1, N)
            blocks = [[_stack_along_minor_axis(blocks[:, b], 0) for b in range(N)]]
            blocks = np.asarray(blocks, dtype='object')

        # stack along columns (axis 1):
        A = _compressed_sparse_stack(blocks[0, :], 1, return_spmatrix)
        if dtype is not None:
            A = A.astype(dtype)
        return A

    block_mask = np.zeros(blocks.shape, dtype=bool)
    brow_lengths = np.zeros(M, dtype=np.int64)
    bcol_lengths = np.zeros(N, dtype=np.int64)

    # convert everything to COO format
    for i in range(M):
        for j in range(N):
            if blocks[i,j] is not None:
                A = coo_array(blocks[i,j])
                blocks[i,j] = A
                block_mask[i,j] = True

                # The first block seen in a row (column) fixes that row's
                # (column's) size; all later blocks must agree with it.
                if brow_lengths[i] == 0:
                    brow_lengths[i] = A.shape[0]
                elif brow_lengths[i] != A.shape[0]:
                    msg = (f'blocks[{i},:] has incompatible row dimensions. '
                           f'Got blocks[{i},{j}].shape[0] == {A.shape[0]}, '
                           f'expected {brow_lengths[i]}.')
                    raise ValueError(msg)

                if bcol_lengths[j] == 0:
                    bcol_lengths[j] = A.shape[1]
                elif bcol_lengths[j] != A.shape[1]:
                    msg = (f'blocks[:,{j}] has incompatible column '
                           f'dimensions. '
                           f'Got blocks[{i},{j}].shape[1] == {A.shape[1]}, '
                           f'expected {bcol_lengths[j]}.')
                    raise ValueError(msg)

    nnz = sum(block.nnz for block in blocks[block_mask])
    if dtype is None:
        # Upcast across all participating blocks (None if grid is all-None).
        all_dtypes = [blk.dtype for blk in blocks[block_mask]]
        dtype = upcast(*all_dtypes) if all_dtypes else None

    # Cumulative block sizes give each block's global starting coordinates.
    row_offsets = np.append(0, np.cumsum(brow_lengths))
    col_offsets = np.append(0, np.cumsum(bcol_lengths))

    shape = (row_offsets[-1], col_offsets[-1])

    data = np.empty(nnz, dtype=dtype)
    idx_dtype = get_index_dtype(maxval=max(shape))
    row = np.empty(nnz, dtype=idx_dtype)
    col = np.empty(nnz, dtype=idx_dtype)

    nnz = 0
    ii, jj = np.nonzero(block_mask)
    for i, j in zip(ii, jj):
        B = blocks[i, j]
        idx = slice(nnz, nnz + B.nnz)
        data[idx] = B.data
        # Shift local block coordinates into global coordinates, writing
        # directly into the preallocated output slices.
        np.add(B.row, row_offsets[i], out=row[idx], dtype=idx_dtype)
        np.add(B.col, col_offsets[j], out=col[idx], dtype=idx_dtype)
        nnz += B.nnz

    if return_spmatrix:
        return coo_matrix((data, (row, col)), shape=shape).asformat(format)
    return coo_array((data, (row, col)), shape=shape).asformat(format)
987
+
988
+
989
+ def block_diag(mats, format=None, dtype=None):
990
+ """
991
+ Build a block diagonal sparse matrix or array from provided matrices.
992
+
993
+ Parameters
994
+ ----------
995
+ mats : sequence of matrices or arrays
996
+ Input matrices or arrays.
997
+ format : str, optional
998
+ The sparse format of the result (e.g., "csr"). If not given, the result
999
+ is returned in "coo" format.
1000
+ dtype : dtype specifier, optional
1001
+ The data-type of the output. If not given, the dtype is
1002
+ determined from that of `blocks`.
1003
+
1004
+ Returns
1005
+ -------
1006
+ res : sparse matrix or array
1007
+ If at least one input is a sparse array, the output is a sparse array.
1008
+ Otherwise the output is a sparse matrix.
1009
+
1010
+ Notes
1011
+ -----
1012
+
1013
+ .. versionadded:: 0.11.0
1014
+
1015
+ See Also
1016
+ --------
1017
+ block_array
1018
+ diags_array
1019
+
1020
+ Examples
1021
+ --------
1022
+ >>> from scipy.sparse import coo_array, block_diag
1023
+ >>> A = coo_array([[1, 2], [3, 4]])
1024
+ >>> B = coo_array([[5], [6]])
1025
+ >>> C = coo_array([[7]])
1026
+ >>> block_diag((A, B, C)).toarray()
1027
+ array([[1, 2, 0, 0],
1028
+ [3, 4, 0, 0],
1029
+ [0, 0, 5, 0],
1030
+ [0, 0, 6, 0],
1031
+ [0, 0, 0, 7]])
1032
+
1033
+ """
1034
+ if any(isinstance(a, sparray) for a in mats):
1035
+ container = coo_array
1036
+ else:
1037
+ container = coo_matrix
1038
+
1039
+ row = []
1040
+ col = []
1041
+ data = []
1042
+ r_idx = 0
1043
+ c_idx = 0
1044
+ for a in mats:
1045
+ if isinstance(a, (list, numbers.Number)):
1046
+ a = coo_array(np.atleast_2d(a))
1047
+ if issparse(a):
1048
+ a = a.tocoo()
1049
+ nrows, ncols = a._shape_as_2d
1050
+ row.append(a.row + r_idx)
1051
+ col.append(a.col + c_idx)
1052
+ data.append(a.data)
1053
+ else:
1054
+ nrows, ncols = a.shape
1055
+ a_row, a_col = np.divmod(np.arange(nrows*ncols), ncols)
1056
+ row.append(a_row + r_idx)
1057
+ col.append(a_col + c_idx)
1058
+ data.append(a.ravel())
1059
+ r_idx += nrows
1060
+ c_idx += ncols
1061
+ row = np.concatenate(row)
1062
+ col = np.concatenate(col)
1063
+ data = np.concatenate(data)
1064
+ return container((data, (row, col)),
1065
+ shape=(r_idx, c_idx),
1066
+ dtype=dtype).asformat(format)
1067
+
1068
+
1069
+ def random_array(shape, *, density=0.01, format='coo', dtype=None,
1070
+ random_state=None, data_sampler=None):
1071
+ """Return a sparse array of uniformly random numbers in [0, 1)
1072
+
1073
+ Returns a sparse array with the given shape and density
1074
+ where values are generated uniformly randomly in the range [0, 1).
1075
+
1076
+ .. warning::
1077
+
1078
+ Since numpy 1.17, passing a ``np.random.Generator`` (e.g.
1079
+ ``np.random.default_rng``) for ``random_state`` will lead to much
1080
+ faster execution times.
1081
+
1082
+ A much slower implementation is used by default for backwards
1083
+ compatibility.
1084
+
1085
+ Parameters
1086
+ ----------
1087
+ shape : int or tuple of ints
1088
+ shape of the array
1089
+ density : real, optional (default: 0.01)
1090
+ density of the generated matrix: density equal to one means a full
1091
+ matrix, density of 0 means a matrix with no non-zero items.
1092
+ format : str, optional (default: 'coo')
1093
+ sparse matrix format.
1094
+ dtype : dtype, optional (default: np.float64)
1095
+ type of the returned matrix values.
1096
+ random_state : {None, int, `Generator`, `RandomState`}, optional
1097
+ A random number generator to determine nonzero structure. We recommend using
1098
+ a `numpy.random.Generator` manually provided for every call as it is much
1099
+ faster than RandomState.
1100
+
1101
+ - If `None` (or `np.random`), the `numpy.random.RandomState`
1102
+ singleton is used.
1103
+ - If an int, a new ``Generator`` instance is used,
1104
+ seeded with the int.
1105
+ - If a ``Generator`` or ``RandomState`` instance then
1106
+ that instance is used.
1107
+
1108
+ This random state will be used for sampling `indices` (the sparsity
1109
+ structure), and by default for the data values too (see `data_sampler`).
1110
+
1111
+ data_sampler : callable, optional (default depends on dtype)
1112
+ Sampler of random data values with keyword arg `size`.
1113
+ This function should take a single keyword argument `size` specifying
1114
+ the length of its returned ndarray. It is used to generate the nonzero
1115
+ values in the matrix after the locations of those values are chosen.
1116
+ By default, uniform [0, 1) random values are used unless `dtype` is
1117
+ an integer (default uniform integers from that dtype) or
1118
+ complex (default uniform over the unit square in the complex plane).
1119
+ For these, the `random_state` rng is used e.g. `rng.uniform(size=size)`.
1120
+
1121
+ Returns
1122
+ -------
1123
+ res : sparse array
1124
+
1125
+ Examples
1126
+ --------
1127
+
1128
+ Passing a ``np.random.Generator`` instance for better performance:
1129
+
1130
+ >>> import numpy as np
1131
+ >>> import scipy as sp
1132
+ >>> rng = np.random.default_rng()
1133
+
1134
+ Default sampling uniformly from [0, 1):
1135
+
1136
+ >>> S = sp.sparse.random_array((3, 4), density=0.25, random_state=rng)
1137
+
1138
+ Providing a sampler for the values:
1139
+
1140
+ >>> rvs = sp.stats.poisson(25, loc=10).rvs
1141
+ >>> S = sp.sparse.random_array((3, 4), density=0.25,
1142
+ ... random_state=rng, data_sampler=rvs)
1143
+ >>> S.toarray()
1144
+ array([[ 36., 0., 33., 0.], # random
1145
+ [ 0., 0., 0., 0.],
1146
+ [ 0., 0., 36., 0.]])
1147
+
1148
+ Building a custom distribution.
1149
+ This example builds a squared normal from np.random:
1150
+
1151
+ >>> def np_normal_squared(size=None, random_state=rng):
1152
+ ... return random_state.standard_normal(size) ** 2
1153
+ >>> S = sp.sparse.random_array((3, 4), density=0.25, random_state=rng,
1154
+ ... data_sampler=np_normal_squared)
1155
+
1156
+ Or we can build it from sp.stats style rvs functions:
1157
+
1158
+ >>> def sp_stats_normal_squared(size=None, random_state=rng):
1159
+ ... std_normal = sp.stats.distributions.norm_gen().rvs
1160
+ ... return std_normal(size=size, random_state=random_state) ** 2
1161
+ >>> S = sp.sparse.random_array((3, 4), density=0.25, random_state=rng,
1162
+ ... data_sampler=sp_stats_normal_squared)
1163
+
1164
+ Or we can subclass sp.stats rv_continous or rv_discrete:
1165
+
1166
+ >>> class NormalSquared(sp.stats.rv_continuous):
1167
+ ... def _rvs(self, size=None, random_state=rng):
1168
+ ... return random_state.standard_normal(size) ** 2
1169
+ >>> X = NormalSquared()
1170
+ >>> Y = X().rvs
1171
+ >>> S = sp.sparse.random_array((3, 4), density=0.25,
1172
+ ... random_state=rng, data_sampler=Y)
1173
+ """
1174
+ # Use the more efficient RNG by default.
1175
+ if random_state is None:
1176
+ random_state = np.random.default_rng()
1177
+ data, ind = _random(shape, density, format, dtype, random_state, data_sampler)
1178
+ return coo_array((data, ind), shape=shape).asformat(format)
1179
+
1180
+
1181
def _random(shape, density=0.01, format=None, dtype=None,
            random_state=None, data_sampler=None):
    """Sample values and coordinates for a random sparse array.

    Returns ``(vals, ind)`` where ``vals`` holds the sampled nonzero
    values and ``ind`` is a tuple of coordinate arrays, one per
    dimension of `shape`.  Shared backend of `random_array` and the
    legacy `random`.
    """
    if density < 0 or density > 1:
        raise ValueError("density expected to be 0 <= density <= 1")

    tot_prod = math.prod(shape)  # use `math` for when prod is >= 2**64

    # Number of non zero values
    size = int(round(density * tot_prod))

    rng = check_random_state(random_state)

    if data_sampler is None:
        if np.issubdtype(dtype, np.integer):
            def data_sampler(size):
                # Uniform integers spanning the full range of the target dtype.
                return rng_integers(rng,
                                    np.iinfo(dtype).min,
                                    np.iinfo(dtype).max,
                                    size,
                                    dtype=dtype)
        elif np.issubdtype(dtype, np.complexfloating):
            def data_sampler(size):
                # Uniform over the unit square in the complex plane.
                return (rng.uniform(size=size) +
                        rng.uniform(size=size) * 1j)
        else:
            data_sampler = rng.uniform

    # rng.choice uses int64 if first arg is an int
    if tot_prod < np.iinfo(np.int64).max:
        # Sample flat positions without replacement, then convert to
        # per-dimension coordinates.
        raveled_ind = rng.choice(tot_prod, size=size, replace=False)
        ind = np.unravel_index(raveled_ind, shape=shape, order='F')
    else:
        # for ravel indices bigger than dtype max, use sets to remove duplicates
        # Keep drawing coordinate tuples until `size` distinct ones are found.
        ndim = len(shape)
        seen = set()
        while len(seen) < size:
            dsize = size - len(seen)
            seen.update(map(tuple, rng_integers(rng, shape, size=(dsize, ndim))))
        ind = tuple(np.array(list(seen)).T)

    # size kwarg allows eg data_sampler=partial(np.random.poisson, lam=5)
    vals = data_sampler(size=size).astype(dtype, copy=False)
    return vals, ind
1224
+
1225
+
1226
+ def random(m, n, density=0.01, format='coo', dtype=None,
1227
+ random_state=None, data_rvs=None):
1228
+ """Generate a sparse matrix of the given shape and density with randomly
1229
+ distributed values.
1230
+
1231
+ .. warning::
1232
+
1233
+ Since numpy 1.17, passing a ``np.random.Generator`` (e.g.
1234
+ ``np.random.default_rng``) for ``random_state`` will lead to much
1235
+ faster execution times.
1236
+
1237
+ A much slower implementation is used by default for backwards
1238
+ compatibility.
1239
+
1240
+ .. warning::
1241
+
1242
+ This function returns a sparse matrix -- not a sparse array.
1243
+ You are encouraged to use ``random_array`` to take advantage of the
1244
+ sparse array functionality.
1245
+
1246
+ Parameters
1247
+ ----------
1248
+ m, n : int
1249
+ shape of the matrix
1250
+ density : real, optional
1251
+ density of the generated matrix: density equal to one means a full
1252
+ matrix, density of 0 means a matrix with no non-zero items.
1253
+ format : str, optional
1254
+ sparse matrix format.
1255
+ dtype : dtype, optional
1256
+ type of the returned matrix values.
1257
+ random_state : {None, int, `numpy.random.Generator`,
1258
+ `numpy.random.RandomState`}, optional
1259
+
1260
+ - If `seed` is None (or `np.random`), the `numpy.random.RandomState`
1261
+ singleton is used.
1262
+ - If `seed` is an int, a new ``RandomState`` instance is used,
1263
+ seeded with `seed`.
1264
+ - If `seed` is already a ``Generator`` or ``RandomState`` instance then
1265
+ that instance is used.
1266
+
1267
+ This random state will be used for sampling the sparsity structure, but
1268
+ not necessarily for sampling the values of the structurally nonzero
1269
+ entries of the matrix.
1270
+ data_rvs : callable, optional
1271
+ Samples a requested number of random values.
1272
+ This function should take a single argument specifying the length
1273
+ of the ndarray that it will return. The structurally nonzero entries
1274
+ of the sparse random matrix will be taken from the array sampled
1275
+ by this function. By default, uniform [0, 1) random values will be
1276
+ sampled using the same random state as is used for sampling
1277
+ the sparsity structure.
1278
+
1279
+ Returns
1280
+ -------
1281
+ res : sparse matrix
1282
+
1283
+ See Also
1284
+ --------
1285
+ random_array : constructs sparse arrays instead of sparse matrices
1286
+
1287
+ Examples
1288
+ --------
1289
+
1290
+ Passing a ``np.random.Generator`` instance for better performance:
1291
+
1292
+ >>> import scipy as sp
1293
+ >>> import numpy as np
1294
+ >>> rng = np.random.default_rng()
1295
+ >>> S = sp.sparse.random(3, 4, density=0.25, random_state=rng)
1296
+
1297
+ Providing a sampler for the values:
1298
+
1299
+ >>> rvs = sp.stats.poisson(25, loc=10).rvs
1300
+ >>> S = sp.sparse.random(3, 4, density=0.25, random_state=rng, data_rvs=rvs)
1301
+ >>> S.toarray()
1302
+ array([[ 36., 0., 33., 0.], # random
1303
+ [ 0., 0., 0., 0.],
1304
+ [ 0., 0., 36., 0.]])
1305
+
1306
+ Building a custom distribution.
1307
+ This example builds a squared normal from np.random:
1308
+
1309
+ >>> def np_normal_squared(size=None, random_state=rng):
1310
+ ... return random_state.standard_normal(size) ** 2
1311
+ >>> S = sp.sparse.random(3, 4, density=0.25, random_state=rng,
1312
+ ... data_rvs=np_normal_squared)
1313
+
1314
+ Or we can build it from sp.stats style rvs functions:
1315
+
1316
+ >>> def sp_stats_normal_squared(size=None, random_state=rng):
1317
+ ... std_normal = sp.stats.distributions.norm_gen().rvs
1318
+ ... return std_normal(size=size, random_state=random_state) ** 2
1319
+ >>> S = sp.sparse.random(3, 4, density=0.25, random_state=rng,
1320
+ ... data_rvs=sp_stats_normal_squared)
1321
+
1322
+ Or we can subclass sp.stats rv_continous or rv_discrete:
1323
+
1324
+ >>> class NormalSquared(sp.stats.rv_continuous):
1325
+ ... def _rvs(self, size=None, random_state=rng):
1326
+ ... return random_state.standard_normal(size) ** 2
1327
+ >>> X = NormalSquared()
1328
+ >>> Y = X() # get a frozen version of the distribution
1329
+ >>> S = sp.sparse.random(3, 4, density=0.25, random_state=rng, data_rvs=Y.rvs)
1330
+ """
1331
+ if n is None:
1332
+ n = m
1333
+ m, n = int(m), int(n)
1334
+ # make keyword syntax work for data_rvs e.g. data_rvs(size=7)
1335
+ if data_rvs is not None:
1336
+ def data_rvs_kw(size):
1337
+ return data_rvs(size)
1338
+ else:
1339
+ data_rvs_kw = None
1340
+ vals, ind = _random((m, n), density, format, dtype, random_state, data_rvs_kw)
1341
+ return coo_matrix((vals, ind), shape=(m, n)).asformat(format)
1342
+
1343
+
1344
def rand(m, n, density=0.01, format="coo", dtype=None, random_state=None):
    """Generate a sparse matrix of the given shape and density with uniformly
    distributed values.

    .. warning::

        This function returns a sparse matrix -- not a sparse array.
        You are encouraged to use ``random_array`` to take advantage
        of the sparse array functionality.

    Parameters
    ----------
    m, n : int
        shape of the matrix
    density : real, optional
        density of the generated matrix: density equal to one means a full
        matrix, density of 0 means a matrix with no non-zero items.
    format : str, optional
        sparse matrix format.
    dtype : dtype, optional
        type of the returned matrix values.
    random_state : {None, int, `numpy.random.Generator`,
                    `numpy.random.RandomState`}, optional

        If `seed` is None (or `np.random`), the `numpy.random.RandomState`
        singleton is used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``Generator`` or ``RandomState`` instance then
        that instance is used.

    Returns
    -------
    res : sparse matrix

    Notes
    -----
    Only float types are supported for now.

    See Also
    --------
    random : Similar function allowing a custom random data sampler
    random_array : Similar to random() but returns a sparse array

    Examples
    --------
    >>> from scipy.sparse import rand
    >>> matrix = rand(3, 4, density=0.25, format="csr", random_state=42)
    >>> matrix.toarray()
    array([[0.05641158, 0.        , 0.        , 0.65088847],  # random
           [0.        , 0.        , 0.        , 0.14286682],
           [0.        , 0.        , 0.        , 0.        ]])

    """
    # Thin convenience wrapper: delegate everything to `random` with its
    # default (uniform [0, 1)) data sampler.
    return random(m, n, density=density, format=format,
                  dtype=dtype, random_state=random_state)
env-llmeval/lib/python3.10/site-packages/scipy/sparse/_csparsetools.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (823 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/_csr.py ADDED
@@ -0,0 +1,491 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Compressed Sparse Row matrix format"""
2
+
3
+ __docformat__ = "restructuredtext en"
4
+
5
+ __all__ = ['csr_array', 'csr_matrix', 'isspmatrix_csr']
6
+
7
+ import numpy as np
8
+
9
+ from ._matrix import spmatrix
10
+ from ._base import _spbase, sparray
11
+ from ._sparsetools import (csr_tocsc, csr_tobsr, csr_count_blocks,
12
+ get_csr_submatrix)
13
+ from ._sputils import upcast
14
+
15
+ from ._compressed import _cs_matrix
16
+
17
+
18
class _csr_base(_cs_matrix):
    """Shared implementation for csr_array and csr_matrix.

    Provides the row-oriented behavior layered on top of the generic
    compressed-sparse machinery in `_cs_matrix`.
    """
    _format = 'csr'

    def transpose(self, axes=None, copy=False):
        if axes is not None and axes != (1, 0):
            raise ValueError("Sparse arrays/matrices do not support "
                             "an 'axes' parameter because swapping "
                             "dimensions is the only logical permutation.")

        M, N = self.shape
        # Reinterpreting CSR (data, indices, indptr) as CSC with swapped
        # dimensions yields the transpose without touching the arrays.
        return self._csc_container((self.data, self.indices,
                                    self.indptr), shape=(N, M), copy=copy)

    transpose.__doc__ = _spbase.transpose.__doc__

    def tolil(self, copy=False):
        lil = self._lil_container(self.shape, dtype=self.dtype)

        # Canonicalize first so each row's slice maps cleanly to a LIL row.
        self.sum_duplicates()
        ptr,ind,dat = self.indptr,self.indices,self.data
        rows, data = lil.rows, lil.data

        for n in range(self.shape[0]):
            start = ptr[n]
            end = ptr[n+1]
            rows[n] = ind[start:end].tolist()
            data[n] = dat[start:end].tolist()

        return lil

    tolil.__doc__ = _spbase.tolil.__doc__

    def tocsr(self, copy=False):
        # Already CSR; copy only when explicitly requested.
        if copy:
            return self.copy()
        else:
            return self

    tocsr.__doc__ = _spbase.tocsr.__doc__

    def tocsc(self, copy=False):
        idx_dtype = self._get_index_dtype((self.indptr, self.indices),
                                          maxval=max(self.nnz, self.shape[0]))
        indptr = np.empty(self.shape[1] + 1, dtype=idx_dtype)
        indices = np.empty(self.nnz, dtype=idx_dtype)
        data = np.empty(self.nnz, dtype=upcast(self.dtype))

        # C-level conversion fills the preallocated CSC arrays.
        csr_tocsc(self.shape[0], self.shape[1],
                  self.indptr.astype(idx_dtype),
                  self.indices.astype(idx_dtype),
                  self.data,
                  indptr,
                  indices,
                  data)

        A = self._csc_container((data, indices, indptr), shape=self.shape)
        # csr_tocsc emits sorted indices, so record that to skip re-sorting.
        A.has_sorted_indices = True
        return A

    tocsc.__doc__ = _spbase.tocsc.__doc__

    def tobsr(self, blocksize=None, copy=True):
        if blocksize is None:
            # Pick a blocksize heuristically, then re-enter with it set.
            from ._spfuncs import estimate_blocksize
            return self.tobsr(blocksize=estimate_blocksize(self))

        elif blocksize == (1,1):
            # Trivial blocks: reuse the CSR structure with reshaped data.
            arg1 = (self.data.reshape(-1,1,1),self.indices,self.indptr)
            return self._bsr_container(arg1, shape=self.shape, copy=copy)

        else:
            R,C = blocksize
            M,N = self.shape

            # Block dimensions must tile the shape exactly.
            if R < 1 or C < 1 or M % R != 0 or N % C != 0:
                raise ValueError('invalid blocksize %s' % blocksize)

            blks = csr_count_blocks(M,N,R,C,self.indptr,self.indices)

            idx_dtype = self._get_index_dtype((self.indptr, self.indices),
                                              maxval=max(N//C, blks))
            indptr = np.empty(M//R+1, dtype=idx_dtype)
            indices = np.empty(blks, dtype=idx_dtype)
            data = np.zeros((blks,R,C), dtype=self.dtype)

            csr_tobsr(M, N, R, C,
                      self.indptr.astype(idx_dtype),
                      self.indices.astype(idx_dtype),
                      self.data,
                      indptr, indices, data.ravel())

            return self._bsr_container(
                (data, indices, indptr), shape=self.shape
            )

    tobsr.__doc__ = _spbase.tobsr.__doc__

    # these functions are used by the parent class (_cs_matrix)
    # to remove redundancy between csc_matrix and csr_array
    @staticmethod
    def _swap(x):
        """swap the members of x if this is a column-oriented matrix
        """
        # CSR is row-oriented: (major, minor) == (row, col), no swap needed.
        return x

    def __iter__(self):
        # Yield each row as a standalone 1-row sparse object of this class.
        indptr = np.zeros(2, dtype=self.indptr.dtype)
        shape = (1, self.shape[1])
        i0 = 0
        for i1 in self.indptr[1:]:
            indptr[1] = i1 - i0
            indices = self.indices[i0:i1]
            data = self.data[i0:i1]
            yield self.__class__(
                (data, indices, indptr), shape=shape, copy=True
            )
            i0 = i1

    def _getrow(self, i):
        """Returns a copy of row i of the matrix, as a (1 x n)
        CSR matrix (row vector).
        """
        M, N = self.shape
        i = int(i)
        # Support negative (from-the-end) indexing.
        if i < 0:
            i += M
        if i < 0 or i >= M:
            raise IndexError('index (%d) out of range' % i)
        indptr, indices, data = get_csr_submatrix(
            M, N, self.indptr, self.indices, self.data, i, i + 1, 0, N)
        return self.__class__((data, indices, indptr), shape=(1, N),
                              dtype=self.dtype, copy=False)

    def _getcol(self, i):
        """Returns a copy of column i of the matrix, as a (m x 1)
        CSR matrix (column vector).
        """
        M, N = self.shape
        i = int(i)
        # Support negative (from-the-end) indexing.
        if i < 0:
            i += N
        if i < 0 or i >= N:
            raise IndexError('index (%d) out of range' % i)
        indptr, indices, data = get_csr_submatrix(
            M, N, self.indptr, self.indices, self.data, 0, M, i, i + 1)
        return self.__class__((data, indices, indptr), shape=(M, 1),
                              dtype=self.dtype, copy=False)

    def _get_intXarray(self, row, col):
        # Single row, fancy columns: slice the row, then fancy-index it.
        return self._getrow(row)._minor_index_fancy(col)

    def _get_intXslice(self, row, col):
        if col.step in (1, None):
            # Contiguous column range: the C submatrix extractor is fastest.
            return self._get_submatrix(row, col, copy=True)
        # TODO: uncomment this once it's faster:
        # return self._getrow(row)._minor_slice(col)

        M, N = self.shape
        start, stop, stride = col.indices(N)

        # Work directly on this row's index/data slice.
        ii, jj = self.indptr[row:row+2]
        row_indices = self.indices[ii:jj]
        row_data = self.data[ii:jj]

        # Keep only entries whose column lies inside the slice range...
        if stride > 0:
            ind = (row_indices >= start) & (row_indices < stop)
        else:
            ind = (row_indices <= start) & (row_indices > stop)

        # ...and, for |stride| > 1, only those hit by the stride.
        if abs(stride) > 1:
            ind &= (row_indices - start) % stride == 0

        # Map surviving columns to their positions within the slice.
        row_indices = (row_indices[ind] - start) // stride
        row_data = row_data[ind]
        row_indptr = np.array([0, len(row_indices)])

        if stride < 0:
            # Negative stride: restore ascending column order.
            # NOTE(review): abs() looks redundant when indices divide the
            # stride exactly — confirm against floor-division edge cases.
            row_data = row_data[::-1]
            row_indices = abs(row_indices[::-1])

        shape = (1, max(0, int(np.ceil(float(stop - start) / stride))))
        return self.__class__((row_data, row_indices, row_indptr), shape=shape,
                              dtype=self.dtype, copy=False)

    def _get_sliceXint(self, row, col):
        if row.step in (1, None):
            # Contiguous row range: single C-level extraction.
            return self._get_submatrix(row, col, copy=True)
        return self._major_slice(row)._get_submatrix(minor=col)

    def _get_sliceXarray(self, row, col):
        return self._major_slice(row)._minor_index_fancy(col)

    def _get_arrayXint(self, row, col):
        return self._major_index_fancy(row)._get_submatrix(minor=col)

    def _get_arrayXslice(self, row, col):
        if col.step not in (1, None):
            # Strided column slice: materialize it as an index array and
            # reuse the fancy/fancy path.
            col = np.arange(*col.indices(self.shape[1]))
            return self._get_arrayXarray(row, col)
        return self._major_index_fancy(row)._get_submatrix(minor=col)
+
219
+
220
+ def isspmatrix_csr(x):
221
+ """Is `x` of csr_matrix type?
222
+
223
+ Parameters
224
+ ----------
225
+ x
226
+ object to check for being a csr matrix
227
+
228
+ Returns
229
+ -------
230
+ bool
231
+ True if `x` is a csr matrix, False otherwise
232
+
233
+ Examples
234
+ --------
235
+ >>> from scipy.sparse import csr_array, csr_matrix, coo_matrix, isspmatrix_csr
236
+ >>> isspmatrix_csr(csr_matrix([[5]]))
237
+ True
238
+ >>> isspmatrix_csr(csr_array([[5]]))
239
+ False
240
+ >>> isspmatrix_csr(coo_matrix([[5]]))
241
+ False
242
+ """
243
+ return isinstance(x, csr_matrix)
244
+
245
+
246
+ # This namespace class separates array from matrix with isinstance
247
class csr_array(_csr_base, sparray):
    """
    Compressed Sparse Row array.

    Several construction forms are supported:

        csr_array(D)
            from a dense 2-D ndarray ``D``.

        csr_array(S)
            from another sparse array or matrix ``S`` (same as ``S.tocsr()``).

        csr_array((M, N), [dtype])
            an empty array of shape ``(M, N)``; ``dtype`` defaults to ``'d'``.

        csr_array((data, (row_ind, col_ind)), [shape=(M, N)])
            from COO-style triplets satisfying
            ``a[row_ind[k], col_ind[k]] = data[k]``.

        csr_array((data, indices, indptr), [shape=(M, N)])
            the native CSR representation: the column indices of row ``i``
            are ``indices[indptr[i]:indptr[i+1]]`` with corresponding values
            ``data[indptr[i]:indptr[i+1]]``.  When ``shape`` is omitted it
            is inferred from the index arrays.

    Attributes
    ----------
    dtype : dtype
        Data type of the array
    shape : 2-tuple
        Shape of the array
    ndim : int
        Number of dimensions (this is always 2)
    nnz
    size
    data
        CSR format data array of the array
    indices
        CSR format index array of the array
    indptr
        CSR format index pointer array of the array
    has_sorted_indices
    has_canonical_format
    T

    Notes
    -----
    Sparse arrays may be used in arithmetic expressions: addition,
    subtraction, multiplication, division and matrix power all work.

    Strengths of the CSR format are fast CSR-with-CSR arithmetic, fast
    matrix-vector products, and efficient row slicing.  Its weaknesses
    are slow column slicing (prefer CSC) and expensive changes to the
    sparsity structure (prefer LIL or DOK while building).

    Canonical format: within each row the column indices are sorted and
    there are no duplicate entries.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csr_array
    >>> csr_array((3, 4), dtype=np.int8).toarray()
    array([[0, 0, 0, 0],
           [0, 0, 0, 0],
           [0, 0, 0, 0]], dtype=int8)

    >>> row = np.array([0, 0, 1, 2, 2, 2])
    >>> col = np.array([0, 2, 2, 0, 1, 2])
    >>> data = np.array([1, 2, 3, 4, 5, 6])
    >>> csr_array((data, (row, col)), shape=(3, 3)).toarray()
    array([[1, 0, 2],
           [0, 0, 3],
           [4, 5, 6]])

    >>> indptr = np.array([0, 2, 3, 6])
    >>> indices = np.array([0, 2, 2, 0, 1, 2])
    >>> data = np.array([1, 2, 3, 4, 5, 6])
    >>> csr_array((data, indices, indptr), shape=(3, 3)).toarray()
    array([[1, 0, 2],
           [0, 0, 3],
           [4, 5, 6]])

    Duplicate entries are summed together:

    >>> row = np.array([0, 1, 2, 0])
    >>> col = np.array([0, 1, 1, 0])
    >>> data = np.array([1, 2, 4, 8])
    >>> csr_array((data, (row, col)), shape=(3, 3)).toarray()
    array([[9, 0, 0],
           [0, 2, 0],
           [0, 4, 0]])

    Incremental construction of a term-document array from texts:

    >>> docs = [["hello", "world", "hello"], ["goodbye", "cruel", "world"]]
    >>> indptr = [0]
    >>> indices = []
    >>> data = []
    >>> vocabulary = {}
    >>> for d in docs:
    ...     for term in d:
    ...         index = vocabulary.setdefault(term, len(vocabulary))
    ...         indices.append(index)
    ...         data.append(1)
    ...     indptr.append(len(indices))
    ...
    >>> csr_array((data, indices, indptr), dtype=int).toarray()
    array([[2, 1, 0, 0],
           [0, 1, 1, 1]])

    """
368
+
369
+
370
class csr_matrix(spmatrix, _csr_base):
    """
    Compressed Sparse Row matrix.

    Several construction forms are supported:

        csr_matrix(D)
            from a dense 2-D ndarray ``D``.

        csr_matrix(S)
            from another sparse array or matrix ``S`` (same as ``S.tocsr()``).

        csr_matrix((M, N), [dtype])
            an empty matrix of shape ``(M, N)``; ``dtype`` defaults to ``'d'``.

        csr_matrix((data, (row_ind, col_ind)), [shape=(M, N)])
            from COO-style triplets satisfying
            ``a[row_ind[k], col_ind[k]] = data[k]``.

        csr_matrix((data, indices, indptr), [shape=(M, N)])
            the native CSR representation: the column indices of row ``i``
            are ``indices[indptr[i]:indptr[i+1]]`` with corresponding values
            ``data[indptr[i]:indptr[i+1]]``.  When ``shape`` is omitted it
            is inferred from the index arrays.

    Attributes
    ----------
    dtype : dtype
        Data type of the matrix
    shape : 2-tuple
        Shape of the matrix
    ndim : int
        Number of dimensions (this is always 2)
    nnz
    size
    data
        CSR format data array of the matrix
    indices
        CSR format index array of the matrix
    indptr
        CSR format index pointer array of the matrix
    has_sorted_indices
    has_canonical_format
    T

    Notes
    -----
    Sparse matrices may be used in arithmetic expressions: addition,
    subtraction, multiplication, division and matrix power all work.

    Strengths of the CSR format are fast CSR-with-CSR arithmetic, fast
    matrix-vector products, and efficient row slicing.  Its weaknesses
    are slow column slicing (prefer CSC) and expensive changes to the
    sparsity structure (prefer LIL or DOK while building).

    Canonical format: within each row the column indices are sorted and
    there are no duplicate entries.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csr_matrix
    >>> csr_matrix((3, 4), dtype=np.int8).toarray()
    array([[0, 0, 0, 0],
           [0, 0, 0, 0],
           [0, 0, 0, 0]], dtype=int8)

    >>> row = np.array([0, 0, 1, 2, 2, 2])
    >>> col = np.array([0, 2, 2, 0, 1, 2])
    >>> data = np.array([1, 2, 3, 4, 5, 6])
    >>> csr_matrix((data, (row, col)), shape=(3, 3)).toarray()
    array([[1, 0, 2],
           [0, 0, 3],
           [4, 5, 6]])

    >>> indptr = np.array([0, 2, 3, 6])
    >>> indices = np.array([0, 2, 2, 0, 1, 2])
    >>> data = np.array([1, 2, 3, 4, 5, 6])
    >>> csr_matrix((data, indices, indptr), shape=(3, 3)).toarray()
    array([[1, 0, 2],
           [0, 0, 3],
           [4, 5, 6]])

    Duplicate entries are summed together:

    >>> row = np.array([0, 1, 2, 0])
    >>> col = np.array([0, 1, 1, 0])
    >>> data = np.array([1, 2, 4, 8])
    >>> csr_matrix((data, (row, col)), shape=(3, 3)).toarray()
    array([[9, 0, 0],
           [0, 2, 0],
           [0, 4, 0]])

    Incremental construction of a term-document matrix from texts:

    >>> docs = [["hello", "world", "hello"], ["goodbye", "cruel", "world"]]
    >>> indptr = [0]
    >>> indices = []
    >>> data = []
    >>> vocabulary = {}
    >>> for d in docs:
    ...     for term in d:
    ...         index = vocabulary.setdefault(term, len(vocabulary))
    ...         indices.append(index)
    ...         data.append(1)
    ...     indptr.append(len(indices))
    ...
    >>> csr_matrix((data, indices, indptr), dtype=int).toarray()
    array([[2, 1, 0, 0],
           [0, 1, 1, 1]])

    """
491
+
env-llmeval/lib/python3.10/site-packages/scipy/sparse/_data.py ADDED
@@ -0,0 +1,506 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Base class for sparse matrices with a .data attribute
2
+
3
+ subclasses must provide a _with_data() method that
4
+ creates a new matrix with the same sparsity pattern
5
+ as self but with a different data array
6
+
7
+ """
8
+
9
+ import numpy as np
10
+
11
+ from ._base import _spbase, _ufuncs_with_fixed_point_at_zero
12
+ from ._sputils import isscalarlike, validateaxis
13
+
14
+ __all__ = []
15
+
16
+
17
+ # TODO implement all relevant operations
18
+ # use .data.__methods__() instead of /=, *=, etc.
19
class _data_matrix(_spbase):
    """Base class for sparse containers backed by a ``.data`` array.

    Subclasses must provide a ``_with_data()`` method that creates a new
    container with the same sparsity pattern as ``self`` but with a
    different data array.
    """

    def __init__(self):
        _spbase.__init__(self)

    @property
    def dtype(self):
        # The dtype of the container is just the dtype of its data array.
        return self.data.dtype

    @dtype.setter
    def dtype(self, newtype):
        self.data.dtype = newtype

    def _deduped_data(self):
        # Sum duplicate entries first (when the format supports it) so
        # elementwise operations see each stored element exactly once.
        if hasattr(self, 'sum_duplicates'):
            self.sum_duplicates()
        return self.data

    def __abs__(self):
        return self._with_data(abs(self._deduped_data()))

    def __round__(self, ndigits=0):
        return self._with_data(np.around(self._deduped_data(), decimals=ndigits))

    def _real(self):
        return self._with_data(self.data.real)

    def _imag(self):
        return self._with_data(self.data.imag)

    def __neg__(self):
        if self.dtype.kind == 'b':
            # Match NumPy, which also refuses to negate booleans.
            raise NotImplementedError('negating a boolean sparse array is not '
                                      'supported')
        return self._with_data(-self.data)

    def __imul__(self, other):  # self *= other
        if isscalarlike(other):
            # Scalar in-place multiply only touches the stored values.
            self.data *= other
            return self
        else:
            return NotImplemented

    def __itruediv__(self, other):  # self /= other
        if isscalarlike(other):
            # Multiply by the reciprocal instead of dividing in place.
            recip = 1.0 / other
            self.data *= recip
            return self
        else:
            return NotImplemented

    def astype(self, dtype, casting='unsafe', copy=True):
        dtype = np.dtype(dtype)
        if self.dtype != dtype:
            matrix = self._with_data(
                self.data.astype(dtype, casting=casting, copy=True),
                copy=True
            )
            # Deduplicate after the cast so the result is canonical.
            return matrix._with_data(matrix._deduped_data(), copy=False)
        elif copy:
            return self.copy()
        else:
            return self

    astype.__doc__ = _spbase.astype.__doc__

    def conjugate(self, copy=True):
        if np.issubdtype(self.dtype, np.complexfloating):
            return self._with_data(self.data.conjugate(), copy=copy)
        elif copy:
            return self.copy()
        else:
            # Conjugation is a no-op for non-complex dtypes.
            return self

    conjugate.__doc__ = _spbase.conjugate.__doc__

    def copy(self):
        return self._with_data(self.data.copy(), copy=True)

    copy.__doc__ = _spbase.copy.__doc__

    def count_nonzero(self):
        return np.count_nonzero(self._deduped_data())

    count_nonzero.__doc__ = _spbase.count_nonzero.__doc__

    def power(self, n, dtype=None):
        """
        This function performs element-wise power.

        Parameters
        ----------
        n : scalar
            n is a non-zero scalar (nonzero avoids dense ones creation)
            If zero power is desired, special case it to use `np.ones`

        dtype : If dtype is not specified, the current dtype will be preserved.

        Raises
        ------
        NotImplementedError : if n is a zero scalar
            If zero power is desired, special case it to use
            `np.ones(A.shape, dtype=A.dtype)`
        """
        if not isscalarlike(n):
            raise NotImplementedError("input is not scalar")
        if not n:
            raise NotImplementedError(
                "zero power is not supported as it would densify the matrix.\n"
                "Use `np.ones(A.shape, dtype=A.dtype)` for this case."
            )

        data = self._deduped_data()
        if dtype is not None:
            data = data.astype(dtype)
        # Raising stored entries to a nonzero power keeps the pattern.
        return self._with_data(data ** n)

    ###########################
    # Multiplication handlers #
    ###########################

    def _mul_scalar(self, other):
        return self._with_data(self.data * other)
141
+
142
+
143
# Attach the numpy unary ufuncs with func(0) == 0 (e.g. sin, sqrt, expm1)
# as methods on _data_matrix: applying them to the stored data alone is
# valid because implicit zeros map to zero, leaving the pattern intact.
for npfunc in _ufuncs_with_fixed_point_at_zero:
    name = npfunc.__name__

    def _create_method(op, op_name=name):
        # op/op_name are bound at definition time to avoid the classic
        # late-binding-closure pitfall inside the loop.
        def method(self):
            return self._with_data(op(self._deduped_data()), copy=True)

        method.__name__ = op_name
        method.__doc__ = (f"Element-wise {op_name}.\n\n"
                          f"See `numpy.{op_name}` for more information.")
        return method

    setattr(_data_matrix, name, _create_method(npfunc))
159
+
160
+
161
+ def _find_missing_index(ind, n):
162
+ for k, a in enumerate(ind):
163
+ if k != a:
164
+ return k
165
+
166
+ k += 1
167
+ if k < n:
168
+ return k
169
+ else:
170
+ return -1
171
+
172
+
173
class _minmax_mixin:
    """Mixin for min and max methods.

    These are not implemented for dia_matrix, hence the separate class.
    """

    def _min_or_max_axis(self, axis, min_or_max):
        # Reduce along `axis` with the given binary ufunc
        # (np.minimum/np.maximum or their NaN-ignoring variants),
        # returning a sparse COO result.
        N = self.shape[axis]
        if N == 0:
            raise ValueError("zero-size array to reduction operation")
        M = self.shape[1 - axis]
        idx_dtype = self._get_index_dtype(maxval=M)

        # CSC reduces over rows (axis=0); CSR reduces over columns.
        mat = self.tocsc() if axis == 0 else self.tocsr()
        mat.sum_duplicates()

        major_index, value = mat._minor_reduce(min_or_max)
        # Lines containing implicit zeros must also compare against 0.
        not_full = np.diff(mat.indptr)[major_index] < N
        value[not_full] = min_or_max(value[not_full], 0)

        # Drop zero results: they stay implicit in the sparse output.
        mask = value != 0
        major_index = np.compress(mask, major_index)
        value = np.compress(mask, value)

        if axis == 0:
            return self._coo_container(
                (value, (np.zeros(len(value), dtype=idx_dtype), major_index)),
                dtype=self.dtype, shape=(1, M)
            )
        else:
            return self._coo_container(
                (value, (major_index, np.zeros(len(value), dtype=idx_dtype))),
                dtype=self.dtype, shape=(M, 1)
            )

    def _min_or_max(self, axis, out, min_or_max):
        # Shared implementation behind min/max/nanmin/nanmax.
        if out is not None:
            raise ValueError("Sparse arrays do not support an 'out' parameter.")

        validateaxis(axis)
        if self.ndim == 1:
            if axis not in (None, 0, -1):
                raise ValueError("axis out of range")
            axis = None  # avoid calling special axis case. no impact on 1d

        if axis is None:
            if 0 in self.shape:
                raise ValueError("zero-size array to reduction operation")

            zero = self.dtype.type(0)
            if self.nnz == 0:
                # All elements are implicit zeros.
                return zero
            m = min_or_max.reduce(self._deduped_data().ravel())
            if self.nnz != np.prod(self.shape):
                # Implicit zeros exist, so 0 takes part in the reduction.
                m = min_or_max(zero, m)
            return m

        if axis < 0:
            axis += 2

        if (axis == 0) or (axis == 1):
            return self._min_or_max_axis(axis, min_or_max)
        else:
            raise ValueError("axis out of range")

    def _arg_min_or_max_axis(self, axis, argmin_or_argmax, compare):
        # Per-line argmin/argmax; `compare` (np.greater/np.less) decides
        # whether the stored extreme beats an implicit zero.
        if self.shape[axis] == 0:
            raise ValueError("Cannot apply the operation along a zero-sized dimension.")

        if axis < 0:
            axis += 2

        zero = self.dtype.type(0)

        mat = self.tocsc() if axis == 0 else self.tocsr()
        mat.sum_duplicates()

        ret_size, line_size = mat._swap(mat.shape)
        # Lines with no stored entries keep the default answer 0.
        ret = np.zeros(ret_size, dtype=int)

        nz_lines, = np.nonzero(np.diff(mat.indptr))
        for i in nz_lines:
            p, q = mat.indptr[i:i + 2]
            data = mat.data[p:q]
            indices = mat.indices[p:q]
            extreme_index = argmin_or_argmax(data)
            extreme_value = data[extreme_index]
            if compare(extreme_value, zero) or q - p == line_size:
                # Stored extreme wins outright, or the line is dense.
                ret[i] = indices[extreme_index]
            else:
                # An implicit zero ties or wins; locate the first one.
                zero_ind = _find_missing_index(indices, line_size)
                if extreme_value == zero:
                    # Tie: earliest position wins.
                    ret[i] = min(extreme_index, zero_ind)
                else:
                    ret[i] = zero_ind

        if axis == 1:
            ret = ret.reshape(-1, 1)

        return self._ascontainer(ret)

    def _arg_min_or_max(self, axis, out, argmin_or_argmax, compare):
        # Shared implementation behind argmin/argmax.
        if out is not None:
            raise ValueError("Sparse types do not support an 'out' parameter.")

        validateaxis(axis)

        if self.ndim == 1:
            if axis not in (None, 0, -1):
                raise ValueError("axis out of range")
            axis = None  # avoid calling special axis case. no impact on 1d

        if axis is not None:
            return self._arg_min_or_max_axis(axis, argmin_or_argmax, compare)

        if 0 in self.shape:
            raise ValueError("Cannot apply the operation to an empty matrix.")

        if self.nnz == 0:
            return 0

        zero = self.dtype.type(0)
        mat = self.tocoo()
        # Convert to canonical form: no duplicates, sorted indices.
        mat.sum_duplicates()
        extreme_index = argmin_or_argmax(mat.data)
        extreme_value = mat.data[extreme_index]
        num_col = mat.shape[-1]

        # If the min value is less than zero, or max is greater than zero,
        # then we do not need to worry about implicit zeros.
        if compare(extreme_value, zero):
            # cast to Python int to avoid overflow and RuntimeError
            return int(mat.row[extreme_index]) * num_col + int(mat.col[extreme_index])

        # Cheap test for the rare case where we have no implicit zeros.
        size = np.prod(self.shape)
        if size == mat.nnz:
            return int(mat.row[extreme_index]) * num_col + int(mat.col[extreme_index])

        # At this stage, any implicit zero could be the min or max value.
        # After sum_duplicates(), the `row` and `col` arrays are guaranteed to
        # be sorted in C-order, which means the linearized indices are sorted.
        linear_indices = mat.row * num_col + mat.col
        first_implicit_zero_index = _find_missing_index(linear_indices, size)
        if extreme_value == zero:
            return min(first_implicit_zero_index, extreme_index)
        return first_implicit_zero_index

    def max(self, axis=None, out=None):
        """
        Return the maximum of the array/matrix or maximum along an axis.
        This takes all elements into account, not just the non-zero ones.

        Parameters
        ----------
        axis : {-2, -1, 0, 1, None} optional
            Axis along which the sum is computed. The default is to
            compute the maximum over all elements, returning
            a scalar (i.e., `axis` = `None`).

        out : None, optional
            This argument is in the signature *solely* for NumPy
            compatibility reasons. Do not pass in anything except
            for the default value, as this argument is not used.

        Returns
        -------
        amax : coo_matrix or scalar
            Maximum of `a`. If `axis` is None, the result is a scalar value.
            If `axis` is given, the result is a sparse.coo_matrix of dimension
            ``a.ndim - 1``.

        See Also
        --------
        min : The minimum value of a sparse array/matrix along a given axis.
        numpy.matrix.max : NumPy's implementation of 'max' for matrices

        """
        return self._min_or_max(axis, out, np.maximum)

    def min(self, axis=None, out=None):
        """
        Return the minimum of the array/matrix or minimum along an axis.
        This takes all elements into account, not just the non-zero ones.

        Parameters
        ----------
        axis : {-2, -1, 0, 1, None} optional
            Axis along which the sum is computed. The default is to
            compute the minimum over all elements, returning
            a scalar (i.e., `axis` = `None`).

        out : None, optional
            This argument is in the signature *solely* for NumPy
            compatibility reasons. Do not pass in anything except for
            the default value, as this argument is not used.

        Returns
        -------
        amin : coo_matrix or scalar
            Minimum of `a`. If `axis` is None, the result is a scalar value.
            If `axis` is given, the result is a sparse.coo_matrix of dimension
            ``a.ndim - 1``.

        See Also
        --------
        max : The maximum value of a sparse array/matrix along a given axis.
        numpy.matrix.min : NumPy's implementation of 'min' for matrices

        """
        return self._min_or_max(axis, out, np.minimum)

    def nanmax(self, axis=None, out=None):
        """
        Return the maximum of the array/matrix or maximum along an axis, ignoring any
        NaNs. This takes all elements into account, not just the non-zero
        ones.

        .. versionadded:: 1.11.0

        Parameters
        ----------
        axis : {-2, -1, 0, 1, None} optional
            Axis along which the maximum is computed. The default is to
            compute the maximum over all elements, returning
            a scalar (i.e., `axis` = `None`).

        out : None, optional
            This argument is in the signature *solely* for NumPy
            compatibility reasons. Do not pass in anything except
            for the default value, as this argument is not used.

        Returns
        -------
        amax : coo_matrix or scalar
            Maximum of `a`. If `axis` is None, the result is a scalar value.
            If `axis` is given, the result is a sparse.coo_matrix of dimension
            ``a.ndim - 1``.

        See Also
        --------
        nanmin : The minimum value of a sparse array/matrix along a given axis,
                 ignoring NaNs.
        max : The maximum value of a sparse array/matrix along a given axis,
              propagating NaNs.
        numpy.nanmax : NumPy's implementation of 'nanmax'.

        """
        return self._min_or_max(axis, out, np.fmax)

    def nanmin(self, axis=None, out=None):
        """
        Return the minimum of the array/matrix or minimum along an axis, ignoring any
        NaNs. This takes all elements into account, not just the non-zero
        ones.

        .. versionadded:: 1.11.0

        Parameters
        ----------
        axis : {-2, -1, 0, 1, None} optional
            Axis along which the minimum is computed. The default is to
            compute the minimum over all elements, returning
            a scalar (i.e., `axis` = `None`).

        out : None, optional
            This argument is in the signature *solely* for NumPy
            compatibility reasons. Do not pass in anything except for
            the default value, as this argument is not used.

        Returns
        -------
        amin : coo_matrix or scalar
            Minimum of `a`. If `axis` is None, the result is a scalar value.
            If `axis` is given, the result is a sparse.coo_matrix of dimension
            ``a.ndim - 1``.

        See Also
        --------
        nanmax : The maximum value of a sparse array/matrix along a given axis,
                 ignoring NaNs.
        min : The minimum value of a sparse array/matrix along a given axis,
              propagating NaNs.
        numpy.nanmin : NumPy's implementation of 'nanmin'.

        """
        return self._min_or_max(axis, out, np.fmin)

    def argmax(self, axis=None, out=None):
        """Return indices of maximum elements along an axis.

        Implicit zero elements are also taken into account. If there are
        several maximum values, the index of the first occurrence is returned.

        Parameters
        ----------
        axis : {-2, -1, 0, 1, None}, optional
            Axis along which the argmax is computed. If None (default), index
            of the maximum element in the flatten data is returned.
        out : None, optional
            This argument is in the signature *solely* for NumPy
            compatibility reasons. Do not pass in anything except for
            the default value, as this argument is not used.

        Returns
        -------
        ind : numpy.matrix or int
            Indices of maximum elements. If matrix, its size along `axis` is 1.
        """
        return self._arg_min_or_max(axis, out, np.argmax, np.greater)

    def argmin(self, axis=None, out=None):
        """Return indices of minimum elements along an axis.

        Implicit zero elements are also taken into account. If there are
        several minimum values, the index of the first occurrence is returned.

        Parameters
        ----------
        axis : {-2, -1, 0, 1, None}, optional
            Axis along which the argmin is computed. If None (default), index
            of the minimum element in the flatten data is returned.
        out : None, optional
            This argument is in the signature *solely* for NumPy
            compatibility reasons. Do not pass in anything except for
            the default value, as this argument is not used.

        Returns
        -------
        ind : numpy.matrix or int
            Indices of minimum elements. If matrix, its size along `axis` is 1.
        """
        return self._arg_min_or_max(axis, out, np.argmin, np.less)
env-llmeval/lib/python3.10/site-packages/scipy/sparse/_dia.py ADDED
@@ -0,0 +1,563 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Sparse DIAgonal format"""
2
+
3
+ __docformat__ = "restructuredtext en"
4
+
5
+ __all__ = ['dia_array', 'dia_matrix', 'isspmatrix_dia']
6
+
7
+ import numpy as np
8
+
9
+ from .._lib._util import copy_if_needed
10
+ from ._matrix import spmatrix
11
+ from ._base import issparse, _formats, _spbase, sparray
12
+ from ._data import _data_matrix
13
+ from ._sputils import (
14
+ isshape, upcast_char, getdtype, get_sum_dtype, validateaxis, check_shape
15
+ )
16
+ from ._sparsetools import dia_matvec
17
+
18
+
19
+ class _dia_base(_data_matrix):
20
+ _format = 'dia'
21
+
22
    def __init__(self, arg1, shape=None, dtype=None, copy=False):
        """Build a DIA container from a sparse input, a shape tuple,
        a ``(data, offsets)`` pair, or a dense array-like."""
        _data_matrix.__init__(self)

        if issparse(arg1):
            if arg1.format == "dia":
                # Same format: share (or copy) the underlying arrays.
                if copy:
                    arg1 = arg1.copy()
                self.data = arg1.data
                self.offsets = arg1.offsets
                self._shape = check_shape(arg1.shape)
            else:
                # Other sparse format: convert via todia().
                if arg1.format == self.format and copy:
                    A = arg1.copy()
                else:
                    A = arg1.todia()
                self.data = A.data
                self.offsets = A.offsets
                self._shape = check_shape(A.shape)
        elif isinstance(arg1, tuple):
            if isshape(arg1):
                # It's a tuple of matrix dimensions (M, N):
                # create an empty matrix with no stored diagonals.
                self._shape = check_shape(arg1)
                self.data = np.zeros((0,0), getdtype(dtype, default=float))
                idx_dtype = self._get_index_dtype(maxval=max(self.shape))
                self.offsets = np.zeros((0), dtype=idx_dtype)
            else:
                try:
                    # Try interpreting it as (data, offsets)
                    data, offsets = arg1
                except Exception as e:
                    message = 'unrecognized form for dia_array constructor'
                    raise ValueError(message) from e
                else:
                    # The (data, offsets) form cannot infer a shape.
                    if shape is None:
                        raise ValueError('expected a shape argument')
                    if not copy:
                        copy = copy_if_needed
                    self.data = np.atleast_2d(np.array(arg1[0], dtype=dtype, copy=copy))
                    offsets = np.array(arg1[1],
                                       dtype=self._get_index_dtype(maxval=max(shape)),
                                       copy=copy)
                    self.offsets = np.atleast_1d(offsets)
                    self._shape = check_shape(shape)
        else:
            # Must be dense: convert to COO first, then to DIA.
            try:
                arg1 = np.asarray(arg1)
            except Exception as e:
                raise ValueError("unrecognized form for"
                                 " %s_matrix constructor" % self.format) from e
            A = self._coo_container(arg1, dtype=dtype, shape=shape).todia()
            self.data = A.data
            self.offsets = A.offsets
            self._shape = check_shape(A.shape)

        if dtype is not None:
            self.data = self.data.astype(dtype)

        # Validate the resulting DIA invariants.
        if self.offsets.ndim != 1:
            raise ValueError('offsets array must have rank 1')

        if self.data.ndim != 2:
            raise ValueError('data array must have rank 2')

        if self.data.shape[0] != len(self.offsets):
            raise ValueError('number of diagonals (%d) '
                             'does not match the number of offsets (%d)'
                             % (self.data.shape[0], len(self.offsets)))

        if len(np.unique(self.offsets)) != len(self.offsets):
            raise ValueError('offset array contains duplicate values')
95
+
96
    def __repr__(self):
        # Summary string including shape, dtype, nnz and diagonal count.
        _, fmt = _formats[self.format]
        sparse_cls = 'array' if isinstance(self, sparray) else 'matrix'
        shape_str = 'x'.join(str(x) for x in self.shape)
        ndiag = self.data.shape[0]
        return (
            f"<{shape_str} sparse {sparse_cls} of type '{self.dtype.type}'\n"
            f"\twith {self.nnz} stored elements ({ndiag} diagonals) in {fmt} format>"
        )
105
+
106
    def _data_mask(self):
        """Returns a mask of the same shape as self.data, where
        mask[i,j] is True when data[i,j] corresponds to a stored element."""
        num_rows, num_cols = self.shape
        offset_inds = np.arange(self.data.shape[1])
        # Row index each data column maps to, for every stored diagonal.
        row = offset_inds - self.offsets[:,None]
        # In-bounds entries only: 0 <= row < num_rows and column < num_cols.
        mask = (row >= 0)
        mask &= (row < num_rows)
        mask &= (offset_inds < num_cols)
        return mask
116
+
117
    def count_nonzero(self):
        # Count only the entries that lie inside the array bounds.
        mask = self._data_mask()
        return np.count_nonzero(self.data[mask])
120
+
121
    def _getnnz(self, axis=None):
        if axis is not None:
            raise NotImplementedError("_getnnz over an axis is not implemented "
                                      "for DIA format")
        M,N = self.shape
        nnz = 0
        # Each stored diagonal at offset k contributes its in-bounds length.
        for k in self.offsets:
            if k > 0:
                nnz += min(M,N-k)
            else:
                nnz += min(M+k,N)
        return int(nnz)

    _getnnz.__doc__ = _spbase._getnnz.__doc__
    count_nonzero.__doc__ = _spbase.count_nonzero.__doc__
136
+
137
    def sum(self, axis=None, dtype=None, out=None):
        validateaxis(axis)

        # Normalize negative axes to {0, 1}.
        if axis is not None and axis < 0:
            axis += 2

        res_dtype = get_sum_dtype(self.dtype)
        num_rows, num_cols = self.shape
        ret = None

        if axis == 0:
            # Column sums: zero out-of-bounds entries, then reduce data.
            mask = self._data_mask()
            x = (self.data * mask).sum(axis=0)
            if x.shape[0] == num_cols:
                res = x
            else:
                # data may be narrower than the array; pad with zeros.
                res = np.zeros(num_cols, dtype=x.dtype)
                res[:x.shape[0]] = x
            ret = self._ascontainer(res, dtype=res_dtype)

        else:
            # Row sums (and axis=None) via a matvec with a ones vector.
            row_sums = np.zeros((num_rows, 1), dtype=res_dtype)
            one = np.ones(num_cols, dtype=res_dtype)
            dia_matvec(num_rows, num_cols, len(self.offsets),
                       self.data.shape[1], self.offsets, self.data, one, row_sums)

            row_sums = self._ascontainer(row_sums)

            if axis is None:
                return row_sums.sum(dtype=dtype, out=out)

            ret = self._ascontainer(row_sums.sum(axis=axis))

        if out is not None and out.shape != ret.shape:
            raise ValueError("dimensions do not match")

        # Final no-op sum applies the dtype/out arguments uniformly.
        return ret.sum(axis=(), dtype=dtype, out=out)

    sum.__doc__ = _spbase.sum.__doc__
176
+
177
    def _add_sparse(self, other):
        """Add another sparse container, staying in DIA format when
        ``other`` is DIA as well."""
        # Check if other is also of type dia_array
        if not isinstance(other, type(self)):
            # If other is not of type dia_array, default to
            # converting to csr_matrix, as is done in the _add_sparse
            # method of parent class _spbase
            return self.tocsr()._add_sparse(other)

        # The task is to compute m = self + other
        # Start by making a copy of self, of the datatype
        # that should result from adding self and other
        dtype = np.promote_types(self.dtype, other.dtype)
        m = self.astype(dtype, copy=True)

        # Then, add all the stored diagonals of other.
        for d in other.offsets:
            # Check if the diagonal has already been added.
            if d in m.offsets:
                # If the diagonal is already there, we need to take
                # the sum of the existing and the new
                m.setdiag(m.diagonal(d) + other.diagonal(d), d)
            else:
                m.setdiag(other.diagonal(d), d)
        return m
202
+
203
    def _matmul_vector(self, other):
        """Matrix-vector product ``self @ other`` via the C dia_matvec kernel."""
        x = other

        # Result dtype is the upcast of both operand dtypes.
        y = np.zeros(self.shape[0], dtype=upcast_char(self.dtype.char,
                                                      x.dtype.char))

        # L is the stored length of each diagonal row in `data`.
        L = self.data.shape[1]

        M,N = self.shape

        # dia_matvec accumulates A @ x into the preallocated output y.
        dia_matvec(M,N, len(self.offsets), L, self.offsets, self.data,
                   x.ravel(), y.ravel())

        return y
217
+
218
    def _setdiag(self, values, k=0):
        """Set diagonal ``k`` from `values` (scalar broadcasts along it)."""
        M, N = self.shape

        if values.ndim == 0:
            # broadcast: a scalar fills the whole diagonal, so treat its
            # length as unbounded (inf never wins the min() below).
            values_n = np.inf
        else:
            values_n = len(values)

        # Compute the in-bounds column span [min_index, max_index) of
        # diagonal k within the `data` row layout.
        if k < 0:
            n = min(M + k, N, values_n)
            min_index = 0
            max_index = n
        else:
            n = min(M, N - k, values_n)
            min_index = k
            max_index = k + n

        if values.ndim != 0:
            # allow also longer sequences: extra values are ignored
            values = values[:n]

        data_rows, data_cols = self.data.shape
        if k in self.offsets:
            if max_index > data_cols:
                # Widen `data` so the diagonal's span fits.
                data = np.zeros((data_rows, max_index), dtype=self.data.dtype)
                data[:, :data_cols] = self.data
                self.data = data
            self.data[self.offsets == k, min_index:max_index] = values
        else:
            # New diagonal: append its offset and a new (possibly
            # widened) row to `data`.
            self.offsets = np.append(self.offsets, self.offsets.dtype.type(k))
            m = max(max_index, data_cols)
            data = np.zeros((data_rows + 1, m), dtype=self.data.dtype)
            data[:-1, :data_cols] = self.data
            data[-1, min_index:max_index] = values
            self.data = data
254
+
255
+ def todia(self, copy=False):
256
+ if copy:
257
+ return self.copy()
258
+ else:
259
+ return self
260
+
261
+ todia.__doc__ = _spbase.todia.__doc__
262
+
263
    def transpose(self, axes=None, copy=False):
        """Return the transpose as a new DIA container."""
        if axes is not None and axes != (1, 0):
            raise ValueError("Sparse arrays/matrices do not support "
                             "an 'axes' parameter because swapping "
                             "dimensions is the only logical permutation.")

        num_rows, num_cols = self.shape
        max_dim = max(self.shape)

        # flip diagonal offsets: diagonal k of A is diagonal -k of A.T
        offsets = -self.offsets

        # re-align the data matrix: in DIA, data columns align with
        # matrix columns, so transposing requires a cyclic shift of each
        # stored diagonal by its (new) offset, implemented with modular
        # fancy indexing after zero-padding `data` to width max_dim.
        r = np.arange(len(offsets), dtype=np.intc)[:, None]
        c = np.arange(num_rows, dtype=np.intc) - (offsets % max_dim)[:, None]
        pad_amount = max(0, max_dim-self.data.shape[1])
        data = np.hstack((self.data, np.zeros((self.data.shape[0], pad_amount),
                                              dtype=self.data.dtype)))
        data = data[r, c]
        return self._dia_container((data, offsets), shape=(
            num_cols, num_rows), copy=copy)

    transpose.__doc__ = _spbase.transpose.__doc__
286
+
287
+ def diagonal(self, k=0):
288
+ rows, cols = self.shape
289
+ if k <= -rows or k >= cols:
290
+ return np.empty(0, dtype=self.data.dtype)
291
+ idx, = np.nonzero(self.offsets == k)
292
+ first_col = max(0, k)
293
+ last_col = min(rows + k, cols)
294
+ result_size = last_col - first_col
295
+ if idx.size == 0:
296
+ return np.zeros(result_size, dtype=self.data.dtype)
297
+ result = self.data[idx[0], first_col:last_col]
298
+ padding = result_size - len(result)
299
+ if padding > 0:
300
+ result = np.pad(result, (0, padding), mode='constant')
301
+ return result
302
+
303
+ diagonal.__doc__ = _spbase.diagonal.__doc__
304
+
305
    def tocsc(self, copy=False):
        """Convert to CSC format."""
        if self.nnz == 0:
            return self._csc_container(self.shape, dtype=self.dtype)

        num_rows, num_cols = self.shape
        num_offsets, offset_len = self.data.shape
        offset_inds = np.arange(offset_len)

        # Row index of each stored entry: column minus diagonal offset.
        row = offset_inds - self.offsets[:,None]
        # Keep only in-bounds, explicitly nonzero entries.
        mask = (row >= 0)
        mask &= (row < num_rows)
        mask &= (offset_inds < num_cols)
        mask &= (self.data != 0)

        idx_dtype = self._get_index_dtype(maxval=max(self.shape))
        indptr = np.zeros(num_cols + 1, dtype=idx_dtype)
        # Per-column counts -> cumulative column pointers.
        indptr[1:offset_len+1] = np.cumsum(mask.sum(axis=0)[:num_cols])
        if offset_len < num_cols:
            # Trailing columns beyond `data` width are empty.
            indptr[offset_len+1:] = indptr[offset_len]
        # Transposing groups entries by column, as CSC requires.
        indices = row.T[mask.T].astype(idx_dtype, copy=False)
        data = self.data.T[mask.T]
        return self._csc_container((data, indices, indptr), shape=self.shape,
                                   dtype=self.dtype)

    tocsc.__doc__ = _spbase.tocsc.__doc__
330
+
331
    def tocoo(self, copy=False):
        """Convert to COO format."""
        num_rows, num_cols = self.shape
        num_offsets, offset_len = self.data.shape
        offset_inds = np.arange(offset_len)

        # Row index of each stored entry: column minus diagonal offset.
        row = offset_inds - self.offsets[:,None]
        # Keep only in-bounds, explicitly nonzero entries.
        mask = (row >= 0)
        mask &= (row < num_rows)
        mask &= (offset_inds < num_cols)
        mask &= (self.data != 0)
        row = row[mask]
        # Column indices repeat per diagonal; select with the same mask.
        col = np.tile(offset_inds, num_offsets)[mask.ravel()]
        idx_dtype = self._get_index_dtype(
            arrays=(self.offsets,), maxval=max(self.shape)
        )
        row = row.astype(idx_dtype, copy=False)
        col = col.astype(idx_dtype, copy=False)
        data = self.data[mask]
        # Note: this cannot set has_canonical_format=True, because despite the
        # lack of duplicates, we do not generate sorted indices.
        return self._coo_container(
            (data, (row, col)), shape=self.shape, dtype=self.dtype, copy=False
        )

    tocoo.__doc__ = _spbase.tocoo.__doc__
356
+
357
+ # needed by _data_matrix
358
+ def _with_data(self, data, copy=True):
359
+ """Returns a matrix with the same sparsity structure as self,
360
+ but with different data. By default the structure arrays are copied.
361
+ """
362
+ if copy:
363
+ return self._dia_container(
364
+ (data, self.offsets.copy()), shape=self.shape
365
+ )
366
+ else:
367
+ return self._dia_container(
368
+ (data, self.offsets), shape=self.shape
369
+ )
370
+
371
    def resize(self, *shape):
        """Resize in place to the given shape."""
        shape = check_shape(shape)
        M, N = shape
        # we do not need to handle the case of expanding N: data columns
        # align with matrix columns, so truncation suffices.
        self.data = self.data[:, :N]

        if (M > self.shape[0] and
                np.any(self.offsets + self.shape[0] < self.data.shape[1])):
            # explicitly clear values that were previously hidden
            # (out of bounds at the old row count, in bounds at the new one)
            mask = (self.offsets[:, None] + self.shape[0] <=
                    np.arange(self.data.shape[1]))
            self.data[mask] = 0

        self._shape = shape

    resize.__doc__ = _spbase.resize.__doc__
387
+
388
+
389
+ def isspmatrix_dia(x):
390
+ """Is `x` of dia_matrix type?
391
+
392
+ Parameters
393
+ ----------
394
+ x
395
+ object to check for being a dia matrix
396
+
397
+ Returns
398
+ -------
399
+ bool
400
+ True if `x` is a dia matrix, False otherwise
401
+
402
+ Examples
403
+ --------
404
+ >>> from scipy.sparse import dia_array, dia_matrix, coo_matrix, isspmatrix_dia
405
+ >>> isspmatrix_dia(dia_matrix([[5]]))
406
+ True
407
+ >>> isspmatrix_dia(dia_array([[5]]))
408
+ False
409
+ >>> isspmatrix_dia(coo_matrix([[5]]))
410
+ False
411
+ """
412
+ return isinstance(x, dia_matrix)
413
+
414
+
415
# This namespace class separates array from matrix with isinstance
class dia_array(_dia_base, sparray):
    """
    Sparse array with DIAgonal storage.

    This can be instantiated in several ways:
        dia_array(D)
            where D is a 2-D ndarray

        dia_array(S)
            with another sparse array or matrix S (equivalent to S.todia())

        dia_array((M, N), [dtype])
            to construct an empty array with shape (M, N),
            dtype is optional, defaulting to dtype='d'.

        dia_array((data, offsets), shape=(M, N))
            where the ``data[k,:]`` stores the diagonal entries for
            diagonal ``offsets[k]`` (See example below)

    Attributes
    ----------
    dtype : dtype
        Data type of the array
    shape : 2-tuple
        Shape of the array
    ndim : int
        Number of dimensions (this is always 2)
    nnz
    size
    data
        DIA format data array of the array
    offsets
        DIA format offset array of the array
    T

    Notes
    -----

    Sparse arrays can be used in arithmetic operations: they support
    addition, subtraction, multiplication, division, and matrix power.

    The DIA format is most efficient when the nonzero values are
    concentrated on a small number of diagonals; ``data[k, :]`` is
    aligned with the array columns, so entries of ``data`` that fall
    outside the array are stored but ignored.

    Examples
    --------

    >>> import numpy as np
    >>> from scipy.sparse import dia_array
    >>> dia_array((3, 4), dtype=np.int8).toarray()
    array([[0, 0, 0, 0],
           [0, 0, 0, 0],
           [0, 0, 0, 0]], dtype=int8)

    >>> data = np.array([[1, 2, 3, 4]]).repeat(3, axis=0)
    >>> offsets = np.array([0, -1, 2])
    >>> dia_array((data, offsets), shape=(4, 4)).toarray()
    array([[1, 0, 3, 0],
           [1, 2, 0, 4],
           [0, 2, 3, 0],
           [0, 0, 3, 4]])

    >>> from scipy.sparse import dia_array
    >>> n = 10
    >>> ex = np.ones(n)
    >>> data = np.array([ex, 2 * ex, ex])
    >>> offsets = np.array([-1, 0, 1])
    >>> dia_array((data, offsets), shape=(n, n)).toarray()
    array([[2., 1., 0., ..., 0., 0., 0.],
           [1., 2., 1., ..., 0., 0., 0.],
           [0., 1., 2., ..., 0., 0., 0.],
           ...,
           [0., 0., 0., ..., 2., 1., 0.],
           [0., 0., 0., ..., 1., 2., 1.],
           [0., 0., 0., ..., 0., 1., 2.]])
    """
489
+
490
+
491
class dia_matrix(spmatrix, _dia_base):
    """
    Sparse matrix with DIAgonal storage.

    This can be instantiated in several ways:
        dia_matrix(D)
            where D is a 2-D ndarray

        dia_matrix(S)
            with another sparse array or matrix S (equivalent to S.todia())

        dia_matrix((M, N), [dtype])
            to construct an empty matrix with shape (M, N),
            dtype is optional, defaulting to dtype='d'.

        dia_matrix((data, offsets), shape=(M, N))
            where the ``data[k,:]`` stores the diagonal entries for
            diagonal ``offsets[k]`` (See example below)

    Attributes
    ----------
    dtype : dtype
        Data type of the matrix
    shape : 2-tuple
        Shape of the matrix
    ndim : int
        Number of dimensions (this is always 2)
    nnz
    size
    data
        DIA format data array of the matrix
    offsets
        DIA format offset array of the matrix
    T

    Notes
    -----

    Sparse matrices can be used in arithmetic operations: they support
    addition, subtraction, multiplication, division, and matrix power.

    The DIA format is most efficient when the nonzero values are
    concentrated on a small number of diagonals; ``data[k, :]`` is
    aligned with the matrix columns, so entries of ``data`` that fall
    outside the matrix are stored but ignored.

    Examples
    --------

    >>> import numpy as np
    >>> from scipy.sparse import dia_matrix
    >>> dia_matrix((3, 4), dtype=np.int8).toarray()
    array([[0, 0, 0, 0],
           [0, 0, 0, 0],
           [0, 0, 0, 0]], dtype=int8)

    >>> data = np.array([[1, 2, 3, 4]]).repeat(3, axis=0)
    >>> offsets = np.array([0, -1, 2])
    >>> dia_matrix((data, offsets), shape=(4, 4)).toarray()
    array([[1, 0, 3, 0],
           [1, 2, 0, 4],
           [0, 2, 3, 0],
           [0, 0, 3, 4]])

    >>> from scipy.sparse import dia_matrix
    >>> n = 10
    >>> ex = np.ones(n)
    >>> data = np.array([ex, 2 * ex, ex])
    >>> offsets = np.array([-1, 0, 1])
    >>> dia_matrix((data, offsets), shape=(n, n)).toarray()
    array([[2., 1., 0., ..., 0., 0., 0.],
           [1., 2., 1., ..., 0., 0., 0.],
           [0., 1., 2., ..., 0., 0., 0.],
           ...,
           [0., 0., 0., ..., 2., 1., 0.],
           [0., 0., 0., ..., 1., 2., 1.],
           [0., 0., 0., ..., 0., 1., 2.]])
    """
env-llmeval/lib/python3.10/site-packages/scipy/sparse/_dok.py ADDED
@@ -0,0 +1,672 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Dictionary Of Keys based matrix"""
2
+
3
+ __docformat__ = "restructuredtext en"
4
+
5
+ __all__ = ['dok_array', 'dok_matrix', 'isspmatrix_dok']
6
+
7
+ import itertools
8
+ import numpy as np
9
+
10
+ from ._matrix import spmatrix
11
+ from ._base import _spbase, sparray, issparse
12
+ from ._index import IndexMixin
13
+ from ._sputils import (isdense, getdtype, isshape, isintlike, isscalarlike,
14
+ upcast, upcast_scalar, check_shape)
15
+
16
+
17
class _dok_base(_spbase, IndexMixin, dict):
    """Base class for Dictionary-Of-Keys sparse containers.

    Nonzero entries live in ``self._dict``, keyed by ``(row, col)``
    tuples for 2-D containers and by plain integers for 1-D arrays.
    """

    _format = 'dok'

    def __init__(self, arg1, shape=None, dtype=None, copy=False):
        _spbase.__init__(self)

        is_array = isinstance(self, sparray)
        if isinstance(arg1, tuple) and isshape(arg1, allow_1d=is_array):
            # Empty container of the given shape.
            self._shape = check_shape(arg1, allow_1d=is_array)
            self._dict = {}
            self.dtype = getdtype(dtype, default=float)
        elif issparse(arg1):  # Sparse ctor
            if arg1.format == self.format:
                arg1 = arg1.copy() if copy else arg1
            else:
                arg1 = arg1.todok()

            if dtype is not None:
                arg1 = arg1.astype(dtype, copy=False)

            self._dict = arg1._dict
            self._shape = check_shape(arg1.shape, allow_1d=is_array)
            self.dtype = arg1.dtype
        else:  # Dense ctor
            try:
                arg1 = np.asarray(arg1)
            except Exception as e:
                raise TypeError('Invalid input format.') from e

            if arg1.ndim > 2:
                raise TypeError('Expected rank <=2 dense array or matrix.')

            if arg1.ndim == 1:
                if dtype is not None:
                    arg1 = arg1.astype(dtype)
                # Store only the nonzero entries.
                self._dict = {i: v for i, v in enumerate(arg1) if v != 0}
                self.dtype = arg1.dtype
            else:
                # Round-trip through COO to deduplicate and drop zeros.
                d = self._coo_container(arg1, dtype=dtype).todok()
                self._dict = d._dict
                self.dtype = d.dtype
            self._shape = check_shape(arg1.shape, allow_1d=is_array)

    def update(self, val):
        # Prevent direct usage of dict.update: it would bypass index and
        # dtype validation.
        raise NotImplementedError("Direct update to DOK sparse format is not allowed.")

    def _getnnz(self, axis=None):
        if axis is not None:
            raise NotImplementedError(
                "_getnnz over an axis is not implemented for DOK format."
            )
        return len(self._dict)

    def count_nonzero(self):
        # Stored entries can still be explicit zeros; count only nonzeros.
        return sum(x != 0 for x in self.values())

    _getnnz.__doc__ = _spbase._getnnz.__doc__
    count_nonzero.__doc__ = _spbase.count_nonzero.__doc__

    def __len__(self):
        return len(self._dict)

    def __contains__(self, key):
        return key in self._dict

    def setdefault(self, key, default=None, /):
        return self._dict.setdefault(key, default)

    def __delitem__(self, key, /):
        del self._dict[key]

    def clear(self):
        return self._dict.clear()

    def pop(self, key, default=None, /):
        return self._dict.pop(key, default)

    def __reversed__(self):
        raise TypeError("reversed is not defined for dok_array type")

    def __or__(self, other):
        type_names = f"{type(self).__name__} and {type(other).__name__}"
        raise TypeError(f"unsupported operand type for |: {type_names}")

    def __ror__(self, other):
        type_names = f"{type(self).__name__} and {type(other).__name__}"
        raise TypeError(f"unsupported operand type for |: {type_names}")

    def __ior__(self, other):
        type_names = f"{type(self).__name__} and {type(other).__name__}"
        raise TypeError(f"unsupported operand type for |: {type_names}")

    def popitem(self):
        return self._dict.popitem()

    def items(self):
        return self._dict.items()

    def keys(self):
        return self._dict.keys()

    def values(self):
        return self._dict.values()

    def get(self, key, default=0.0):
        """This provides dict.get method functionality with type checking"""
        if key in self._dict:
            return self._dict[key]
        if isintlike(key) and self.ndim == 1:
            key = (key,)
        if self.ndim != len(key):
            raise IndexError(f'Index {key} length needs to match self.shape')
        try:
            for i in key:
                assert isintlike(i)
        except (AssertionError, TypeError, ValueError) as e:
            raise IndexError('Index must be or consist of integers.') from e
        # Normalize negative indices, then bounds-check.
        key = tuple(i + M if i < 0 else i for i, M in zip(key, self.shape))
        if any(i < 0 or i >= M for i, M in zip(key, self.shape)):
            raise IndexError('Index out of bounds.')
        if self.ndim == 1:
            key = key[0]
        return self._dict.get(key, default)

    # override IndexMixin.__getitem__ for 1d case until fully implemented
    def __getitem__(self, key):
        if self.ndim == 2:
            return super().__getitem__(key)

        if isinstance(key, tuple) and len(key) == 1:
            key = key[0]
        INT_TYPES = (int, np.integer)
        if isinstance(key, INT_TYPES):
            if key < 0:
                key += self.shape[-1]
            if key < 0 or key >= self.shape[-1]:
                raise IndexError('index value out of bounds')
            return self._get_int(key)
        else:
            raise IndexError('array/slice index for 1d dok_array not yet supported')

    # 1D get methods
    def _get_int(self, idx):
        return self._dict.get(idx, self.dtype.type(0))

    # 2D get methods
    def _get_intXint(self, row, col):
        return self._dict.get((row, col), self.dtype.type(0))

    def _get_intXslice(self, row, col):
        return self._get_sliceXslice(slice(row, row + 1), col)

    def _get_sliceXint(self, row, col):
        return self._get_sliceXslice(row, slice(col, col + 1))

    def _get_sliceXslice(self, row, col):
        row_start, row_stop, row_step = row.indices(self.shape[0])
        col_start, col_stop, col_step = col.indices(self.shape[1])
        row_range = range(row_start, row_stop, row_step)
        col_range = range(col_start, col_stop, col_step)
        shape = (len(row_range), len(col_range))
        # Switch paths only when advantageous
        # (count the iterations in the loops, adjust for complexity)
        if len(self) >= 2 * shape[0] * shape[1]:
            # O(nr*nc) path: loop over <row x col>
            return self._get_columnXarray(row_range, col_range)
        # O(nnz) path: loop over entries of self
        newdok = self._dok_container(shape, dtype=self.dtype)
        for key in self.keys():
            i, ri = divmod(int(key[0]) - row_start, row_step)
            if ri != 0 or i < 0 or i >= shape[0]:
                continue
            j, rj = divmod(int(key[1]) - col_start, col_step)
            if rj != 0 or j < 0 or j >= shape[1]:
                continue
            newdok._dict[i, j] = self._dict[key]
        return newdok

    def _get_intXarray(self, row, col):
        col = col.squeeze()
        return self._get_columnXarray([row], col)

    def _get_arrayXint(self, row, col):
        row = row.squeeze()
        return self._get_columnXarray(row, [col])

    def _get_sliceXarray(self, row, col):
        row = list(range(*row.indices(self.shape[0])))
        return self._get_columnXarray(row, col)

    def _get_arrayXslice(self, row, col):
        col = list(range(*col.indices(self.shape[1])))
        return self._get_columnXarray(row, col)

    def _get_columnXarray(self, row, col):
        # outer indexing
        newdok = self._dok_container((len(row), len(col)), dtype=self.dtype)

        for i, r in enumerate(row):
            for j, c in enumerate(col):
                v = self._dict.get((r, c), 0)
                if v:
                    newdok._dict[i, j] = v
        return newdok

    def _get_arrayXarray(self, row, col):
        # inner indexing
        i, j = map(np.atleast_2d, np.broadcast_arrays(row, col))
        newdok = self._dok_container(i.shape, dtype=self.dtype)

        for key in itertools.product(range(i.shape[0]), range(i.shape[1])):
            v = self._dict.get((i[key], j[key]), 0)
            if v:
                newdok._dict[key] = v
        return newdok

    # override IndexMixin.__setitem__ for 1d case until fully implemented
    def __setitem__(self, key, value):
        if self.ndim == 2:
            return super().__setitem__(key, value)

        if isinstance(key, tuple) and len(key) == 1:
            key = key[0]
        INT_TYPES = (int, np.integer)
        if isinstance(key, INT_TYPES):
            if key < 0:
                key += self.shape[-1]
            if key < 0 or key >= self.shape[-1]:
                raise IndexError('index value out of bounds')
            return self._set_int(key, value)
        else:
            raise IndexError('array index for 1d dok_array not yet provided')

    # 1D set methods
    def _set_int(self, idx, x):
        if x:
            self._dict[idx] = x
        elif idx in self._dict:
            # Assigning zero removes the stored entry.
            del self._dict[idx]

    # 2D set methods
    def _set_intXint(self, row, col, x):
        key = (row, col)
        if x:
            self._dict[key] = x
        elif key in self._dict:
            # Assigning zero removes the stored entry.
            del self._dict[key]

    def _set_arrayXarray(self, row, col, x):
        row = list(map(int, row.ravel()))
        col = list(map(int, col.ravel()))
        x = x.ravel()
        self._dict.update(zip(zip(row, col), x))

        for i in np.nonzero(x == 0)[0]:
            key = (row[i], col[i])
            if self._dict[key] == 0:
                # may have been superseded by later update
                del self._dict[key]

    def __add__(self, other):
        if isscalarlike(other):
            res_dtype = upcast_scalar(self.dtype, other)
            new = self._dok_container(self.shape, dtype=res_dtype)
            # Add this scalar to each element.
            for key in itertools.product(*[range(d) for d in self.shape]):
                aij = self._dict.get(key, 0) + other
                if aij:
                    new[key] = aij
        elif issparse(other):
            if other.shape != self.shape:
                raise ValueError("Matrix dimensions are not equal.")
            res_dtype = upcast(self.dtype, other.dtype)
            new = self._dok_container(self.shape, dtype=res_dtype)
            new._dict = self._dict.copy()
            if other.format == "dok":
                o_items = other.items()
            else:
                other = other.tocoo()
                if self.ndim == 1:
                    o_items = zip(other.coords[0], other.data)
                else:
                    o_items = zip(zip(*other.coords), other.data)
            with np.errstate(over='ignore'):
                new._dict.update((k, new[k] + v) for k, v in o_items)
        elif isdense(other):
            new = self.todense() + other
        else:
            return NotImplemented
        return new

    def __radd__(self, other):
        return self + other  # addition is commutative

    def __neg__(self):
        if self.dtype.kind == 'b':
            raise NotImplementedError(
                'Negating a sparse boolean matrix is not supported.'
            )
        new = self._dok_container(self.shape, dtype=self.dtype)
        new._dict.update((k, -v) for k, v in self.items())
        return new

    def _mul_scalar(self, other):
        res_dtype = upcast_scalar(self.dtype, other)
        # Multiply this scalar by every element.
        new = self._dok_container(self.shape, dtype=res_dtype)
        new._dict.update(((k, v * other) for k, v in self.items()))
        return new

    def _matmul_vector(self, other):
        res_dtype = upcast(self.dtype, other.dtype)

        # vector @ vector
        if self.ndim == 1:
            if issparse(other):
                if other.format == "dok":
                    keys = self.keys() & other.keys()
                else:
                    keys = self.keys() & other.tocoo().coords[0]
                # NOTE(review): `other._dict` assumes the operand is DOK;
                # the non-DOK branch above looks fragile — confirm upstream.
                return res_dtype(sum(self._dict[k] * other._dict[k] for k in keys))
            elif isdense(other):
                return res_dtype(sum(other[k] * v for k, v in self.items()))
            else:
                return NotImplemented

        # matrix @ vector
        result = np.zeros(self.shape[0], dtype=res_dtype)
        for (i, j), v in self.items():
            result[i] += v * other[j]
        return result

    def _matmul_multivector(self, other):
        result_dtype = upcast(self.dtype, other.dtype)
        # vector @ multivector
        if self.ndim == 1:
            # works for other 1d or 2d
            return sum(v * other[j] for j, v in self._dict.items())

        # matrix @ multivector
        M = self.shape[0]
        new_shape = (M,) if other.ndim == 1 else (M, other.shape[1])
        result = np.zeros(new_shape, dtype=result_dtype)
        for (i, j), v in self.items():
            result[i] += v * other[j]
        return result

    def __imul__(self, other):
        if isscalarlike(other):
            self._dict.update((k, v * other) for k, v in self.items())
            return self
        return NotImplemented

    def __truediv__(self, other):
        if isscalarlike(other):
            res_dtype = upcast_scalar(self.dtype, other)
            new = self._dok_container(self.shape, dtype=res_dtype)
            new._dict.update(((k, v / other) for k, v in self.items()))
            return new
        return self.tocsr() / other

    def __itruediv__(self, other):
        if isscalarlike(other):
            self._dict.update((k, v / other) for k, v in self.items())
            return self
        return NotImplemented

    def __reduce__(self):
        # this approach is necessary because __setstate__ is called after
        # __setitem__ upon unpickling and since __init__ is not called there
        # is no shape attribute hence it is not possible to unpickle it.
        return dict.__reduce__(self)

    def diagonal(self, k=0):
        if self.ndim == 2:
            return super().diagonal(k)
        raise ValueError("diagonal requires two dimensions")

    def transpose(self, axes=None, copy=False):
        if self.ndim == 1:
            # Transposing a 1-D array is a no-op.
            return self.copy()

        if axes is not None and axes != (1, 0):
            raise ValueError(
                "Sparse arrays/matrices do not support "
                "an 'axes' parameter because swapping "
                "dimensions is the only logical permutation."
            )

        M, N = self.shape
        new = self._dok_container((N, M), dtype=self.dtype, copy=copy)
        new._dict.update((((right, left), val) for (left, right), val in self.items()))
        return new

    transpose.__doc__ = _spbase.transpose.__doc__

    def conjtransp(self):
        """Return the conjugate transpose."""
        if self.ndim == 1:
            new = self.tocoo()
            new.data = new.data.conjugate()
            return new
        M, N = self.shape
        new = self._dok_container((N, M), dtype=self.dtype)
        new._dict = {(right, left): np.conj(val) for (left, right), val in self.items()}
        return new

    def copy(self):
        new = self._dok_container(self.shape, dtype=self.dtype)
        new._dict.update(self._dict)
        return new

    copy.__doc__ = _spbase.copy.__doc__

    @classmethod
    def fromkeys(cls, iterable, value=1, /):
        """Build a DOK container with every key in `iterable` set to `value`.

        The shape is inferred as one past the largest index per axis.
        """
        tmp = dict.fromkeys(iterable, value)
        if isinstance(next(iter(tmp)), tuple):
            shape = tuple(max(idx) + 1 for idx in zip(*tmp))
        else:
            shape = (max(tmp) + 1,)
        result = cls(shape, dtype=type(value))
        result._dict = tmp
        return result

    def tocoo(self, copy=False):
        nnz = self.nnz
        if nnz == 0:
            return self._coo_container(self.shape, dtype=self.dtype)

        idx_dtype = self._get_index_dtype(maxval=max(self.shape))
        data = np.fromiter(self.values(), dtype=self.dtype, count=nnz)
        # handle 1d keys specially b/c not a tuple
        inds = zip(*self.keys()) if self.ndim > 1 else (self.keys(),)
        coords = tuple(np.fromiter(ix, dtype=idx_dtype, count=nnz) for ix in inds)
        A = self._coo_container((data, coords), shape=self.shape, dtype=self.dtype)
        # DOK keys are unique, so the COO result has no duplicates.
        A.has_canonical_format = True
        return A

    tocoo.__doc__ = _spbase.tocoo.__doc__

    def todok(self, copy=False):
        if copy:
            return self.copy()
        return self

    todok.__doc__ = _spbase.todok.__doc__

    def tocsc(self, copy=False):
        if self.ndim == 1:
            # Bug fix: the message previously said "tocsr()".
            raise NotImplementedError("tocsc() not valid for 1d sparse array")
        return self.tocoo(copy=False).tocsc(copy=copy)

    tocsc.__doc__ = _spbase.tocsc.__doc__

    def resize(self, *shape):
        is_array = isinstance(self, sparray)
        shape = check_shape(shape, allow_1d=is_array)
        if len(shape) != len(self.shape):
            # TODO implement resize across dimensions
            raise NotImplementedError

        if self.ndim == 1:
            newN = shape[-1]
            for i in list(self._dict):
                if i >= newN:
                    del self._dict[i]
            self._shape = shape
            return

        newM, newN = shape
        M, N = self.shape
        if newM < M or newN < N:
            # Remove all elements outside new dimensions
            for i, j in list(self.keys()):
                if i >= newM or j >= newN:
                    del self._dict[i, j]
        self._shape = shape

    resize.__doc__ = _spbase.resize.__doc__

    # Added for 1d to avoid `tocsr` from _base.py
    def astype(self, dtype, casting='unsafe', copy=True):
        dtype = np.dtype(dtype)
        if self.dtype != dtype:
            result = self._dok_container(self.shape, dtype=dtype)
            data = np.array(list(self._dict.values()), dtype=dtype)
            result._dict = dict(zip(self._dict, data))
            return result
        elif copy:
            return self.copy()
        return self
510
+
511
+
512
+ def isspmatrix_dok(x):
513
+ """Is `x` of dok_array type?
514
+
515
+ Parameters
516
+ ----------
517
+ x
518
+ object to check for being a dok matrix
519
+
520
+ Returns
521
+ -------
522
+ bool
523
+ True if `x` is a dok matrix, False otherwise
524
+
525
+ Examples
526
+ --------
527
+ >>> from scipy.sparse import dok_array, dok_matrix, coo_matrix, isspmatrix_dok
528
+ >>> isspmatrix_dok(dok_matrix([[5]]))
529
+ True
530
+ >>> isspmatrix_dok(dok_array([[5]]))
531
+ False
532
+ >>> isspmatrix_dok(coo_matrix([[5]]))
533
+ False
534
+ """
535
+ return isinstance(x, dok_matrix)
536
+
537
+
538
# This namespace class separates array from matrix with isinstance
class dok_array(_dok_base, sparray):
    """
    Dictionary Of Keys based sparse array.

    This is an efficient structure for constructing sparse
    arrays incrementally.

    This can be instantiated in several ways:
        dok_array(D)
            where D is a 2-D ndarray

        dok_array(S)
            with another sparse array or matrix S (equivalent to S.todok())

        dok_array((M,N), [dtype])
            create the array with initial shape (M,N)
            dtype is optional, defaulting to dtype='d'

    Attributes
    ----------
    dtype : dtype
        Data type of the array
    shape : 2-tuple
        Shape of the array
    ndim : int
        Number of dimensions (1 or 2; the constructor also accepts
        1-D shapes for arrays)
    nnz
        Number of nonzero elements
    size
    T

    Notes
    -----

    Sparse arrays can be used in arithmetic operations: they support
    addition, subtraction, multiplication, division, and matrix power.

    - Allows for efficient O(1) access of individual elements.
    - Duplicates are not allowed.
    - Can be efficiently converted to a coo_array once constructed.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import dok_array
    >>> S = dok_array((5, 5), dtype=np.float32)
    >>> for i in range(5):
    ...     for j in range(5):
    ...         S[i, j] = i + j  # Update element

    """
590
+
591
+
592
class dok_matrix(spmatrix, _dok_base):
    """
    Dictionary Of Keys based sparse matrix.

    This is an efficient structure for constructing sparse
    matrices incrementally.

    This can be instantiated in several ways:
        dok_matrix(D)
            where D is a 2-D ndarray

        dok_matrix(S)
            with another sparse array or matrix S (equivalent to S.todok())

        dok_matrix((M,N), [dtype])
            create the matrix with initial shape (M,N)
            dtype is optional, defaulting to dtype='d'

    Attributes
    ----------
    dtype : dtype
        Data type of the matrix
    shape : 2-tuple
        Shape of the matrix
    ndim : int
        Number of dimensions (this is always 2)
    nnz
        Number of nonzero elements
    size
    T

    Notes
    -----

    Sparse matrices can be used in arithmetic operations: they support
    addition, subtraction, multiplication, division, and matrix power.

    - Allows for efficient O(1) access of individual elements.
    - Duplicates are not allowed.
    - Can be efficiently converted to a coo_matrix once constructed.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import dok_matrix
    >>> S = dok_matrix((5, 5), dtype=np.float32)
    >>> for i in range(5):
    ...     for j in range(5):
    ...         S[i, j] = i + j  # Update element

    """

    def set_shape(self, shape):
        # Reshape in place by adopting the reshaped matrix's state.
        new_matrix = self.reshape(shape, copy=False).asformat(self.format)
        self.__dict__ = new_matrix.__dict__

    def get_shape(self):
        """Get shape of a sparse matrix."""
        return self._shape

    shape = property(fget=get_shape, fset=set_shape)

    # The spmatrix flavor keeps dict-like | and reversed() behavior
    # (the sparray flavor, via _dok_base, forbids them).
    def __reversed__(self):
        return self._dict.__reversed__()

    def __or__(self, other):
        if isinstance(other, _dok_base):
            return self._dict | other._dict
        return self._dict | other

    def __ror__(self, other):
        if isinstance(other, _dok_base):
            return self._dict | other._dict
        return self._dict | other

    def __ior__(self, other):
        if isinstance(other, _dok_base):
            self._dict |= other._dict
        else:
            self._dict |= other
        return self
env-llmeval/lib/python3.10/site-packages/scipy/sparse/_index.py ADDED
@@ -0,0 +1,392 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Indexing mixin for sparse array/matrix classes.
2
+ """
3
+ from __future__ import annotations
4
+
5
+ from typing import TYPE_CHECKING
6
+
7
+ import numpy as np
8
+ from ._sputils import isintlike
9
+
10
+ if TYPE_CHECKING:
11
+ import numpy.typing as npt
12
+
13
+ INT_TYPES = (int, np.integer)
14
+
15
+
16
+ def _broadcast_arrays(a, b):
17
+ """
18
+ Same as np.broadcast_arrays(a, b) but old writeability rules.
19
+
20
+ NumPy >= 1.17.0 transitions broadcast_arrays to return
21
+ read-only arrays. Set writeability explicitly to avoid warnings.
22
+ Retain the old writeability rules, as our Cython code assumes
23
+ the old behavior.
24
+ """
25
+ x, y = np.broadcast_arrays(a, b)
26
+ x.flags.writeable = a.flags.writeable
27
+ y.flags.writeable = b.flags.writeable
28
+ return x, y
29
+
30
+
31
class IndexMixin:
    """
    This class provides common dispatching and validation logic for indexing.
    """
    def _raise_on_1d_array_slice(self):
        """We do not currently support 1D sparse arrays.

        This function is called each time that a 1D array would
        result, raising an error instead.

        Once 1D sparse arrays are implemented, it should be removed.
        """
        # Only the sparray API is affected; spmatrix results stay 2-D.
        from scipy.sparse import sparray

        if isinstance(self, sparray):
            raise NotImplementedError(
                'We have not yet implemented 1D sparse slices; '
                'please index using explicit indices, e.g. `x[:, [0]]`'
            )

    def __getitem__(self, key):
        """Dispatch ``self[key]`` to the format-specific ``_get_*`` hooks.

        `_validate_indices` normalizes `key` into a (row, col) pair where
        each part is an int, a slice, or an integer index array.
        """
        row, col = self._validate_indices(key)

        # Dispatch to specialized methods.
        if isinstance(row, INT_TYPES):
            if isinstance(col, INT_TYPES):
                return self._get_intXint(row, col)
            elif isinstance(col, slice):
                self._raise_on_1d_array_slice()
                return self._get_intXslice(row, col)
            elif col.ndim == 1:
                self._raise_on_1d_array_slice()
                return self._get_intXarray(row, col)
            elif col.ndim == 2:
                return self._get_intXarray(row, col)
            raise IndexError('index results in >2 dimensions')
        elif isinstance(row, slice):
            if isinstance(col, INT_TYPES):
                self._raise_on_1d_array_slice()
                return self._get_sliceXint(row, col)
            elif isinstance(col, slice):
                # `x[:, :]` selects everything: just return a copy.
                if row == slice(None) and row == col:
                    return self.copy()
                return self._get_sliceXslice(row, col)
            elif col.ndim == 1:
                return self._get_sliceXarray(row, col)
            raise IndexError('index results in >2 dimensions')
        elif row.ndim == 1:
            if isinstance(col, INT_TYPES):
                self._raise_on_1d_array_slice()
                return self._get_arrayXint(row, col)
            elif isinstance(col, slice):
                return self._get_arrayXslice(row, col)
        else:  # row.ndim == 2
            if isinstance(col, INT_TYPES):
                return self._get_arrayXint(row, col)
            elif isinstance(col, slice):
                raise IndexError('index results in >2 dimensions')
            elif row.shape[1] == 1 and (col.ndim == 1 or col.shape[0] == 1):
                # special case for outer indexing
                return self._get_columnXarray(row[:,0], col.ravel())

        # The only remaining case is inner (fancy) indexing
        row, col = _broadcast_arrays(row, col)
        if row.shape != col.shape:
            raise IndexError('number of row and column indices differ')
        if row.size == 0:
            # Empty fancy index: result is an empty container of that shape.
            return self.__class__(np.atleast_2d(row).shape, dtype=self.dtype)
        return self._get_arrayXarray(row, col)

    def __setitem__(self, key, x):
        """Dispatch ``self[key] = x`` to the format-specific ``_set_*`` hooks."""
        row, col = self._validate_indices(key)

        if isinstance(row, INT_TYPES) and isinstance(col, INT_TYPES):
            x = np.asarray(x, dtype=self.dtype)
            if x.size != 1:
                raise ValueError('Trying to assign a sequence to an item')
            self._set_intXint(row, col, x.flat[0])
            return

        # Materialize slices as index arrays shaped for broadcasting:
        # rows become a column vector, columns a row vector.
        if isinstance(row, slice):
            row = np.arange(*row.indices(self.shape[0]))[:, None]
        else:
            row = np.atleast_1d(row)

        if isinstance(col, slice):
            col = np.arange(*col.indices(self.shape[1]))[None, :]
            if row.ndim == 1:
                row = row[:, None]
        else:
            col = np.atleast_1d(col)

        i, j = _broadcast_arrays(row, col)
        if i.shape != j.shape:
            raise IndexError('number of row and column indices differ')

        from ._base import issparse
        if issparse(x):
            if i.ndim == 1:
                # Inner indexing, so treat them like row vectors.
                i = i[None]
                j = j[None]
            # Allow a 1-wide/1-tall sparse x to broadcast along either axis.
            broadcast_row = x.shape[0] == 1 and i.shape[0] != 1
            broadcast_col = x.shape[1] == 1 and i.shape[1] != 1
            if not ((broadcast_row or x.shape[0] == i.shape[0]) and
                    (broadcast_col or x.shape[1] == i.shape[1])):
                raise ValueError('shape mismatch in assignment')
            if x.shape[0] == 0 or x.shape[1] == 0:
                # Nothing to assign.
                return
            x = x.tocoo(copy=True)
            x.sum_duplicates()
            self._set_arrayXarray_sparse(i, j, x)
        else:
            # Make x and i into the same shape
            x = np.asarray(x, dtype=self.dtype)
            if x.squeeze().shape != i.squeeze().shape:
                x = np.broadcast_to(x, i.shape)
            if x.size == 0:
                return
            x = x.reshape(i.shape)
            self._set_arrayXarray(i, j, x)

    def _validate_indices(self, key):
        """Normalize `key` into (row, col) of ints, slices, or index arrays.

        Boolean masks (dense or sparse) are converted to integer indices;
        integer indices are bounds-checked and negatives wrapped.
        """
        # First, check if indexing with single boolean matrix.
        from ._base import _spbase
        if (isinstance(key, (_spbase, np.ndarray)) and
                key.ndim == 2 and key.dtype.kind == 'b'):
            if key.shape != self.shape:
                raise IndexError('boolean index shape does not match array shape')
            # A full-shape boolean mask selects the True positions directly.
            row, col = key.nonzero()
        else:
            row, col = _unpack_index(key)
        M, N = self.shape

        def _validate_bool_idx(
            idx: npt.NDArray[np.bool_],
            axis_size: int,
            axis_name: str
        ) -> npt.NDArray[np.int_]:
            # A 1-D boolean mask must cover the whole axis it indexes.
            if len(idx) != axis_size:
                raise IndexError(
                    f"boolean {axis_name} index has incorrect length: {len(idx)} "
                    f"instead of {axis_size}"
                )
            return _boolean_index_to_array(idx)

        if isintlike(row):
            row = int(row)
            if row < -M or row >= M:
                raise IndexError('row index (%d) out of range' % row)
            if row < 0:
                row += M
        elif (bool_row := _compatible_boolean_index(row)) is not None:
            row = _validate_bool_idx(bool_row, M, "row")
        elif not isinstance(row, slice):
            row = self._asindices(row, M)

        if isintlike(col):
            col = int(col)
            if col < -N or col >= N:
                raise IndexError('column index (%d) out of range' % col)
            if col < 0:
                col += N
        elif (bool_col := _compatible_boolean_index(col)) is not None:
            col = _validate_bool_idx(bool_col, N, "column")
        elif not isinstance(col, slice):
            col = self._asindices(col, N)

        return row, col

    def _asindices(self, idx, length):
        """Convert `idx` to a valid index for an axis with a given length.

        Subclasses that need special validation can override this method.
        """
        try:
            x = np.asarray(idx)
        except (ValueError, TypeError, MemoryError) as e:
            raise IndexError('invalid index') from e

        if x.ndim not in (1, 2):
            raise IndexError('Index dimension must be 1 or 2')

        if x.size == 0:
            return x

        # Check bounds
        max_indx = x.max()
        if max_indx >= length:
            raise IndexError('index (%d) out of range' % max_indx)

        min_indx = x.min()
        if min_indx < 0:
            if min_indx < -length:
                raise IndexError('index (%d) out of range' % min_indx)
            # Copy before wrapping negatives so the caller's array (or a
            # shared buffer) is never mutated in place.
            if x is idx or not x.flags.owndata:
                x = x.copy()
            x[x < 0] += length
        return x

    def _getrow(self, i):
        """Return a copy of row i of the matrix, as a (1 x n) row vector.
        """
        M, N = self.shape
        i = int(i)
        if i < -M or i >= M:
            raise IndexError('index (%d) out of range' % i)
        if i < 0:
            i += M
        return self._get_intXslice(i, slice(None))

    def _getcol(self, i):
        """Return a copy of column i of the matrix, as a (m x 1) column vector.
        """
        M, N = self.shape
        i = int(i)
        if i < -N or i >= N:
            raise IndexError('index (%d) out of range' % i)
        if i < 0:
            i += N
        return self._get_sliceXint(slice(None), i)

    # The hooks below are implemented by each concrete storage format.

    def _get_intXint(self, row, col):
        raise NotImplementedError()

    def _get_intXarray(self, row, col):
        raise NotImplementedError()

    def _get_intXslice(self, row, col):
        raise NotImplementedError()

    def _get_sliceXint(self, row, col):
        raise NotImplementedError()

    def _get_sliceXslice(self, row, col):
        raise NotImplementedError()

    def _get_sliceXarray(self, row, col):
        raise NotImplementedError()

    def _get_arrayXint(self, row, col):
        raise NotImplementedError()

    def _get_arrayXslice(self, row, col):
        raise NotImplementedError()

    def _get_columnXarray(self, row, col):
        raise NotImplementedError()

    def _get_arrayXarray(self, row, col):
        raise NotImplementedError()

    def _set_intXint(self, row, col, x):
        raise NotImplementedError()

    def _set_arrayXarray(self, row, col, x):
        raise NotImplementedError()

    def _set_arrayXarray_sparse(self, row, col, x):
        # Fall back to densifying x
        x = np.asarray(x.toarray(), dtype=self.dtype)
        x, _ = _broadcast_arrays(x, row)
        self._set_arrayXarray(row, col, x)
294
+
295
+
296
def _unpack_index(index) -> tuple[
    int | slice | npt.NDArray[np.bool_ | np.int_],
    int | slice | npt.NDArray[np.bool_ | np.int_]
]:
    """ Parse index. Always return a tuple of the form (row, col).
    Valid type for row/col is integer, slice, array of bool, or array of integers.
    """
    # Parse any ellipses.
    index = _check_ellipsis(index)

    # Next, parse the tuple or object
    if isinstance(index, tuple):
        if len(index) == 2:
            row, col = index
        elif len(index) == 1:
            # Single index selects rows; all columns implied.
            row, col = index[0], slice(None)
        else:
            raise IndexError('invalid number of indices')
    else:
        idx = _compatible_boolean_index(index)
        if idx is None:
            row, col = index, slice(None)
        elif idx.ndim < 2:
            return idx, slice(None)
        elif idx.ndim == 2:
            # A 2-D boolean mask selects its True positions directly.
            return idx.nonzero()
        # NOTE(review): a boolean array with ndim > 2 falls through here with
        # `row`/`col` unbound and would raise NameError below — confirm
        # whether such input can reach this point.
    # Next, check for validity and transform the index as needed.
    from ._base import issparse
    if issparse(row) or issparse(col):
        # Supporting sparse boolean indexing with both row and col does
        # not work because spmatrix.ndim is always 2.
        raise IndexError(
            'Indexing with sparse matrices is not supported '
            'except boolean indexing where matrix and index '
            'are equal shapes.')
    return row, col
332
+
333
+
334
+ def _check_ellipsis(index):
335
+ """Process indices with Ellipsis. Returns modified index."""
336
+ if index is Ellipsis:
337
+ return (slice(None), slice(None))
338
+
339
+ if not isinstance(index, tuple):
340
+ return index
341
+
342
+ # Find any Ellipsis objects.
343
+ ellipsis_indices = [i for i, v in enumerate(index) if v is Ellipsis]
344
+ if not ellipsis_indices:
345
+ return index
346
+ if len(ellipsis_indices) > 1:
347
+ raise IndexError("an index can only have a single ellipsis ('...')")
348
+
349
+ # Replace the Ellipsis object with 0, 1, or 2 null-slices as needed.
350
+ i, = ellipsis_indices
351
+ num_slices = max(0, 3 - len(index))
352
+ return index[:i] + (slice(None),) * num_slices + index[i + 1:]
353
+
354
+
355
+ def _maybe_bool_ndarray(idx):
356
+ """Returns a compatible array if elements are boolean.
357
+ """
358
+ idx = np.asanyarray(idx)
359
+ if idx.dtype.kind == 'b':
360
+ return idx
361
+ return None
362
+
363
+
364
+ def _first_element_bool(idx, max_dim=2):
365
+ """Returns True if first element of the incompatible
366
+ array type is boolean.
367
+ """
368
+ if max_dim < 1:
369
+ return None
370
+ try:
371
+ first = next(iter(idx), None)
372
+ except TypeError:
373
+ return None
374
+ if isinstance(first, bool):
375
+ return True
376
+ return _first_element_bool(first, max_dim-1)
377
+
378
+
379
def _compatible_boolean_index(idx):
    """Returns a boolean index array that can be converted to
    integer array. Returns None if no such array exists.
    """
    # Presence of attribute `ndim` indicates a compatible array type.
    # Otherwise, peek at the first (nested) element to detect bool lists.
    if hasattr(idx, 'ndim') or _first_element_bool(idx):
        return _maybe_bool_ndarray(idx)
    return None
386
+ return None
387
+
388
+
389
+ def _boolean_index_to_array(idx):
390
+ if idx.ndim > 1:
391
+ raise IndexError('invalid index shape')
392
+ return np.where(idx)[0]
env-llmeval/lib/python3.10/site-packages/scipy/sparse/_matrix.py ADDED
@@ -0,0 +1,113 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
class spmatrix:
    """This class provides a base class for all sparse matrix classes.

    It cannot be instantiated. Most of the work is provided by subclasses.
    """

    # The *_container properties let shared base-class code construct results
    # of the matching *matrix* (not array) type, using local imports to avoid
    # circular-import problems between format modules.
    @property
    def _bsr_container(self):
        from ._bsr import bsr_matrix
        return bsr_matrix

    @property
    def _coo_container(self):
        from ._coo import coo_matrix
        return coo_matrix

    @property
    def _csc_container(self):
        from ._csc import csc_matrix
        return csc_matrix

    @property
    def _csr_container(self):
        from ._csr import csr_matrix
        return csr_matrix

    @property
    def _dia_container(self):
        from ._dia import dia_matrix
        return dia_matrix

    @property
    def _dok_container(self):
        from ._dok import dok_matrix
        return dok_matrix

    @property
    def _lil_container(self):
        from ._lil import lil_matrix
        return lil_matrix

    # Restore matrix multiplication
    # For spmatrix (unlike sparray), `*` means matrix multiplication,
    # mirroring numpy.matrix semantics.
    def __mul__(self, other):
        return self._matmul_dispatch(other)

    def __rmul__(self, other):
        return self._rmatmul_dispatch(other)

    # Restore matrix power
    def __pow__(self, power):
        from .linalg import matrix_power

        return matrix_power(self, power)

    ## Backward compatibility

    def set_shape(self, shape):
        """Set the shape of the matrix in-place"""
        # Make sure copy is False since this is in place
        # Make sure format is unchanged because we are doing a __dict__ swap
        new_self = self.reshape(shape, copy=False).asformat(self.format)
        self.__dict__ = new_self.__dict__

    def get_shape(self):
        """Get the shape of the matrix"""
        return self._shape

    shape = property(fget=get_shape, fset=set_shape,
                     doc="Shape of the matrix")

    def asfptype(self):
        """Upcast matrix to a floating point format (if necessary)"""
        return self._asfptype()

    def getmaxprint(self):
        """Maximum number of elements to display when printed."""
        return self._getmaxprint()

    def getformat(self):
        """Matrix storage format"""
        return self.format

    def getnnz(self, axis=None):
        """Number of stored values, including explicit zeros.

        Parameters
        ----------
        axis : None, 0, or 1
            Select between the number of values across the whole array, in
            each column, or in each row.
        """
        return self._getnnz(axis=axis)

    def getH(self):
        """Return the Hermitian transpose of this matrix.

        See Also
        --------
        numpy.matrix.getH : NumPy's implementation of `getH` for matrices
        """
        return self.conjugate().transpose()

    def getcol(self, j):
        """Returns a copy of column j of the matrix, as an (m x 1) sparse
        matrix (column vector).
        """
        return self._getcol(j)

    def getrow(self, i):
        """Returns a copy of row i of the matrix, as a (1 x n) sparse
        matrix (row vector).
        """
        return self._getrow(i)
env-llmeval/lib/python3.10/site-packages/scipy/sparse/_matrix_io.py ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import scipy as sp
3
+
4
+ __all__ = ['save_npz', 'load_npz']
5
+
6
+
7
+ # Make loading safe vs. malicious input
8
+ PICKLE_KWARGS = dict(allow_pickle=False)
9
+
10
+
11
+ def save_npz(file, matrix, compressed=True):
12
+ """ Save a sparse matrix or array to a file using ``.npz`` format.
13
+
14
+ Parameters
15
+ ----------
16
+ file : str or file-like object
17
+ Either the file name (string) or an open file (file-like object)
18
+ where the data will be saved. If file is a string, the ``.npz``
19
+ extension will be appended to the file name if it is not already
20
+ there.
21
+ matrix: spmatrix or sparray
22
+ The sparse matrix or array to save.
23
+ Supported formats: ``csc``, ``csr``, ``bsr``, ``dia`` or ``coo``.
24
+ compressed : bool, optional
25
+ Allow compressing the file. Default: True
26
+
27
+ See Also
28
+ --------
29
+ scipy.sparse.load_npz: Load a sparse matrix from a file using ``.npz`` format.
30
+ numpy.savez: Save several arrays into a ``.npz`` archive.
31
+ numpy.savez_compressed : Save several arrays into a compressed ``.npz`` archive.
32
+
33
+ Examples
34
+ --------
35
+ Store sparse matrix to disk, and load it again:
36
+
37
+ >>> import numpy as np
38
+ >>> import scipy as sp
39
+ >>> sparse_matrix = sp.sparse.csc_matrix([[0, 0, 3], [4, 0, 0]])
40
+ >>> sparse_matrix
41
+ <2x3 sparse matrix of type '<class 'numpy.int64'>'
42
+ with 2 stored elements in Compressed Sparse Column format>
43
+ >>> sparse_matrix.toarray()
44
+ array([[0, 0, 3],
45
+ [4, 0, 0]], dtype=int64)
46
+
47
+ >>> sp.sparse.save_npz('/tmp/sparse_matrix.npz', sparse_matrix)
48
+ >>> sparse_matrix = sp.sparse.load_npz('/tmp/sparse_matrix.npz')
49
+
50
+ >>> sparse_matrix
51
+ <2x3 sparse matrix of type '<class 'numpy.int64'>'
52
+ with 2 stored elements in Compressed Sparse Column format>
53
+ >>> sparse_matrix.toarray()
54
+ array([[0, 0, 3],
55
+ [4, 0, 0]], dtype=int64)
56
+ """
57
+ arrays_dict = {}
58
+ if matrix.format in ('csc', 'csr', 'bsr'):
59
+ arrays_dict.update(indices=matrix.indices, indptr=matrix.indptr)
60
+ elif matrix.format == 'dia':
61
+ arrays_dict.update(offsets=matrix.offsets)
62
+ elif matrix.format == 'coo':
63
+ arrays_dict.update(row=matrix.row, col=matrix.col)
64
+ else:
65
+ msg = f'Save is not implemented for sparse matrix of format {matrix.format}.'
66
+ raise NotImplementedError(msg)
67
+ arrays_dict.update(
68
+ format=matrix.format.encode('ascii'),
69
+ shape=matrix.shape,
70
+ data=matrix.data
71
+ )
72
+ if isinstance(matrix, sp.sparse.sparray):
73
+ arrays_dict.update(_is_array=True)
74
+ if compressed:
75
+ np.savez_compressed(file, **arrays_dict)
76
+ else:
77
+ np.savez(file, **arrays_dict)
78
+
79
+
80
def load_npz(file):
    """ Load a sparse array/matrix from a file using ``.npz`` format.

    Parameters
    ----------
    file : str or file-like object
        Either the file name (string) or an open file (file-like object)
        where the data will be loaded.

    Returns
    -------
    result : csc_array, csr_array, bsr_array, dia_array or coo_array
        A sparse array/matrix containing the loaded data.

    Raises
    ------
    OSError
        If the input file does not exist or cannot be read.

    See Also
    --------
    scipy.sparse.save_npz: Save a sparse array/matrix to a file using ``.npz`` format.
    numpy.load: Load several arrays from a ``.npz`` archive.

    Examples
    --------
    Store a sparse array to disk, and load it again:

    >>> import scipy as sp
    >>> sparse_array = sp.sparse.csc_array([[0, 0, 3], [4, 0, 0]])
    >>> sp.sparse.save_npz('/tmp/sparse_array.npz', sparse_array)
    >>> sp.sparse.load_npz('/tmp/sparse_array.npz').toarray()
    array([[0, 0, 3],
           [4, 0, 0]], dtype=int64)
    """
    # allow_pickle=False keeps loading safe against malicious input.
    with np.load(file, allow_pickle=False) as loaded:
        fmt = loaded.get('format')
        if fmt is None:
            raise ValueError(f'The file {file} does not contain '
                             f'a sparse array or matrix.')
        fmt = fmt.item()

        if not isinstance(fmt, str):
            # Play safe with Python 2 vs 3 backward compatibility;
            # files saved with SciPy < 1.0.0 may contain unicode or bytes.
            fmt = fmt.decode('ascii')

        # Archives written from sparray instances carry an _is_array marker.
        suffix = '_array' if loaded.get('_is_array') else '_matrix'
        container_name = fmt + suffix

        try:
            cls = getattr(sp.sparse, f'{container_name}')
        except AttributeError as e:
            raise ValueError(f'Unknown format "{container_name}"') from e

        if fmt in ('csc', 'csr', 'bsr'):
            return cls((loaded['data'], loaded['indices'], loaded['indptr']),
                       shape=loaded['shape'])
        elif fmt == 'dia':
            return cls((loaded['data'], loaded['offsets']),
                       shape=loaded['shape'])
        elif fmt == 'coo':
            return cls((loaded['data'], (loaded['row'], loaded['col'])),
                       shape=loaded['shape'])
        else:
            raise NotImplementedError(f'Load is not implemented for '
                                      f'sparse matrix of format {fmt}.')
env-llmeval/lib/python3.10/site-packages/scipy/sparse/_sputils.py ADDED
@@ -0,0 +1,451 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Utility functions for sparse matrix module
2
+ """
3
+
4
+ import sys
5
+ from typing import Any, Literal, Optional, Union
6
+ import operator
7
+ import numpy as np
8
+ from math import prod
9
+ import scipy.sparse as sp
10
+ from scipy._lib._util import np_long, np_ulong
11
+
12
+
13
+ __all__ = ['upcast', 'getdtype', 'getdata', 'isscalarlike', 'isintlike',
14
+ 'isshape', 'issequence', 'isdense', 'ismatrix', 'get_sum_dtype']
15
+
16
+ supported_dtypes = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc,
17
+ np.uintc, np_long, np_ulong, np.longlong, np.ulonglong,
18
+ np.float32, np.float64, np.longdouble,
19
+ np.complex64, np.complex128, np.clongdouble]
20
+
21
+ _upcast_memo = {}
22
+
23
+
24
def upcast(*args):
    """Returns the nearest supported sparse dtype for the
    combination of one or more types.

    upcast(t0, t1, ..., tn) -> T where T is a supported dtype

    Examples
    --------
    >>> from scipy.sparse._sputils import upcast
    >>> upcast('int32')
    <type 'numpy.int32'>
    >>> upcast('bool')
    <type 'numpy.bool_'>
    >>> upcast('int32','float32')
    <type 'numpy.float64'>
    >>> upcast('bool',complex,float)
    <type 'numpy.complex128'>

    """
    # Memoize on the hash of the argument tuple; the same dtype combinations
    # recur constantly in sparse arithmetic.
    t = _upcast_memo.get(hash(args))
    if t is not None:
        return t

    upcast = np.result_type(*args)

    # supported_dtypes is ordered narrowest-first, so the first castable
    # entry is the smallest supported dtype that can hold the result.
    for t in supported_dtypes:
        if np.can_cast(upcast, t):
            _upcast_memo[hash(args)] = t
            return t

    raise TypeError(f'no supported conversion for types: {args!r}')
56
+
57
+
58
def upcast_char(*args):
    """Same as `upcast` but taking dtype.char as input (faster)."""
    # Char tuples are hashable directly, so they share _upcast_memo with
    # upcast() without colliding (different key shapes).
    t = _upcast_memo.get(args)
    if t is not None:
        return t
    t = upcast(*map(np.dtype, args))
    _upcast_memo[args] = t
    return t
66
+
67
+
68
def upcast_scalar(dtype, scalar):
    """Determine data type for binary operation between an array of
    type `dtype` and a scalar.
    """
    # Let NumPy's own promotion rules decide, by probing with a tiny array.
    probe = np.array([0], dtype=dtype)
    return (probe * scalar).dtype
73
+
74
+
75
def downcast_intp_index(arr):
    """
    Down-cast index array to np.intp dtype if it is of a larger dtype.

    Raise an error if the array contains a value that is too large for
    intp.
    """
    if arr.dtype.itemsize <= np.dtype(np.intp).itemsize:
        # Already fits in the native pointer-sized integer: nothing to do.
        return arr
    if arr.size == 0:
        return arr.astype(np.intp)
    bounds = np.iinfo(np.intp)
    if arr.max() > bounds.max or arr.min() < bounds.min:
        raise ValueError("Cannot deal with arrays with indices larger "
                         "than the machine maximum address size "
                         "(e.g. 64-bit indices on 32-bit machine).")
    return arr.astype(np.intp)
93
+
94
+
95
def to_native(A):
    """
    Ensure that the data type of the NumPy array `A` has native byte order.

    `A` must be a NumPy array. If the data type of `A` does not have native
    byte order, a copy of `A` with a native byte order is returned. Otherwise
    `A` is returned.
    """
    if A.dtype.isnative:
        # Already native: hand back the input itself (no view, no copy).
        return A
    return np.asarray(A, dtype=A.dtype.newbyteorder('native'))
109
+
110
+
111
def getdtype(dtype, a=None, default=None):
    """Resolve a dtype from `dtype`, then `a.dtype`, then `default`.

    If `dtype` is given, it is validated (object dtype is rejected, since
    sparse matrices do not support it) and returned.  Otherwise `a.dtype`
    is used; if `a` has no dtype either, `default` is used, and a
    TypeError is raised when no fallback exists.
    """
    if dtype is not None:
        newdtype = np.dtype(dtype)
        # Only explicitly requested dtypes are validated here, matching the
        # historical behavior of this helper.
        if newdtype == np.object_:
            raise ValueError(
                "object dtype is not supported by sparse matrices"
            )
        return newdtype
    try:
        return a.dtype
    except AttributeError as e:
        if default is None:
            raise TypeError("could not interpret data type") from e
        return np.dtype(default)
135
+
136
+
137
def getdata(obj, dtype=None, copy=False) -> np.ndarray:
    """
    This is a wrapper of `np.array(obj, dtype=dtype, copy=copy)`
    that will raise an error if the result is an object array.
    """
    data = np.array(obj, dtype=dtype, copy=copy)
    # Defer to getdtype for checking that the dtype is OK.
    # This is called for the validation only; we don't need the return value.
    getdtype(data.dtype)
    return data
147
+
148
+
149
def get_index_dtype(arrays=(), maxval=None, check_contents=False):
    """
    Based on input (integer) arrays `a`, determine a suitable index data
    type that can hold the data in the arrays.

    Parameters
    ----------
    arrays : tuple of array_like
        Input arrays whose types/contents to check
    maxval : float, optional
        Maximum value needed
    check_contents : bool, optional
        Whether to check the values in the arrays and not just their types.
        Default: False (check only the types)

    Returns
    -------
    dtype : dtype
        Suitable index data type (int32 or int64)

    """

    int32min = np.int32(np.iinfo(np.int32).min)
    int32max = np.int32(np.iinfo(np.int32).max)

    # not using intc directly due to misinteractions with pythran
    dtype = np.int32 if np.intc().itemsize == 4 else np.int64
    if maxval is not None:
        maxval = np.int64(maxval)
        if maxval > int32max:
            dtype = np.int64

    # Allow a single ndarray to be passed where a tuple is expected.
    if isinstance(arrays, np.ndarray):
        arrays = (arrays,)

    for arr in arrays:
        arr = np.asarray(arr)
        if not np.can_cast(arr.dtype, np.int32):
            if check_contents:
                if arr.size == 0:
                    # a bigger type not needed
                    continue
                elif np.issubdtype(arr.dtype, np.integer):
                    maxval = arr.max()
                    minval = arr.min()
                    if minval >= int32min and maxval <= int32max:
                        # a bigger type not needed
                        continue

            # Either contents were not checked or they exceed int32 range:
            # fall back to int64 for all arrays.
            dtype = np.int64
            break

    return dtype
202
+
203
+
204
def get_sum_dtype(dtype: np.dtype) -> np.dtype:
    """Return the accumulator dtype NumPy's ``sum`` would use for `dtype`.

    Small integer kinds widen to the platform word size, with unsigned
    kinds staying unsigned; everything else accumulates in its own dtype.
    """
    is_unsigned = dtype.kind == 'u'
    if is_unsigned and np.can_cast(dtype, np.uint):
        return np.uint
    if np.can_cast(dtype, np.int_):
        return np.int_
    return dtype
211
+
212
+
213
def isscalarlike(x) -> bool:
    """Is x either a scalar, an array scalar, or a 0-dim array?"""
    # np.isscalar covers Python/NumPy scalars; the second clause catches
    # 0-dimensional ndarrays, which np.isscalar rejects.
    return np.isscalar(x) or (isdense(x) and x.ndim == 0)
216
+
217
+
218
def isintlike(x) -> bool:
    """Is x appropriate as an index into a sparse matrix? Returns True
    if it can be cast safely to a machine int.
    """
    # Fast-path: anything with dimensions (arrays, sequences) is never
    # int-like, and skipping them avoids the slow exception path below.
    if np.ndim(x) != 0:
        return False
    try:
        operator.index(x)
        return True
    except (TypeError, ValueError):
        pass
    # Not an __index__ type; see whether it is an inexact value that merely
    # equals an integer (e.g. 3.0), which is deliberately disallowed.
    try:
        equals_int = bool(int(x) == x)
    except (TypeError, ValueError):
        return False
    if equals_int:
        raise ValueError("Inexact indices into sparse matrices are not allowed")
    return equals_int
238
+
239
+
240
def isshape(x, nonneg=False, *, allow_1d=False) -> bool:
    """Is x a valid tuple of dimensions?

    If nonneg, also checks that the dimensions are non-negative.
    If allow_1d, shapes of length 1 or 2 are allowed.
    """
    ndim = len(x)
    if ndim != 2 and not (allow_1d and ndim == 1):
        return False
    # Each dimension must be int-like (and non-negative when requested).
    for d in x:
        if not isintlike(d):
            return False
        if nonneg and d < 0:
            return False
    return True
255
+
256
+
257
def issequence(t) -> bool:
    """True for a flat sequence: a 1-D ndarray, or a list/tuple that is
    empty or whose first element is a scalar."""
    if isinstance(t, np.ndarray):
        return t.ndim == 1
    if isinstance(t, (list, tuple)):
        return len(t) == 0 or np.isscalar(t[0])
    return False
261
+
262
+
263
def ismatrix(t) -> bool:
    """Return True for a matrix-like value: a non-empty list/tuple whose
    first element is itself a flat sequence, or a 2-D ndarray."""
    if isinstance(t, (list, tuple)):
        return len(t) > 0 and issequence(t[0])
    return isinstance(t, np.ndarray) and t.ndim == 2
267
+
268
+
269
def isdense(x) -> bool:
    """Return True when *x* is a NumPy ndarray (a dense array)."""
    return isinstance(x, np.ndarray)
271
+
272
+
273
def validateaxis(axis) -> None:
    """Validate an ``axis`` argument for sparse reductions.

    Accepts ``None`` or an integer in ``[-2, 1]``. Raises ``TypeError``
    for tuples and non-integer types, and ``ValueError`` for integers
    outside the valid range.
    """
    if axis is None:
        return

    axis_type = type(axis)

    # NumPy accepts tuples of axes, but with at most two dimensions a tuple
    # axis adds nothing for sparse containers, so reject it explicitly.
    if axis_type == tuple:
        raise TypeError("Tuples are not accepted for the 'axis' parameter. "
                        "Please pass in one of the following: "
                        "{-2, -1, 0, 1, None}.")

    # Mirror NumPy's TypeError for non-integer axis values. np.dtype() maps
    # the Python type to a dtype so subdtype checking works uniformly.
    if not np.issubdtype(np.dtype(axis_type), np.integer):
        raise TypeError(f"axis must be an integer, not {axis_type.__name__}")

    if not (-2 <= axis <= 1):
        raise ValueError("axis out of range")
294
+
295
+
296
def check_shape(args, current_shape=None, *, allow_1d=False) -> tuple[int, ...]:
    """Imitate numpy.matrix handling of shape arguments

    Parameters
    ----------
    args : array_like
        Data structures providing information about the shape of the sparse array.
    current_shape : tuple, optional
        The current shape of the sparse array or matrix.
        If None (default), the current shape will be inferred from args.
    allow_1d : bool, optional
        If True, then 1-D or 2-D arrays are accepted.
        If False (default), then only 2-D arrays are accepted and an error is
        raised otherwise.

    Returns
    -------
    new_shape: tuple
        The new shape after validation.
    """
    if len(args) == 0:
        raise TypeError("function missing 1 required positional argument: "
                        "'shape'")

    # Normalize args into a tuple of ints: either a single iterable,
    # a single scalar (1-tuple shape), or dimensions passed separately.
    if len(args) == 1:
        try:
            dims = iter(args[0])
        except TypeError:
            new_shape = (operator.index(args[0]), )
        else:
            new_shape = tuple(operator.index(d) for d in dims)
    else:
        new_shape = tuple(operator.index(d) for d in args)

    if current_shape is None:
        # No reshape semantics: the shape must be fully specified.
        if allow_1d:
            if len(new_shape) not in (1, 2):
                raise ValueError('shape must be a 1- or 2-tuple of positive '
                                 'integers')
        elif len(new_shape) != 2:
            raise ValueError('shape must be a 2-tuple of positive integers')
        if any(d < 0 for d in new_shape):
            raise ValueError("'shape' elements cannot be negative")
    else:
        # Reshape semantics: at most one dimension may be -1 (inferred),
        # and the total number of elements must be preserved.
        current_size = prod(current_shape)

        unknown = [i for i, d in enumerate(new_shape) if d < 0]
        if len(unknown) > 1:
            raise ValueError('can only specify one unknown dimension')
        if len(unknown) == 1:
            pos = unknown[0]
            specified = prod(new_shape[:pos] + new_shape[pos + 1:])
            inferred, leftover = divmod(current_size, specified)
            if leftover != 0:
                err_shape = tuple('newshape' if d < 0 else d for d in new_shape)
                raise ValueError('cannot reshape array of size {} into shape {}'
                                 ''.format(current_size, err_shape))
            new_shape = new_shape[:pos] + (inferred,) + new_shape[pos + 1:]
        elif prod(new_shape) != current_size:
            raise ValueError('cannot reshape array of size {} into shape {}'
                             .format(current_size, new_shape))

    if len(new_shape) != 2 and not (allow_1d and len(new_shape) == 1):
        raise ValueError('matrix shape must be two-dimensional')

    return new_shape
365
+
366
+
367
def check_reshape_kwargs(kwargs):
    """Unpack keyword arguments for reshape function.

    Extracts 'order' (default 'C') and 'copy' (default False) from a dict
    of star keyword arguments, raising TypeError if any other keys remain.
    (Historically needed because keyword arguments after star arguments
    could not be declared directly in Python 2.)

    Parameters
    ----------
    kwargs : dict
        Keyword arguments of a reshape call. Consumed destructively:
        'order' and 'copy' are popped from it.

    Returns
    -------
    order : str
        Requested memory layout, 'C' by default.
    copy : bool
        Whether a copy was requested, False by default.
    """
    order = kwargs.pop('order', 'C')
    copy = kwargs.pop('copy', False)
    if kwargs:  # Some unused kwargs remain
        # Fixed typo in the message: "keywords arguments" -> "keyword arguments".
        raise TypeError('reshape() got unexpected keyword arguments: {}'
                        .format(', '.join(kwargs.keys())))
    return order, copy
382
+
383
+
384
def is_pydata_spmatrix(m) -> bool:
    """
    Check whether object is pydata/sparse matrix, avoiding importing the module.
    """
    # Only consult sys.modules: if 'sparse' was never imported by the
    # caller, m cannot be one of its instances, and we avoid the import cost.
    sparse_module = sys.modules.get('sparse')
    base_cls = getattr(sparse_module, 'SparseArray', None)
    if base_cls is None:
        return False
    return isinstance(m, base_cls)
390
+
391
+
392
def convert_pydata_sparse_to_scipy(
    arg: Any, target_format: Optional[Literal["csc", "csr"]] = None
) -> Union[Any, "sp.spmatrix"]:
    """
    Convert a pydata/sparse array to scipy sparse matrix,
    pass through anything else.
    """
    if not is_pydata_spmatrix(arg):
        return arg
    converted = arg.to_scipy_sparse()
    if target_format is not None:
        converted = converted.asformat(target_format)
    elif converted.format not in ("csc", "csr"):
        # Default to CSC when the converted format is not compressed.
        converted = converted.tocsc()
    return converted
406
+
407
+
408
+ ###############################################################################
409
+ # Wrappers for NumPy types that are deprecated
410
+
411
+ # Numpy versions of these functions raise deprecation warnings, the
412
+ # ones below do not.
413
+
414
def matrix(*args, **kwargs):
    """Build an ``np.matrix`` without NumPy's matrix-construction
    deprecation warning, by viewing a freshly built array as a matrix."""
    arr = np.array(*args, **kwargs)
    return arr.view(np.matrix)
416
+
417
+
418
def asmatrix(data, dtype=None):
    """Return *data* as an ``np.matrix``, reusing the input when it is
    already a matrix of the requested dtype (no copy in that case)."""
    if isinstance(data, np.matrix):
        if dtype is None or data.dtype == dtype:
            return data
    return np.asarray(data, dtype=dtype).view(np.matrix)
422
+
423
+ ###############################################################################
424
+
425
+
426
+ def _todata(s) -> np.ndarray:
427
+ """Access nonzero values, possibly after summing duplicates.
428
+
429
+ Parameters
430
+ ----------
431
+ s : sparse array
432
+ Input sparse array.
433
+
434
+ Returns
435
+ -------
436
+ data: ndarray
437
+ Nonzero values of the array, with shape (s.nnz,)
438
+
439
+ """
440
+ if isinstance(s, sp._data._data_matrix):
441
+ return s._deduped_data()
442
+
443
+ if isinstance(s, sp.dok_array):
444
+ return np.fromiter(s.values(), dtype=s.dtype, count=s.nnz)
445
+
446
+ if isinstance(s, sp.lil_array):
447
+ data = np.empty(s.nnz, dtype=s.dtype)
448
+ sp._csparsetools.lil_flatten_to_array(s.data, data)
449
+ return data
450
+
451
+ return s.tocoo()._deduped_data()
env-llmeval/lib/python3.10/site-packages/scipy/sparse/base.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.sparse` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation

# Names historically reachable as ``scipy.sparse.base.<name>``. Kept only so
# the deprecation shim below can resolve each of them while warning.
__all__ = [  # noqa: F822
    'MAXPRINT',
    'SparseEfficiencyWarning',
    'SparseFormatWarning',
    'SparseWarning',
    'asmatrix',
    'check_reshape_kwargs',
    'check_shape',
    'get_sum_dtype',
    'isdense',
    'isscalarlike',
    'issparse',
    'isspmatrix',
    'spmatrix',
    'validateaxis',
]


def __dir__():
    # Advertise only the deprecated public names for dir()/tab-completion.
    return __all__


def __getattr__(name):
    # Lazily forward attribute access to the private scipy.sparse._base
    # module, emitting a DeprecationWarning via the shared helper.
    return _sub_module_deprecation(sub_package="sparse", module="base",
                                   private_modules=["_base"], all=__all__,
                                   attribute=name)
env-llmeval/lib/python3.10/site-packages/scipy/sparse/bsr.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.sparse` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation

# Names historically reachable as ``scipy.sparse.bsr.<name>``. Kept only so
# the deprecation shim below can resolve each of them while warning.
__all__ = [  # noqa: F822
    'bsr_matmat',
    'bsr_matrix',
    'bsr_matvec',
    'bsr_matvecs',
    'bsr_sort_indices',
    'bsr_tocsr',
    'bsr_transpose',
    'check_shape',
    'csr_matmat_maxnnz',
    'getdata',
    'getdtype',
    'isshape',
    'isspmatrix_bsr',
    'spmatrix',
    'to_native',
    'upcast',
    'warn',
]


def __dir__():
    # Advertise only the deprecated public names for dir()/tab-completion.
    return __all__


def __getattr__(name):
    # Lazily forward attribute access to the private scipy.sparse._bsr
    # module, emitting a DeprecationWarning via the shared helper.
    return _sub_module_deprecation(sub_package="sparse", module="bsr",
                                   private_modules=["_bsr"], all=__all__,
                                   attribute=name)
env-llmeval/lib/python3.10/site-packages/scipy/sparse/compressed.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.sparse` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation

# Names historically reachable as ``scipy.sparse.compressed.<name>``. Kept
# only so the deprecation shim below can resolve each of them while warning.
__all__ = [  # noqa: F822
    'IndexMixin',
    'SparseEfficiencyWarning',
    'check_shape',
    'csr_column_index1',
    'csr_column_index2',
    'csr_row_index',
    'csr_row_slice',
    'csr_sample_offsets',
    'csr_sample_values',
    'csr_todense',
    'downcast_intp_index',
    'get_csr_submatrix',
    'get_sum_dtype',
    'getdtype',
    'is_pydata_spmatrix',
    'isdense',
    'isintlike',
    'isscalarlike',
    'isshape',
    'operator',
    'to_native',
    'upcast',
    'upcast_char',
    'warn',
]


def __dir__():
    # Advertise only the deprecated public names for dir()/tab-completion.
    return __all__


def __getattr__(name):
    # Lazily forward attribute access to the private scipy.sparse._compressed
    # module, emitting a DeprecationWarning via the shared helper.
    return _sub_module_deprecation(sub_package="sparse", module="compressed",
                                   private_modules=["_compressed"], all=__all__,
                                   attribute=name)
env-llmeval/lib/python3.10/site-packages/scipy/sparse/coo.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.sparse` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation

# Names historically reachable as ``scipy.sparse.coo.<name>``. Kept only so
# the deprecation shim below can resolve each of them while warning.
__all__ = [  # noqa: F822
    'SparseEfficiencyWarning',
    'check_reshape_kwargs',
    'check_shape',
    'coo_matrix',
    'coo_matvec',
    'coo_tocsr',
    'coo_todense',
    'downcast_intp_index',
    'getdata',
    'getdtype',
    'isshape',
    'isspmatrix_coo',
    'operator',
    'spmatrix',
    'to_native',
    'upcast',
    'upcast_char',
    'warn',
]


def __dir__():
    # Advertise only the deprecated public names for dir()/tab-completion.
    return __all__


def __getattr__(name):
    # Lazily forward attribute access to the private scipy.sparse._coo
    # module, emitting a DeprecationWarning via the shared helper.
    return _sub_module_deprecation(sub_package="sparse", module="coo",
                                   private_modules=["_coo"], all=__all__,
                                   attribute=name)
env-llmeval/lib/python3.10/site-packages/scipy/sparse/csc.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.sparse` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation

# Names historically reachable as ``scipy.sparse.csc.<name>``. Kept only so
# the deprecation shim below can resolve each of them while warning.
__all__ = [  # noqa: F822
    'csc_matrix',
    'csc_tocsr',
    'expandptr',
    'isspmatrix_csc',
    'spmatrix',
    'upcast',
]


def __dir__():
    # Advertise only the deprecated public names for dir()/tab-completion.
    return __all__


def __getattr__(name):
    # Lazily forward attribute access to the private scipy.sparse._csc
    # module, emitting a DeprecationWarning via the shared helper.
    return _sub_module_deprecation(sub_package="sparse", module="csc",
                                   private_modules=["_csc"], all=__all__,
                                   attribute=name)
env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/__init__.py ADDED
@@ -0,0 +1,208 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ r"""
2
+ Compressed sparse graph routines (:mod:`scipy.sparse.csgraph`)
3
+ ==============================================================
4
+
5
+ .. currentmodule:: scipy.sparse.csgraph
6
+
7
+ Fast graph algorithms based on sparse matrix representations.
8
+
9
+ Contents
10
+ --------
11
+
12
+ .. autosummary::
13
+ :toctree: generated/
14
+
15
+ connected_components -- determine connected components of a graph
16
+ laplacian -- compute the laplacian of a graph
17
+ shortest_path -- compute the shortest path between points on a positive graph
18
+ dijkstra -- use Dijkstra's algorithm for shortest path
19
+ floyd_warshall -- use the Floyd-Warshall algorithm for shortest path
20
+ bellman_ford -- use the Bellman-Ford algorithm for shortest path
21
+ johnson -- use Johnson's algorithm for shortest path
22
+ breadth_first_order -- compute a breadth-first order of nodes
23
+ depth_first_order -- compute a depth-first order of nodes
24
+ breadth_first_tree -- construct the breadth-first tree from a given node
25
+ depth_first_tree -- construct a depth-first tree from a given node
26
+ minimum_spanning_tree -- construct the minimum spanning tree of a graph
27
+ reverse_cuthill_mckee -- compute permutation for reverse Cuthill-McKee ordering
28
+ maximum_flow -- solve the maximum flow problem for a graph
29
+ maximum_bipartite_matching -- compute a maximum matching of a bipartite graph
30
+ min_weight_full_bipartite_matching - compute a minimum weight full matching of a bipartite graph
31
+ structural_rank -- compute the structural rank of a graph
32
+ NegativeCycleError
33
+
34
+ .. autosummary::
35
+ :toctree: generated/
36
+
37
+ construct_dist_matrix
38
+ csgraph_from_dense
39
+ csgraph_from_masked
40
+ csgraph_masked_from_dense
41
+ csgraph_to_dense
42
+ csgraph_to_masked
43
+ reconstruct_path
44
+
45
+ Graph Representations
46
+ ---------------------
47
+ This module uses graphs which are stored in a matrix format. A
48
+ graph with N nodes can be represented by an (N x N) adjacency matrix G.
49
+ If there is a connection from node i to node j, then G[i, j] = w, where
50
+ w is the weight of the connection. For nodes i and j which are
51
+ not connected, the value depends on the representation:
52
+
53
+ - for dense array representations, non-edges are represented by
54
+ G[i, j] = 0, infinity, or NaN.
55
+
56
+ - for dense masked representations (of type np.ma.MaskedArray), non-edges
57
+ are represented by masked values. This can be useful when graphs with
58
+ zero-weight edges are desired.
59
+
60
+ - for sparse array representations, non-edges are represented by
61
+ non-entries in the matrix. This sort of sparse representation also
62
+ allows for edges with zero weights.
63
+
64
+ As a concrete example, imagine that you would like to represent the following
65
+ undirected graph::
66
+
67
+ G
68
+
69
+ (0)
70
+ / \
71
+ 1 2
72
+ / \
73
+ (2) (1)
74
+
75
+ This graph has three nodes, where node 0 and 1 are connected by an edge of
76
+ weight 2, and nodes 0 and 2 are connected by an edge of weight 1.
77
+ We can construct the dense, masked, and sparse representations as follows,
78
+ keeping in mind that an undirected graph is represented by a symmetric matrix::
79
+
80
+ >>> import numpy as np
81
+ >>> G_dense = np.array([[0, 2, 1],
82
+ ... [2, 0, 0],
83
+ ... [1, 0, 0]])
84
+ >>> G_masked = np.ma.masked_values(G_dense, 0)
85
+ >>> from scipy.sparse import csr_matrix
86
+ >>> G_sparse = csr_matrix(G_dense)
87
+
88
+ This becomes more difficult when zero edges are significant. For example,
89
+ consider the situation when we slightly modify the above graph::
90
+
91
+ G2
92
+
93
+ (0)
94
+ / \
95
+ 0 2
96
+ / \
97
+ (2) (1)
98
+
99
+ This is identical to the previous graph, except nodes 0 and 2 are connected
100
+ by an edge of zero weight. In this case, the dense representation above
101
+ leads to ambiguities: how can non-edges be represented if zero is a meaningful
102
+ value? In this case, either a masked or sparse representation must be used
103
+ to eliminate the ambiguity::
104
+
105
+ >>> import numpy as np
106
+ >>> G2_data = np.array([[np.inf, 2, 0 ],
107
+ ... [2, np.inf, np.inf],
108
+ ... [0, np.inf, np.inf]])
109
+ >>> G2_masked = np.ma.masked_invalid(G2_data)
110
+ >>> from scipy.sparse.csgraph import csgraph_from_dense
111
+ >>> # G2_sparse = csr_matrix(G2_data) would give the wrong result
112
+ >>> G2_sparse = csgraph_from_dense(G2_data, null_value=np.inf)
113
+ >>> G2_sparse.data
114
+ array([ 2., 0., 2., 0.])
115
+
116
+ Here we have used a utility routine from the csgraph submodule in order to
117
+ convert the dense representation to a sparse representation which can be
118
+ understood by the algorithms in submodule. By viewing the data array, we
119
+ can see that the zero values are explicitly encoded in the graph.
120
+
121
+ Directed vs. undirected
122
+ ^^^^^^^^^^^^^^^^^^^^^^^
123
+ Matrices may represent either directed or undirected graphs. This is
124
+ specified throughout the csgraph module by a boolean keyword. Graphs are
125
+ assumed to be directed by default. In a directed graph, traversal from node
126
+ i to node j can be accomplished over the edge G[i, j], but not the edge
127
+ G[j, i]. Consider the following dense graph::
128
+
129
+ >>> import numpy as np
130
+ >>> G_dense = np.array([[0, 1, 0],
131
+ ... [2, 0, 3],
132
+ ... [0, 4, 0]])
133
+
134
+ When ``directed=True`` we get the graph::
135
+
136
+ ---1--> ---3-->
137
+ (0) (1) (2)
138
+ <--2--- <--4---
139
+
140
+ In a non-directed graph, traversal from node i to node j can be
141
+ accomplished over either G[i, j] or G[j, i]. If both edges are not null,
142
+ and the two have unequal weights, then the smaller of the two is used.
143
+
144
+ So for the same graph, when ``directed=False`` we get the graph::
145
+
146
+ (0)--1--(1)--3--(2)
147
+
148
+ Note that a symmetric matrix will represent an undirected graph, regardless
149
+ of whether the 'directed' keyword is set to True or False. In this case,
150
+ using ``directed=True`` generally leads to more efficient computation.
151
+
152
+ The routines in this module accept as input either scipy.sparse representations
153
+ (csr, csc, or lil format), masked representations, or dense representations
154
+ with non-edges indicated by zeros, infinities, and NaN entries.
155
+ """ # noqa: E501
156
+
157
__docformat__ = "restructuredtext en"

# Public API of scipy.sparse.csgraph. Every name listed here is re-exported
# from one of the private implementation modules imported below.
__all__ = ['connected_components',
           'laplacian',
           'shortest_path',
           'floyd_warshall',
           'dijkstra',
           'bellman_ford',
           'johnson',
           'breadth_first_order',
           'depth_first_order',
           'breadth_first_tree',
           'depth_first_tree',
           'minimum_spanning_tree',
           'reverse_cuthill_mckee',
           'maximum_flow',
           'maximum_bipartite_matching',
           'min_weight_full_bipartite_matching',
           'structural_rank',
           'construct_dist_matrix',
           'reconstruct_path',
           'csgraph_masked_from_dense',
           'csgraph_from_dense',
           'csgraph_from_masked',
           'csgraph_to_dense',
           'csgraph_to_masked',
           'NegativeCycleError']

# Pull the public names out of the private (mostly Cython) submodules.
from ._laplacian import laplacian
from ._shortest_path import (
    shortest_path, floyd_warshall, dijkstra, bellman_ford, johnson,
    NegativeCycleError
)
from ._traversal import (
    breadth_first_order, depth_first_order, breadth_first_tree,
    depth_first_tree, connected_components
)
from ._min_spanning_tree import minimum_spanning_tree
from ._flow import maximum_flow
from ._matching import (
    maximum_bipartite_matching, min_weight_full_bipartite_matching
)
from ._reordering import reverse_cuthill_mckee, structural_rank
from ._tools import (
    construct_dist_matrix, reconstruct_path, csgraph_from_dense,
    csgraph_to_dense, csgraph_masked_from_dense, csgraph_from_masked,
    csgraph_to_masked
)

# Expose ``scipy.sparse.csgraph.test()`` for running this subpackage's test
# suite; the helper class itself is not part of the public namespace.
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (7.46 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/__pycache__/_laplacian.cpython-310.pyc ADDED
Binary file (16.6 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/__pycache__/_validation.cpython-310.pyc ADDED
Binary file (1.56 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/_flow.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (345 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/_laplacian.py ADDED
@@ -0,0 +1,562 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Laplacian of a compressed-sparse graph
3
+ """
4
+
5
+ import numpy as np
6
+ from scipy.sparse import issparse
7
+ from scipy.sparse.linalg import LinearOperator
8
+ from scipy.sparse._sputils import convert_pydata_sparse_to_scipy, is_pydata_spmatrix
9
+
10
+
11
+ ###############################################################################
12
+ # Graph laplacian
13
+ def laplacian(
14
+ csgraph,
15
+ normed=False,
16
+ return_diag=False,
17
+ use_out_degree=False,
18
+ *,
19
+ copy=True,
20
+ form="array",
21
+ dtype=None,
22
+ symmetrized=False,
23
+ ):
24
+ """
25
+ Return the Laplacian of a directed graph.
26
+
27
+ Parameters
28
+ ----------
29
+ csgraph : array_like or sparse matrix, 2 dimensions
30
+ compressed-sparse graph, with shape (N, N).
31
+ normed : bool, optional
32
+ If True, then compute symmetrically normalized Laplacian.
33
+ Default: False.
34
+ return_diag : bool, optional
35
+ If True, then also return an array related to vertex degrees.
36
+ Default: False.
37
+ use_out_degree : bool, optional
38
+ If True, then use out-degree instead of in-degree.
39
+ This distinction matters only if the graph is asymmetric.
40
+ Default: False.
41
+ copy: bool, optional
42
+ If False, then change `csgraph` in place if possible,
43
+ avoiding doubling the memory use.
44
+ Default: True, for backward compatibility.
45
+ form: 'array', or 'function', or 'lo'
46
+ Determines the format of the output Laplacian:
47
+
48
+ * 'array' is a numpy array;
49
+ * 'function' is a pointer to evaluating the Laplacian-vector
50
+ or Laplacian-matrix product;
51
+ * 'lo' results in the format of the `LinearOperator`.
52
+
53
+ Choosing 'function' or 'lo' always avoids doubling
54
+ the memory use, ignoring `copy` value.
55
+ Default: 'array', for backward compatibility.
56
+ dtype: None or one of numeric numpy dtypes, optional
57
+ The dtype of the output. If ``dtype=None``, the dtype of the
58
+ output matches the dtype of the input csgraph, except for
59
+ the case ``normed=True`` and integer-like csgraph, where
60
+ the output dtype is 'float' allowing accurate normalization,
61
+ but dramatically increasing the memory use.
62
+ Default: None, for backward compatibility.
63
+ symmetrized: bool, optional
64
+ If True, then the output Laplacian is symmetric/Hermitian.
65
+ The symmetrization is done by ``csgraph + csgraph.T.conj``
66
+ without dividing by 2 to preserve integer dtypes if possible
67
+ prior to the construction of the Laplacian.
68
+ The symmetrization will increase the memory footprint of
69
+ sparse matrices unless the sparsity pattern is symmetric or
70
+ `form` is 'function' or 'lo'.
71
+ Default: False, for backward compatibility.
72
+
73
+ Returns
74
+ -------
75
+ lap : ndarray, or sparse matrix, or `LinearOperator`
76
+ The N x N Laplacian of csgraph. It will be a NumPy array (dense)
77
+ if the input was dense, or a sparse matrix otherwise, or
78
+ the format of a function or `LinearOperator` if
79
+ `form` equals 'function' or 'lo', respectively.
80
+ diag : ndarray, optional
81
+ The length-N main diagonal of the Laplacian matrix.
82
+ For the normalized Laplacian, this is the array of square roots
83
+ of vertex degrees or 1 if the degree is zero.
84
+
85
+ Notes
86
+ -----
87
+ The Laplacian matrix of a graph is sometimes referred to as the
88
+ "Kirchhoff matrix" or just the "Laplacian", and is useful in many
89
+ parts of spectral graph theory.
90
+ In particular, the eigen-decomposition of the Laplacian can give
91
+ insight into many properties of the graph, e.g.,
92
+ is commonly used for spectral data embedding and clustering.
93
+
94
+ The constructed Laplacian doubles the memory use if ``copy=True`` and
95
+ ``form="array"`` which is the default.
96
+ Choosing ``copy=False`` has no effect unless ``form="array"``
97
+ or the matrix is sparse in the ``coo`` format, or dense array, except
98
+ for the integer input with ``normed=True`` that forces the float output.
99
+
100
+ Sparse input is reformatted into ``coo`` if ``form="array"``,
101
+ which is the default.
102
+
103
+ If the input adjacency matrix is not symmetric, the Laplacian is
104
+ also non-symmetric unless ``symmetrized=True`` is used.
105
+
106
+ Diagonal entries of the input adjacency matrix are ignored and
107
+ replaced with zeros for the purpose of normalization where ``normed=True``.
108
+ The normalization uses the inverse square roots of row-sums of the input
109
+ adjacency matrix, and thus may fail if the row-sums contain
110
+ negative or complex with a non-zero imaginary part values.
111
+
112
+ The normalization is symmetric, making the normalized Laplacian also
113
+ symmetric if the input csgraph was symmetric.
114
+
115
+ References
116
+ ----------
117
+ .. [1] Laplacian matrix. https://en.wikipedia.org/wiki/Laplacian_matrix
118
+
119
+ Examples
120
+ --------
121
+ >>> import numpy as np
122
+ >>> from scipy.sparse import csgraph
123
+
124
+ Our first illustration is the symmetric graph
125
+
126
+ >>> G = np.arange(4) * np.arange(4)[:, np.newaxis]
127
+ >>> G
128
+ array([[0, 0, 0, 0],
129
+ [0, 1, 2, 3],
130
+ [0, 2, 4, 6],
131
+ [0, 3, 6, 9]])
132
+
133
+ and its symmetric Laplacian matrix
134
+
135
+ >>> csgraph.laplacian(G)
136
+ array([[ 0, 0, 0, 0],
137
+ [ 0, 5, -2, -3],
138
+ [ 0, -2, 8, -6],
139
+ [ 0, -3, -6, 9]])
140
+
141
+ The non-symmetric graph
142
+
143
+ >>> G = np.arange(9).reshape(3, 3)
144
+ >>> G
145
+ array([[0, 1, 2],
146
+ [3, 4, 5],
147
+ [6, 7, 8]])
148
+
149
+ has different row- and column sums, resulting in two varieties
150
+ of the Laplacian matrix, using an in-degree, which is the default
151
+
152
+ >>> L_in_degree = csgraph.laplacian(G)
153
+ >>> L_in_degree
154
+ array([[ 9, -1, -2],
155
+ [-3, 8, -5],
156
+ [-6, -7, 7]])
157
+
158
+ or alternatively an out-degree
159
+
160
+ >>> L_out_degree = csgraph.laplacian(G, use_out_degree=True)
161
+ >>> L_out_degree
162
+ array([[ 3, -1, -2],
163
+ [-3, 8, -5],
164
+ [-6, -7, 13]])
165
+
166
+ Constructing a symmetric Laplacian matrix, one can add the two as
167
+
168
+ >>> L_in_degree + L_out_degree.T
169
+ array([[ 12, -4, -8],
170
+ [ -4, 16, -12],
171
+ [ -8, -12, 20]])
172
+
173
+ or use the ``symmetrized=True`` option
174
+
175
+ >>> csgraph.laplacian(G, symmetrized=True)
176
+ array([[ 12, -4, -8],
177
+ [ -4, 16, -12],
178
+ [ -8, -12, 20]])
179
+
180
+ that is equivalent to symmetrizing the original graph
181
+
182
+ >>> csgraph.laplacian(G + G.T)
183
+ array([[ 12, -4, -8],
184
+ [ -4, 16, -12],
185
+ [ -8, -12, 20]])
186
+
187
+ The goal of normalization is to make the non-zero diagonal entries
188
+ of the Laplacian matrix to be all unit, also scaling off-diagonal
189
+ entries correspondingly. The normalization can be done manually, e.g.,
190
+
191
+ >>> G = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]])
192
+ >>> L, d = csgraph.laplacian(G, return_diag=True)
193
+ >>> L
194
+ array([[ 2, -1, -1],
195
+ [-1, 2, -1],
196
+ [-1, -1, 2]])
197
+ >>> d
198
+ array([2, 2, 2])
199
+ >>> scaling = np.sqrt(d)
200
+ >>> scaling
201
+ array([1.41421356, 1.41421356, 1.41421356])
202
+ >>> (1/scaling)*L*(1/scaling)
203
+ array([[ 1. , -0.5, -0.5],
204
+ [-0.5, 1. , -0.5],
205
+ [-0.5, -0.5, 1. ]])
206
+
207
+ Or using ``normed=True`` option
208
+
209
+ >>> L, d = csgraph.laplacian(G, return_diag=True, normed=True)
210
+ >>> L
211
+ array([[ 1. , -0.5, -0.5],
212
+ [-0.5, 1. , -0.5],
213
+ [-0.5, -0.5, 1. ]])
214
+
215
+ which now instead of the diagonal returns the scaling coefficients
216
+
217
+ >>> d
218
+ array([1.41421356, 1.41421356, 1.41421356])
219
+
220
+ Zero scaling coefficients are substituted with 1s, where scaling
221
+ has thus no effect, e.g.,
222
+
223
+ >>> G = np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0]])
224
+ >>> G
225
+ array([[0, 0, 0],
226
+ [0, 0, 1],
227
+ [0, 1, 0]])
228
+ >>> L, d = csgraph.laplacian(G, return_diag=True, normed=True)
229
+ >>> L
230
+ array([[ 0., -0., -0.],
231
+ [-0., 1., -1.],
232
+ [-0., -1., 1.]])
233
+ >>> d
234
+ array([1., 1., 1.])
235
+
236
+ Only the symmetric normalization is implemented, resulting
237
+ in a symmetric Laplacian matrix if and only if its graph is symmetric
238
+ and has all non-negative degrees, like in the examples above.
239
+
240
+ The output Laplacian matrix is by default a dense array or a sparse matrix
241
+ inferring its shape, format, and dtype from the input graph matrix:
242
+
243
+ >>> G = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]]).astype(np.float32)
244
+ >>> G
245
+ array([[0., 1., 1.],
246
+ [1., 0., 1.],
247
+ [1., 1., 0.]], dtype=float32)
248
+ >>> csgraph.laplacian(G)
249
+ array([[ 2., -1., -1.],
250
+ [-1., 2., -1.],
251
+ [-1., -1., 2.]], dtype=float32)
252
+
253
+ but can alternatively be generated matrix-free as a LinearOperator:
254
+
255
+ >>> L = csgraph.laplacian(G, form="lo")
256
+ >>> L
257
+ <3x3 _CustomLinearOperator with dtype=float32>
258
+ >>> L(np.eye(3))
259
+ array([[ 2., -1., -1.],
260
+ [-1., 2., -1.],
261
+ [-1., -1., 2.]])
262
+
263
+ or as a lambda-function:
264
+
265
+ >>> L = csgraph.laplacian(G, form="function")
266
+ >>> L
267
+ <function _laplace.<locals>.<lambda> at 0x0000012AE6F5A598>
268
+ >>> L(np.eye(3))
269
+ array([[ 2., -1., -1.],
270
+ [-1., 2., -1.],
271
+ [-1., -1., 2.]])
272
+
273
+ The Laplacian matrix is used for
274
+ spectral data clustering and embedding
275
+ as well as for spectral graph partitioning.
276
+ Our final example illustrates the latter
277
+ for a noisy directed linear graph.
278
+
279
+ >>> from scipy.sparse import diags, random
280
+ >>> from scipy.sparse.linalg import lobpcg
281
+
282
+ Create a directed linear graph with ``N=35`` vertices
283
+ using a sparse adjacency matrix ``G``:
284
+
285
+ >>> N = 35
286
+ >>> G = diags(np.ones(N-1), 1, format="csr")
287
+
288
+ Fix a random seed ``rng`` and add a random sparse noise to the graph ``G``:
289
+
290
+ >>> rng = np.random.default_rng()
291
+ >>> G += 1e-2 * random(N, N, density=0.1, random_state=rng)
292
+
293
+ Set initial approximations for eigenvectors:
294
+
295
+ >>> X = rng.random((N, 2))
296
+
297
+ The constant vector of ones is always a trivial eigenvector
298
+ of the non-normalized Laplacian to be filtered out:
299
+
300
+ >>> Y = np.ones((N, 1))
301
+
302
+ Alternating (1) the sign of the graph weights allows determining
303
+ labels for spectral max- and min- cuts in a single loop.
304
+ Since the graph is directed, the option ``symmetrized=True``
305
+ must be used in the construction of the Laplacian.
306
+ The option ``normed=True`` cannot be used in (2) for the negative weights
307
+ here as the symmetric normalization evaluates square roots.
308
+ The option ``form="lo"`` in (2) is matrix-free, i.e., guarantees
309
+ a fixed memory footprint and read-only access to the graph.
310
+ Calling the eigenvalue solver ``lobpcg`` (3) computes the Fiedler vector
311
+ that determines the labels as the signs of its components in (5).
312
+ Since the sign in an eigenvector is not deterministic and can flip,
313
+ we fix the sign of the first component to be always +1 in (4).
314
+
315
+ >>> for cut in ["max", "min"]:
316
+ ... G = -G # 1.
317
+ ... L = csgraph.laplacian(G, symmetrized=True, form="lo") # 2.
318
+ ... _, eves = lobpcg(L, X, Y=Y, largest=False, tol=1e-3) # 3.
319
+ ... eves *= np.sign(eves[0, 0]) # 4.
320
+ ... print(cut + "-cut labels:\\n", 1 * (eves[:, 0]>0)) # 5.
321
+ max-cut labels:
322
+ [1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1 0 1]
323
+ min-cut labels:
324
+ [1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0]
325
+
326
+ As anticipated for a (slightly noisy) linear graph,
327
+ the max-cut strips all the edges of the graph coloring all
328
+ odd vertices into one color and all even vertices into another one,
329
+ while the balanced min-cut partitions the graph
330
+ in the middle by deleting a single edge.
331
+ Both determined partitions are optimal.
332
+ """
333
+ is_pydata_sparse = is_pydata_spmatrix(csgraph)
334
+ if is_pydata_sparse:
335
+ pydata_sparse_cls = csgraph.__class__
336
+ csgraph = convert_pydata_sparse_to_scipy(csgraph)
337
+ if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
338
+ raise ValueError('csgraph must be a square matrix or array')
339
+
340
+ if normed and (
341
+ np.issubdtype(csgraph.dtype, np.signedinteger)
342
+ or np.issubdtype(csgraph.dtype, np.uint)
343
+ ):
344
+ csgraph = csgraph.astype(np.float64)
345
+
346
+ if form == "array":
347
+ create_lap = (
348
+ _laplacian_sparse if issparse(csgraph) else _laplacian_dense
349
+ )
350
+ else:
351
+ create_lap = (
352
+ _laplacian_sparse_flo
353
+ if issparse(csgraph)
354
+ else _laplacian_dense_flo
355
+ )
356
+
357
+ degree_axis = 1 if use_out_degree else 0
358
+
359
+ lap, d = create_lap(
360
+ csgraph,
361
+ normed=normed,
362
+ axis=degree_axis,
363
+ copy=copy,
364
+ form=form,
365
+ dtype=dtype,
366
+ symmetrized=symmetrized,
367
+ )
368
+ if is_pydata_sparse:
369
+ lap = pydata_sparse_cls.from_scipy_sparse(lap)
370
+ if return_diag:
371
+ return lap, d
372
+ return lap
373
+
374
+
375
+ def _setdiag_dense(m, d):
376
+ step = len(d) + 1
377
+ m.flat[::step] = d
378
+
379
+
380
+ def _laplace(m, d):
381
+ return lambda v: v * d[:, np.newaxis] - m @ v
382
+
383
+
384
+ def _laplace_normed(m, d, nd):
385
+ laplace = _laplace(m, d)
386
+ return lambda v: nd[:, np.newaxis] * laplace(v * nd[:, np.newaxis])
387
+
388
+
389
+ def _laplace_sym(m, d):
390
+ return (
391
+ lambda v: v * d[:, np.newaxis]
392
+ - m @ v
393
+ - np.transpose(np.conjugate(np.transpose(np.conjugate(v)) @ m))
394
+ )
395
+
396
+
397
+ def _laplace_normed_sym(m, d, nd):
398
+ laplace_sym = _laplace_sym(m, d)
399
+ return lambda v: nd[:, np.newaxis] * laplace_sym(v * nd[:, np.newaxis])
400
+
401
+
402
+ def _linearoperator(mv, shape, dtype):
403
+ return LinearOperator(matvec=mv, matmat=mv, shape=shape, dtype=dtype)
404
+
405
+
406
def _laplacian_sparse_flo(graph, normed, axis, copy, form, dtype, symmetrized):
    """Build a matrix-free Laplacian for a sparse ``graph``.

    Returns ``(lap, d)``: ``lap`` is a callable when ``form == "function"``
    or a ``LinearOperator`` when ``form == "lo"``; ``d`` is the Laplacian
    diagonal (the square-root degree weights when ``normed`` is True),
    cast to ``dtype`` (defaulting to the graph's dtype).
    """
    # `copy` exists only for signature parity with the array backends.
    del copy

    if dtype is None:
        dtype = graph.dtype

    degrees = np.asarray(graph.sum(axis=axis)).ravel()
    diagonal = graph.diagonal()
    if symmetrized:
        # Degrees of graph + graph^H; the self-loop weight is counted twice.
        degrees = degrees + np.asarray(graph.sum(axis=1 - axis)).ravel()
        lap_diag = degrees - diagonal - diagonal
    else:
        lap_diag = degrees - diagonal

    if normed:
        isolated = lap_diag == 0
        # Isolated vertices get weight 1 so the 1/w scaling stays finite.
        w = np.where(isolated, 1, np.sqrt(lap_diag))
        factory = _laplace_normed_sym if symmetrized else _laplace_normed
        matvec = factory(graph, degrees, 1.0 / w)
        out_diag = w.astype(dtype, copy=False)
    else:
        factory = _laplace_sym if symmetrized else _laplace
        matvec = factory(graph, degrees)
        out_diag = lap_diag.astype(dtype, copy=False)

    if form == "function":
        return matvec, out_diag
    if form == "lo":
        return _linearoperator(matvec, shape=graph.shape, dtype=dtype), out_diag
    raise ValueError(f"Invalid form: {form!r}")
446
+
447
+
448
+ def _laplacian_sparse(graph, normed, axis, copy, form, dtype, symmetrized):
449
+ # The keyword argument `form` is unused and has no effect here.
450
+ del form
451
+
452
+ if dtype is None:
453
+ dtype = graph.dtype
454
+
455
+ needs_copy = False
456
+ if graph.format in ('lil', 'dok'):
457
+ m = graph.tocoo()
458
+ else:
459
+ m = graph
460
+ if copy:
461
+ needs_copy = True
462
+
463
+ if symmetrized:
464
+ m += m.T.conj()
465
+
466
+ w = np.asarray(m.sum(axis=axis)).ravel() - m.diagonal()
467
+ if normed:
468
+ m = m.tocoo(copy=needs_copy)
469
+ isolated_node_mask = (w == 0)
470
+ w = np.where(isolated_node_mask, 1, np.sqrt(w))
471
+ m.data /= w[m.row]
472
+ m.data /= w[m.col]
473
+ m.data *= -1
474
+ m.setdiag(1 - isolated_node_mask)
475
+ else:
476
+ if m.format == 'dia':
477
+ m = m.copy()
478
+ else:
479
+ m = m.tocoo(copy=needs_copy)
480
+ m.data *= -1
481
+ m.setdiag(w)
482
+
483
+ return m.astype(dtype, copy=False), w.astype(dtype)
484
+
485
+
486
def _laplacian_dense_flo(graph, normed, axis, copy, form, dtype, symmetrized):
    """Build a matrix-free Laplacian for a dense ``graph``.

    Returns ``(lap, d)``: ``lap`` is a callable when ``form == "function"``
    or a ``LinearOperator`` when ``form == "lo"``; ``d`` is the Laplacian
    diagonal (the square-root degree weights when ``normed`` is True),
    cast to ``dtype`` (defaulting to the array's dtype).
    """
    # Honor `copy` by materializing a fresh array; otherwise view the input.
    m = np.array(graph) if copy else np.asarray(graph)

    if dtype is None:
        dtype = m.dtype

    degrees = m.sum(axis=axis)
    diagonal = m.diagonal()
    if symmetrized:
        # Degrees of graph + graph^H; the self-loop weight is counted twice.
        degrees = degrees + m.sum(axis=1 - axis)
        lap_diag = degrees - diagonal - diagonal
    else:
        lap_diag = degrees - diagonal

    if normed:
        isolated = lap_diag == 0
        # Isolated vertices get weight 1 so the 1/w scaling stays finite.
        w = np.where(isolated, 1, np.sqrt(lap_diag))
        factory = _laplace_normed_sym if symmetrized else _laplace_normed
        matvec = factory(m, degrees, 1.0 / w)
        out_diag = w.astype(dtype, copy=False)
    else:
        factory = _laplace_sym if symmetrized else _laplace
        matvec = factory(m, degrees)
        out_diag = lap_diag.astype(dtype, copy=False)

    if form == "function":
        return matvec, out_diag
    if form == "lo":
        return _linearoperator(matvec, shape=graph.shape, dtype=dtype), out_diag
    raise ValueError(f"Invalid form: {form!r}")
529
+
530
+
531
+ def _laplacian_dense(graph, normed, axis, copy, form, dtype, symmetrized):
532
+
533
+ if form != "array":
534
+ raise ValueError(f'{form!r} must be "array"')
535
+
536
+ if dtype is None:
537
+ dtype = graph.dtype
538
+
539
+ if copy:
540
+ m = np.array(graph)
541
+ else:
542
+ m = np.asarray(graph)
543
+
544
+ if dtype is None:
545
+ dtype = m.dtype
546
+
547
+ if symmetrized:
548
+ m += m.T.conj()
549
+ np.fill_diagonal(m, 0)
550
+ w = m.sum(axis=axis)
551
+ if normed:
552
+ isolated_node_mask = (w == 0)
553
+ w = np.where(isolated_node_mask, 1, np.sqrt(w))
554
+ m /= w
555
+ m /= w[:, np.newaxis]
556
+ m *= -1
557
+ _setdiag_dense(m, 1 - isolated_node_mask)
558
+ else:
559
+ m *= -1
560
+ _setdiag_dense(m, w)
561
+
562
+ return m.astype(dtype, copy=False), w.astype(dtype, copy=False)
env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/_matching.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (348 kB). View file