applied-ai-018 committed on
Commit 0668510 · verified · 1 Parent(s): 110ba22

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50)
  1. .gitattributes +1 -0
  2. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/data/bug-1310.npz +3 -0
  3. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/data/estimate_gradients_hang.npy +3 -0
  4. env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/data/gcvspl.npz +3 -0
  5. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/__init__.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_dia.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_sputils.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/base.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/bsr.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/csr.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/extract.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/sputils.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_connected_components.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_conversions.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_flow.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_graph_laplacian.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_matching.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_pydata_sparse.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_reordering.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_shortest_path.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_traversal.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__init__.py +146 -0
  23. env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/__init__.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_expm_multiply.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_interface.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_matfuncs.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_norm.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_special_sparse_arrays.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_svdp.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/dsolve.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/eigen.cpython-310.pyc +0 -0
  32. env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/interface.cpython-310.pyc +0 -0
  33. env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/isolve.cpython-310.pyc +0 -0
  34. env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/matfuncs.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__init__.py +71 -0
  36. env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/__init__.cpython-310.pyc +0 -0
  37. env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/_add_newdocs.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/_add_newdocs.py +153 -0
  39. env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/_superlu.cpython-310-x86_64-linux-gnu.so +0 -0
  40. env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/linsolve.py +746 -0
  41. env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/__init__.py +0 -0
  42. env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  43. env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/__pycache__/test_linsolve.cpython-310.pyc +0 -0
  44. env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/test_linsolve.py +805 -0
  45. env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_special_sparse_arrays.py +948 -0
  46. env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/dsolve.py +24 -0
  47. env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/eigen.py +23 -0
  48. env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__init__.py +0 -0
  49. env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  50. env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_array_api.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -156,3 +156,4 @@ env-llmeval/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcheckpoint.so
  env-llmeval/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcupti.so.12 filter=lfs diff=lfs merge=lfs -text
  env-llmeval/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_host.so filter=lfs diff=lfs merge=lfs -text
  env-llmeval/lib/python3.10/site-packages/tokenizers/tokenizers.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ llmeval-env/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so filter=lfs diff=lfs merge=lfs -text
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/data/bug-1310.npz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8d6803c0b398f2704c236f1d1b9e8e5ede06bd165a0abb0f228281abbd455ae9
+ size 2648
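
The test-data files in this block are added as Git LFS pointer stubs: the diff records only the version / oid sha256 / size triplet, and the actual payload lives in LFS storage (see the filter=lfs rules in .gitattributes above). A minimal sketch, not part of the diff, of checking a locally downloaded object against such a pointer; the local filename here is hypothetical:

import hashlib
from pathlib import Path

def lfs_oid(path, chunk_size=1 << 20):
    """Return the sha256 hex digest that Git LFS uses as the object id."""
    digest = hashlib.sha256()
    with open(path, "rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Hypothetical local copy of the object referenced by the pointer above.
local = Path("bug-1310.npz")
expected_oid = "8d6803c0b398f2704c236f1d1b9e8e5ede06bd165a0abb0f228281abbd455ae9"
if local.exists():
    print("size matches:", local.stat().st_size == 2648)
    print("oid matches :", lfs_oid(local) == expected_oid)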
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/data/estimate_gradients_hang.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:406c10857417ff5ea98d8cd28945c9d0e4f5c24f92a48ad0e8fab955bf2477f1
+ size 35680
env-llmeval/lib/python3.10/site-packages/scipy/interpolate/tests/data/gcvspl.npz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:03ce8155a6cba0c1bf0a2441a10c228191f916dec36cb820723429811296bba8
+ size 3138
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (9.37 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_dia.cpython-310.pyc ADDED
Binary file (14.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/_sputils.cpython-310.pyc ADDED
Binary file (12.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/base.cpython-310.pyc ADDED
Binary file (783 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/bsr.cpython-310.pyc ADDED
Binary file (784 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/csr.cpython-310.pyc ADDED
Binary file (685 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/extract.cpython-310.pyc ADDED
Binary file (622 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/__pycache__/sputils.cpython-310.pyc ADDED
Binary file (902 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_connected_components.cpython-310.pyc ADDED
Binary file (3.22 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_conversions.cpython-310.pyc ADDED
Binary file (1.74 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_flow.cpython-310.pyc ADDED
Binary file (6.99 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_graph_laplacian.cpython-310.pyc ADDED
Binary file (7.71 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_matching.cpython-310.pyc ADDED
Binary file (10.4 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_pydata_sparse.cpython-310.pyc ADDED
Binary file (3.62 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_reordering.cpython-310.pyc ADDED
Binary file (2.93 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_shortest_path.cpython-310.pyc ADDED
Binary file (12 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/csgraph/tests/__pycache__/test_traversal.cpython-310.pyc ADDED
Binary file (2.27 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__init__.py ADDED
@@ -0,0 +1,146 @@
+ """
+ Sparse linear algebra (:mod:`scipy.sparse.linalg`)
+ ==================================================
+
+ .. currentmodule:: scipy.sparse.linalg
+
+ Abstract linear operators
+ -------------------------
+
+ .. autosummary::
+ :toctree: generated/
+
+ LinearOperator -- abstract representation of a linear operator
+ aslinearoperator -- convert an object to an abstract linear operator
+
+ Matrix Operations
+ -----------------
+
+ .. autosummary::
+ :toctree: generated/
+
+ inv -- compute the sparse matrix inverse
+ expm -- compute the sparse matrix exponential
+ expm_multiply -- compute the product of a matrix exponential and a matrix
+ matrix_power -- compute the matrix power by raising a matrix to an exponent
+
+ Matrix norms
+ ------------
+
+ .. autosummary::
+ :toctree: generated/
+
+ norm -- Norm of a sparse matrix
+ onenormest -- Estimate the 1-norm of a sparse matrix
+
+ Solving linear problems
+ -----------------------
+
+ Direct methods for linear equation systems:
+
+ .. autosummary::
+ :toctree: generated/
+
+ spsolve -- Solve the sparse linear system Ax=b
+ spsolve_triangular -- Solve sparse linear system Ax=b for a triangular A.
+ factorized -- Pre-factorize matrix to a function solving a linear system
+ MatrixRankWarning -- Warning on exactly singular matrices
+ use_solver -- Select direct solver to use
+
+ Iterative methods for linear equation systems:
+
+ .. autosummary::
+ :toctree: generated/
+
+ bicg -- Use BIConjugate Gradient iteration to solve Ax = b
+ bicgstab -- Use BIConjugate Gradient STABilized iteration to solve Ax = b
+ cg -- Use Conjugate Gradient iteration to solve Ax = b
+ cgs -- Use Conjugate Gradient Squared iteration to solve Ax = b
+ gmres -- Use Generalized Minimal RESidual iteration to solve Ax = b
+ lgmres -- Solve a matrix equation using the LGMRES algorithm
+ minres -- Use MINimum RESidual iteration to solve Ax = b
+ qmr -- Use Quasi-Minimal Residual iteration to solve Ax = b
+ gcrotmk -- Solve a matrix equation using the GCROT(m,k) algorithm
+ tfqmr -- Use Transpose-Free Quasi-Minimal Residual iteration to solve Ax = b
+
+ Iterative methods for least-squares problems:
+
+ .. autosummary::
+ :toctree: generated/
+
+ lsqr -- Find the least-squares solution to a sparse linear equation system
+ lsmr -- Find the least-squares solution to a sparse linear equation system
+
+ Matrix factorizations
+ ---------------------
+
+ Eigenvalue problems:
+
+ .. autosummary::
+ :toctree: generated/
+
+ eigs -- Find k eigenvalues and eigenvectors of the square matrix A
+ eigsh -- Find k eigenvalues and eigenvectors of a symmetric matrix
+ lobpcg -- Solve symmetric partial eigenproblems with optional preconditioning
+
+ Singular values problems:
+
+ .. autosummary::
+ :toctree: generated/
+
+ svds -- Compute k singular values/vectors for a sparse matrix
+
+ The `svds` function supports the following solvers:
+
+ .. toctree::
+
+ sparse.linalg.svds-arpack
+ sparse.linalg.svds-lobpcg
+ sparse.linalg.svds-propack
+
+ Complete or incomplete LU factorizations
+
+ .. autosummary::
+ :toctree: generated/
+
+ splu -- Compute a LU decomposition for a sparse matrix
+ spilu -- Compute an incomplete LU decomposition for a sparse matrix
+ SuperLU -- Object representing an LU factorization
+
+ Sparse arrays with structure
+ ----------------------------
+
+ .. autosummary::
+ :toctree: generated/
+
+ LaplacianNd -- Laplacian on a uniform rectangular grid in ``N`` dimensions
+
+ Exceptions
+ ----------
+
+ .. autosummary::
+ :toctree: generated/
+
+ ArpackNoConvergence
+ ArpackError
+
+ """
+
+ from ._isolve import *
+ from ._dsolve import *
+ from ._interface import *
+ from ._eigen import *
+ from ._matfuncs import *
+ from ._onenormest import *
+ from ._norm import *
+ from ._expm_multiply import *
+ from ._special_sparse_arrays import *
+
+ # Deprecated namespaces, to be removed in v2.0.0
+ from . import isolve, dsolve, interface, eigen, matfuncs
+
+ __all__ = [s for s in dir() if not s.startswith('_')]
+
+ from scipy._lib._testutils import PytestTester
+ test = PytestTester(__name__)
+ del PytestTester
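
The docstring above is the public map of `scipy.sparse.linalg`. A minimal sketch, not part of the committed file and assuming SciPy is installed, of how two of the listed routines relate: a one-shot direct solve with `spsolve`, and `splu` when the same factorization will be reused (the toy matrix values are illustrative):

import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import splu, spsolve

# Small sparse system in CSC format, the layout the direct solvers prefer.
A = csc_matrix([[4.0, 1.0, 0.0],
                [1.0, 3.0, 0.0],
                [0.0, 0.0, 2.0]])
b = np.array([1.0, 2.0, 3.0])

x1 = spsolve(A, b)   # factor and solve in one call
lu = splu(A)         # factor once ...
x2 = lu.solve(b)     # ... then reuse for any number of right-hand sides

assert np.allclose(A @ x1, b) and np.allclose(x1, x2)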
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (4.26 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_expm_multiply.cpython-310.pyc ADDED
Binary file (23.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_interface.cpython-310.pyc ADDED
Binary file (30 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_matfuncs.cpython-310.pyc ADDED
Binary file (25.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_norm.cpython-310.pyc ADDED
Binary file (5.07 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_special_sparse_arrays.cpython-310.pyc ADDED
Binary file (33.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_svdp.cpython-310.pyc ADDED
Binary file (9.18 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/dsolve.cpython-310.pyc ADDED
Binary file (742 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/eigen.cpython-310.pyc ADDED
Binary file (708 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/interface.cpython-310.pyc ADDED
Binary file (736 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/isolve.cpython-310.pyc ADDED
Binary file (708 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/matfuncs.cpython-310.pyc ADDED
Binary file (746 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__init__.py ADDED
@@ -0,0 +1,71 @@
+ """
+ Linear Solvers
+ ==============
+
+ The default solver is SuperLU (included in the scipy distribution),
+ which can solve real or complex linear systems in both single and
+ double precisions. It is automatically replaced by UMFPACK, if
+ available. Note that UMFPACK works in double precision only, so
+ switch it off by::
+
+ >>> from scipy.sparse.linalg import spsolve, use_solver
+ >>> use_solver(useUmfpack=False)
+
+ to solve in the single precision. See also use_solver documentation.
+
+ Example session::
+
+ >>> from scipy.sparse import csc_matrix, spdiags
+ >>> from numpy import array
+ >>>
+ >>> print("Inverting a sparse linear system:")
+ >>> print("The sparse matrix (constructed from diagonals):")
+ >>> a = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5)
+ >>> b = array([1, 2, 3, 4, 5])
+ >>> print("Solve: single precision complex:")
+ >>> use_solver( useUmfpack = False )
+ >>> a = a.astype('F')
+ >>> x = spsolve(a, b)
+ >>> print(x)
+ >>> print("Error: ", a@x-b)
+ >>>
+ >>> print("Solve: double precision complex:")
+ >>> use_solver( useUmfpack = True )
+ >>> a = a.astype('D')
+ >>> x = spsolve(a, b)
+ >>> print(x)
+ >>> print("Error: ", a@x-b)
+ >>>
+ >>> print("Solve: double precision:")
+ >>> a = a.astype('d')
+ >>> x = spsolve(a, b)
+ >>> print(x)
+ >>> print("Error: ", a@x-b)
+ >>>
+ >>> print("Solve: single precision:")
+ >>> use_solver( useUmfpack = False )
+ >>> a = a.astype('f')
+ >>> x = spsolve(a, b.astype('f'))
+ >>> print(x)
+ >>> print("Error: ", a@x-b)
+
+ """
+
+ #import umfpack
+ #__doc__ = '\n\n'.join( (__doc__, umfpack.__doc__) )
+ #del umfpack
+
+ from .linsolve import *
+ from ._superlu import SuperLU
+ from . import _add_newdocs
+ from . import linsolve
+
+ __all__ = [
+ 'MatrixRankWarning', 'SuperLU', 'factorized',
+ 'spilu', 'splu', 'spsolve',
+ 'spsolve_triangular', 'use_solver'
+ ]
+
+ from scipy._lib._testutils import PytestTester
+ test = PytestTester(__name__)
+ del PytestTester
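
The example session in the docstring above exercises `spsolve` at several precisions. A complementary sketch, not part of the committed file: `factorized`, also exported here, returns a solve callable so the LU factorization is computed once and then reused (the matrix values are illustrative):

import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import factorized, use_solver

use_solver(useUmfpack=False)      # stick to the bundled SuperLU backend

A = csc_matrix([[3.0, 2.0, 0.0],
                [1.0, -1.0, 0.0],
                [0.0, 5.0, 1.0]])
solve = factorized(A)             # the factorization happens once, here

# The same factorization then serves many right-hand sides cheaply.
for rhs in (np.array([1.0, 0.0, 0.0]), np.array([0.0, 2.0, -1.0])):
    assert np.allclose(A @ solve(rhs), rhs)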
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/_add_newdocs.cpython-310.pyc ADDED
Binary file (3.69 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/_add_newdocs.py ADDED
@@ -0,0 +1,153 @@
1
+ from numpy.lib import add_newdoc
2
+
3
+ add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU',
4
+ """
5
+ LU factorization of a sparse matrix.
6
+
7
+ Factorization is represented as::
8
+
9
+ Pr @ A @ Pc = L @ U
10
+
11
+ To construct these `SuperLU` objects, call the `splu` and `spilu`
12
+ functions.
13
+
14
+ Attributes
15
+ ----------
16
+ shape
17
+ nnz
18
+ perm_c
19
+ perm_r
20
+ L
21
+ U
22
+
23
+ Methods
24
+ -------
25
+ solve
26
+
27
+ Notes
28
+ -----
29
+
30
+ .. versionadded:: 0.14.0
31
+
32
+ Examples
33
+ --------
34
+ The LU decomposition can be used to solve matrix equations. Consider:
35
+
36
+ >>> import numpy as np
37
+ >>> from scipy.sparse import csc_matrix
38
+ >>> from scipy.sparse.linalg import splu
39
+ >>> A = csc_matrix([[1,2,0,4], [1,0,0,1], [1,0,2,1], [2,2,1,0.]])
40
+
41
+ This can be solved for a given right-hand side:
42
+
43
+ >>> lu = splu(A)
44
+ >>> b = np.array([1, 2, 3, 4])
45
+ >>> x = lu.solve(b)
46
+ >>> A.dot(x)
47
+ array([ 1., 2., 3., 4.])
48
+
49
+ The ``lu`` object also contains an explicit representation of the
50
+ decomposition. The permutations are represented as mappings of
51
+ indices:
52
+
53
+ >>> lu.perm_r
54
+ array([2, 1, 3, 0], dtype=int32) # may vary
55
+ >>> lu.perm_c
56
+ array([0, 1, 3, 2], dtype=int32) # may vary
57
+
58
+ The L and U factors are sparse matrices in CSC format:
59
+
60
+ >>> lu.L.toarray()
61
+ array([[ 1. , 0. , 0. , 0. ], # may vary
62
+ [ 0.5, 1. , 0. , 0. ],
63
+ [ 0.5, -1. , 1. , 0. ],
64
+ [ 0.5, 1. , 0. , 1. ]])
65
+ >>> lu.U.toarray()
66
+ array([[ 2. , 2. , 0. , 1. ], # may vary
67
+ [ 0. , -1. , 1. , -0.5],
68
+ [ 0. , 0. , 5. , -1. ],
69
+ [ 0. , 0. , 0. , 2. ]])
70
+
71
+ The permutation matrices can be constructed:
72
+
73
+ >>> Pr = csc_matrix((np.ones(4), (lu.perm_r, np.arange(4))))
74
+ >>> Pc = csc_matrix((np.ones(4), (np.arange(4), lu.perm_c)))
75
+
76
+ We can reassemble the original matrix:
77
+
78
+ >>> (Pr.T @ (lu.L @ lu.U) @ Pc.T).toarray()
79
+ array([[ 1., 2., 0., 4.],
80
+ [ 1., 0., 0., 1.],
81
+ [ 1., 0., 2., 1.],
82
+ [ 2., 2., 1., 0.]])
83
+ """)
84
+
85
+ add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('solve',
86
+ """
87
+ solve(rhs[, trans])
88
+
89
+ Solves linear system of equations with one or several right-hand sides.
90
+
91
+ Parameters
92
+ ----------
93
+ rhs : ndarray, shape (n,) or (n, k)
94
+ Right hand side(s) of equation
95
+ trans : {'N', 'T', 'H'}, optional
96
+ Type of system to solve::
97
+
98
+ 'N': A @ x == rhs (default)
99
+ 'T': A^T @ x == rhs
100
+ 'H': A^H @ x == rhs
101
+
102
+ i.e., normal, transposed, and hermitian conjugate.
103
+
104
+ Returns
105
+ -------
106
+ x : ndarray, shape ``rhs.shape``
107
+ Solution vector(s)
108
+ """))
109
+
110
+ add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('L',
111
+ """
112
+ Lower triangular factor with unit diagonal as a
113
+ `scipy.sparse.csc_matrix`.
114
+
115
+ .. versionadded:: 0.14.0
116
+ """))
117
+
118
+ add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('U',
119
+ """
120
+ Upper triangular factor as a `scipy.sparse.csc_matrix`.
121
+
122
+ .. versionadded:: 0.14.0
123
+ """))
124
+
125
+ add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('shape',
126
+ """
127
+ Shape of the original matrix as a tuple of ints.
128
+ """))
129
+
130
+ add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('nnz',
131
+ """
132
+ Number of nonzero elements in the matrix.
133
+ """))
134
+
135
+ add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('perm_c',
136
+ """
137
+ Permutation Pc represented as an array of indices.
138
+
139
+ The column permutation matrix can be reconstructed via:
140
+
141
+ >>> Pc = np.zeros((n, n))
142
+ >>> Pc[np.arange(n), perm_c] = 1
143
+ """))
144
+
145
+ add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('perm_r',
146
+ """
147
+ Permutation Pr represented as an array of indices.
148
+
149
+ The row permutation matrix can be reconstructed via:
150
+
151
+ >>> Pr = np.zeros((n, n))
152
+ >>> Pr[perm_r, np.arange(n)] = 1
153
+ """))
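
The `solve` docstring above lists the `trans` options. A short sketch, not part of the committed file, that reuses the matrix from the `SuperLU` example to solve both A x = b and A^T x = b from a single factorization:

import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import splu

A = csc_matrix([[1, 2, 0, 4], [1, 0, 0, 1], [1, 0, 2, 1], [2, 2, 1, 0.]])
lu = splu(A)
b = np.array([1.0, 2.0, 3.0, 4.0])

x_n = lu.solve(b)        # 'N' (default): solves A @ x == b
x_t = lu.solve(b, 'T')   # 'T': solves A.T @ x == b without refactorizing
assert np.allclose(A @ x_n, b)
assert np.allclose(A.T @ x_t, b)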
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/_superlu.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (379 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/linsolve.py ADDED
@@ -0,0 +1,746 @@
1
+ from warnings import warn
2
+
3
+ import numpy as np
4
+ from numpy import asarray
5
+ from scipy.sparse import (issparse,
6
+ SparseEfficiencyWarning, csc_matrix, csr_matrix)
7
+ from scipy.sparse._sputils import is_pydata_spmatrix, convert_pydata_sparse_to_scipy
8
+ from scipy.linalg import LinAlgError
9
+ import copy
10
+
11
+ from . import _superlu
12
+
13
+ noScikit = False
14
+ try:
15
+ import scikits.umfpack as umfpack
16
+ except ImportError:
17
+ noScikit = True
18
+
19
+ useUmfpack = not noScikit
20
+
21
+ __all__ = ['use_solver', 'spsolve', 'splu', 'spilu', 'factorized',
22
+ 'MatrixRankWarning', 'spsolve_triangular']
23
+
24
+
25
+ class MatrixRankWarning(UserWarning):
26
+ pass
27
+
28
+
29
+ def use_solver(**kwargs):
30
+ """
31
+ Select default sparse direct solver to be used.
32
+
33
+ Parameters
34
+ ----------
35
+ useUmfpack : bool, optional
36
+ Use UMFPACK [1]_, [2]_, [3]_, [4]_. over SuperLU. Has effect only
37
+ if ``scikits.umfpack`` is installed. Default: True
38
+ assumeSortedIndices : bool, optional
39
+ Allow UMFPACK to skip the step of sorting indices for a CSR/CSC matrix.
40
+ Has effect only if useUmfpack is True and ``scikits.umfpack`` is
41
+ installed. Default: False
42
+
43
+ Notes
44
+ -----
45
+ The default sparse solver is UMFPACK when available
46
+ (``scikits.umfpack`` is installed). This can be changed by passing
47
+ useUmfpack = False, which then causes the always present SuperLU
48
+ based solver to be used.
49
+
50
+ UMFPACK requires a CSR/CSC matrix to have sorted column/row indices. If
51
+ sure that the matrix fulfills this, pass ``assumeSortedIndices=True``
52
+ to gain some speed.
53
+
54
+ References
55
+ ----------
56
+ .. [1] T. A. Davis, Algorithm 832: UMFPACK - an unsymmetric-pattern
57
+ multifrontal method with a column pre-ordering strategy, ACM
58
+ Trans. on Mathematical Software, 30(2), 2004, pp. 196--199.
59
+ https://dl.acm.org/doi/abs/10.1145/992200.992206
60
+
61
+ .. [2] T. A. Davis, A column pre-ordering strategy for the
62
+ unsymmetric-pattern multifrontal method, ACM Trans.
63
+ on Mathematical Software, 30(2), 2004, pp. 165--195.
64
+ https://dl.acm.org/doi/abs/10.1145/992200.992205
65
+
66
+ .. [3] T. A. Davis and I. S. Duff, A combined unifrontal/multifrontal
67
+ method for unsymmetric sparse matrices, ACM Trans. on
68
+ Mathematical Software, 25(1), 1999, pp. 1--19.
69
+ https://doi.org/10.1145/305658.287640
70
+
71
+ .. [4] T. A. Davis and I. S. Duff, An unsymmetric-pattern multifrontal
72
+ method for sparse LU factorization, SIAM J. Matrix Analysis and
73
+ Computations, 18(1), 1997, pp. 140--158.
74
+ https://doi.org/10.1137/S0895479894246905T.
75
+
76
+ Examples
77
+ --------
78
+ >>> import numpy as np
79
+ >>> from scipy.sparse.linalg import use_solver, spsolve
80
+ >>> from scipy.sparse import csc_matrix
81
+ >>> R = np.random.randn(5, 5)
82
+ >>> A = csc_matrix(R)
83
+ >>> b = np.random.randn(5)
84
+ >>> use_solver(useUmfpack=False) # enforce superLU over UMFPACK
85
+ >>> x = spsolve(A, b)
86
+ >>> np.allclose(A.dot(x), b)
87
+ True
88
+ >>> use_solver(useUmfpack=True) # reset umfPack usage to default
89
+ """
90
+ if 'useUmfpack' in kwargs:
91
+ globals()['useUmfpack'] = kwargs['useUmfpack']
92
+ if useUmfpack and 'assumeSortedIndices' in kwargs:
93
+ umfpack.configure(assumeSortedIndices=kwargs['assumeSortedIndices'])
94
+
95
+ def _get_umf_family(A):
96
+ """Get umfpack family string given the sparse matrix dtype."""
97
+ _families = {
98
+ (np.float64, np.int32): 'di',
99
+ (np.complex128, np.int32): 'zi',
100
+ (np.float64, np.int64): 'dl',
101
+ (np.complex128, np.int64): 'zl'
102
+ }
103
+
104
+ # A.dtype.name can only be "float64" or
105
+ # "complex128" in control flow
106
+ f_type = getattr(np, A.dtype.name)
107
+ # control flow may allow for more index
108
+ # types to get through here
109
+ i_type = getattr(np, A.indices.dtype.name)
110
+
111
+ try:
112
+ family = _families[(f_type, i_type)]
113
+
114
+ except KeyError as e:
115
+ msg = ('only float64 or complex128 matrices with int32 or int64 '
116
+ f'indices are supported! (got: matrix: {f_type}, indices: {i_type})')
117
+ raise ValueError(msg) from e
118
+
119
+ # See gh-8278. Considered converting only if
120
+ # A.shape[0]*A.shape[1] > np.iinfo(np.int32).max,
121
+ # but that didn't always fix the issue.
122
+ family = family[0] + "l"
123
+ A_new = copy.copy(A)
124
+ A_new.indptr = np.asarray(A.indptr, dtype=np.int64)
125
+ A_new.indices = np.asarray(A.indices, dtype=np.int64)
126
+
127
+ return family, A_new
128
+
129
+ def _safe_downcast_indices(A):
130
+ # check for safe downcasting
131
+ max_value = np.iinfo(np.intc).max
132
+
133
+ if A.indptr[-1] > max_value: # indptr[-1] is max b/c indptr always sorted
134
+ raise ValueError("indptr values too large for SuperLU")
135
+
136
+ if max(*A.shape) > max_value: # only check large enough arrays
137
+ if np.any(A.indices > max_value):
138
+ raise ValueError("indices values too large for SuperLU")
139
+
140
+ indices = A.indices.astype(np.intc, copy=False)
141
+ indptr = A.indptr.astype(np.intc, copy=False)
142
+ return indices, indptr
143
+
144
+ def spsolve(A, b, permc_spec=None, use_umfpack=True):
145
+ """Solve the sparse linear system Ax=b, where b may be a vector or a matrix.
146
+
147
+ Parameters
148
+ ----------
149
+ A : ndarray or sparse matrix
150
+ The square matrix A will be converted into CSC or CSR form
151
+ b : ndarray or sparse matrix
152
+ The matrix or vector representing the right hand side of the equation.
153
+ If a vector, b.shape must be (n,) or (n, 1).
154
+ permc_spec : str, optional
155
+ How to permute the columns of the matrix for sparsity preservation.
156
+ (default: 'COLAMD')
157
+
158
+ - ``NATURAL``: natural ordering.
159
+ - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
160
+ - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
161
+ - ``COLAMD``: approximate minimum degree column ordering [1]_, [2]_.
162
+
163
+ use_umfpack : bool, optional
164
+ if True (default) then use UMFPACK for the solution [3]_, [4]_, [5]_,
165
+ [6]_ . This is only referenced if b is a vector and
166
+ ``scikits.umfpack`` is installed.
167
+
168
+ Returns
169
+ -------
170
+ x : ndarray or sparse matrix
171
+ the solution of the sparse linear equation.
172
+ If b is a vector, then x is a vector of size A.shape[1]
173
+ If b is a matrix, then x is a matrix of size (A.shape[1], b.shape[1])
174
+
175
+ Notes
176
+ -----
177
+ For solving the matrix expression AX = B, this solver assumes the resulting
178
+ matrix X is sparse, as is often the case for very sparse inputs. If the
179
+ resulting X is dense, the construction of this sparse result will be
180
+ relatively expensive. In that case, consider converting A to a dense
181
+ matrix and using scipy.linalg.solve or its variants.
182
+
183
+ References
184
+ ----------
185
+ .. [1] T. A. Davis, J. R. Gilbert, S. Larimore, E. Ng, Algorithm 836:
186
+ COLAMD, an approximate column minimum degree ordering algorithm,
187
+ ACM Trans. on Mathematical Software, 30(3), 2004, pp. 377--380.
188
+ :doi:`10.1145/1024074.1024080`
189
+
190
+ .. [2] T. A. Davis, J. R. Gilbert, S. Larimore, E. Ng, A column approximate
191
+ minimum degree ordering algorithm, ACM Trans. on Mathematical
192
+ Software, 30(3), 2004, pp. 353--376. :doi:`10.1145/1024074.1024079`
193
+
194
+ .. [3] T. A. Davis, Algorithm 832: UMFPACK - an unsymmetric-pattern
195
+ multifrontal method with a column pre-ordering strategy, ACM
196
+ Trans. on Mathematical Software, 30(2), 2004, pp. 196--199.
197
+ https://dl.acm.org/doi/abs/10.1145/992200.992206
198
+
199
+ .. [4] T. A. Davis, A column pre-ordering strategy for the
200
+ unsymmetric-pattern multifrontal method, ACM Trans.
201
+ on Mathematical Software, 30(2), 2004, pp. 165--195.
202
+ https://dl.acm.org/doi/abs/10.1145/992200.992205
203
+
204
+ .. [5] T. A. Davis and I. S. Duff, A combined unifrontal/multifrontal
205
+ method for unsymmetric sparse matrices, ACM Trans. on
206
+ Mathematical Software, 25(1), 1999, pp. 1--19.
207
+ https://doi.org/10.1145/305658.287640
208
+
209
+ .. [6] T. A. Davis and I. S. Duff, An unsymmetric-pattern multifrontal
210
+ method for sparse LU factorization, SIAM J. Matrix Analysis and
211
+ Computations, 18(1), 1997, pp. 140--158.
212
+ https://doi.org/10.1137/S0895479894246905T.
213
+
214
+
215
+ Examples
216
+ --------
217
+ >>> import numpy as np
218
+ >>> from scipy.sparse import csc_matrix
219
+ >>> from scipy.sparse.linalg import spsolve
220
+ >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
221
+ >>> B = csc_matrix([[2, 0], [-1, 0], [2, 0]], dtype=float)
222
+ >>> x = spsolve(A, B)
223
+ >>> np.allclose(A.dot(x).toarray(), B.toarray())
224
+ True
225
+ """
226
+ is_pydata_sparse = is_pydata_spmatrix(b)
227
+ pydata_sparse_cls = b.__class__ if is_pydata_sparse else None
228
+ A = convert_pydata_sparse_to_scipy(A)
229
+ b = convert_pydata_sparse_to_scipy(b)
230
+
231
+ if not (issparse(A) and A.format in ("csc", "csr")):
232
+ A = csc_matrix(A)
233
+ warn('spsolve requires A be CSC or CSR matrix format',
234
+ SparseEfficiencyWarning, stacklevel=2)
235
+
236
+ # b is a vector only if b have shape (n,) or (n, 1)
237
+ b_is_sparse = issparse(b)
238
+ if not b_is_sparse:
239
+ b = asarray(b)
240
+ b_is_vector = ((b.ndim == 1) or (b.ndim == 2 and b.shape[1] == 1))
241
+
242
+ # sum duplicates for non-canonical format
243
+ A.sum_duplicates()
244
+ A = A._asfptype() # upcast to a floating point format
245
+ result_dtype = np.promote_types(A.dtype, b.dtype)
246
+ if A.dtype != result_dtype:
247
+ A = A.astype(result_dtype)
248
+ if b.dtype != result_dtype:
249
+ b = b.astype(result_dtype)
250
+
251
+ # validate input shapes
252
+ M, N = A.shape
253
+ if (M != N):
254
+ raise ValueError(f"matrix must be square (has shape {(M, N)})")
255
+
256
+ if M != b.shape[0]:
257
+ raise ValueError(f"matrix - rhs dimension mismatch ({A.shape} - {b.shape[0]})")
258
+
259
+ use_umfpack = use_umfpack and useUmfpack
260
+
261
+ if b_is_vector and use_umfpack:
262
+ if b_is_sparse:
263
+ b_vec = b.toarray()
264
+ else:
265
+ b_vec = b
266
+ b_vec = asarray(b_vec, dtype=A.dtype).ravel()
267
+
268
+ if noScikit:
269
+ raise RuntimeError('Scikits.umfpack not installed.')
270
+
271
+ if A.dtype.char not in 'dD':
272
+ raise ValueError("convert matrix data to double, please, using"
273
+ " .astype(), or set linsolve.useUmfpack = False")
274
+
275
+ umf_family, A = _get_umf_family(A)
276
+ umf = umfpack.UmfpackContext(umf_family)
277
+ x = umf.linsolve(umfpack.UMFPACK_A, A, b_vec,
278
+ autoTranspose=True)
279
+ else:
280
+ if b_is_vector and b_is_sparse:
281
+ b = b.toarray()
282
+ b_is_sparse = False
283
+
284
+ if not b_is_sparse:
285
+ if A.format == "csc":
286
+ flag = 1 # CSC format
287
+ else:
288
+ flag = 0 # CSR format
289
+
290
+ indices = A.indices.astype(np.intc, copy=False)
291
+ indptr = A.indptr.astype(np.intc, copy=False)
292
+ options = dict(ColPerm=permc_spec)
293
+ x, info = _superlu.gssv(N, A.nnz, A.data, indices, indptr,
294
+ b, flag, options=options)
295
+ if info != 0:
296
+ warn("Matrix is exactly singular", MatrixRankWarning, stacklevel=2)
297
+ x.fill(np.nan)
298
+ if b_is_vector:
299
+ x = x.ravel()
300
+ else:
301
+ # b is sparse
302
+ Afactsolve = factorized(A)
303
+
304
+ if not (b.format == "csc" or is_pydata_spmatrix(b)):
305
+ warn('spsolve is more efficient when sparse b '
306
+ 'is in the CSC matrix format',
307
+ SparseEfficiencyWarning, stacklevel=2)
308
+ b = csc_matrix(b)
309
+
310
+ # Create a sparse output matrix by repeatedly applying
311
+ # the sparse factorization to solve columns of b.
312
+ data_segs = []
313
+ row_segs = []
314
+ col_segs = []
315
+ for j in range(b.shape[1]):
316
+ # TODO: replace this with
317
+ # bj = b[:, j].toarray().ravel()
318
+ # once 1D sparse arrays are supported.
319
+ # That is a slightly faster code path.
320
+ bj = b[:, [j]].toarray().ravel()
321
+ xj = Afactsolve(bj)
322
+ w = np.flatnonzero(xj)
323
+ segment_length = w.shape[0]
324
+ row_segs.append(w)
325
+ col_segs.append(np.full(segment_length, j, dtype=int))
326
+ data_segs.append(np.asarray(xj[w], dtype=A.dtype))
327
+ sparse_data = np.concatenate(data_segs)
328
+ sparse_row = np.concatenate(row_segs)
329
+ sparse_col = np.concatenate(col_segs)
330
+ x = A.__class__((sparse_data, (sparse_row, sparse_col)),
331
+ shape=b.shape, dtype=A.dtype)
332
+
333
+ if is_pydata_sparse:
334
+ x = pydata_sparse_cls.from_scipy_sparse(x)
335
+
336
+ return x
337
+
338
+
339
+ def splu(A, permc_spec=None, diag_pivot_thresh=None,
340
+ relax=None, panel_size=None, options=dict()):
341
+ """
342
+ Compute the LU decomposition of a sparse, square matrix.
343
+
344
+ Parameters
345
+ ----------
346
+ A : sparse matrix
347
+ Sparse matrix to factorize. Most efficient when provided in CSC
348
+ format. Other formats will be converted to CSC before factorization.
349
+ permc_spec : str, optional
350
+ How to permute the columns of the matrix for sparsity preservation.
351
+ (default: 'COLAMD')
352
+
353
+ - ``NATURAL``: natural ordering.
354
+ - ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
355
+ - ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
356
+ - ``COLAMD``: approximate minimum degree column ordering
357
+
358
+ diag_pivot_thresh : float, optional
359
+ Threshold used for a diagonal entry to be an acceptable pivot.
360
+ See SuperLU user's guide for details [1]_
361
+ relax : int, optional
362
+ Expert option for customizing the degree of relaxing supernodes.
363
+ See SuperLU user's guide for details [1]_
364
+ panel_size : int, optional
365
+ Expert option for customizing the panel size.
366
+ See SuperLU user's guide for details [1]_
367
+ options : dict, optional
368
+ Dictionary containing additional expert options to SuperLU.
369
+ See SuperLU user guide [1]_ (section 2.4 on the 'Options' argument)
370
+ for more details. For example, you can specify
371
+ ``options=dict(Equil=False, IterRefine='SINGLE'))``
372
+ to turn equilibration off and perform a single iterative refinement.
373
+
374
+ Returns
375
+ -------
376
+ invA : scipy.sparse.linalg.SuperLU
377
+ Object, which has a ``solve`` method.
378
+
379
+ See also
380
+ --------
381
+ spilu : incomplete LU decomposition
382
+
383
+ Notes
384
+ -----
385
+ This function uses the SuperLU library.
386
+
387
+ References
388
+ ----------
389
+ .. [1] SuperLU https://portal.nersc.gov/project/sparse/superlu/
390
+
391
+ Examples
392
+ --------
393
+ >>> import numpy as np
394
+ >>> from scipy.sparse import csc_matrix
395
+ >>> from scipy.sparse.linalg import splu
396
+ >>> A = csc_matrix([[1., 0., 0.], [5., 0., 2.], [0., -1., 0.]], dtype=float)
397
+ >>> B = splu(A)
398
+ >>> x = np.array([1., 2., 3.], dtype=float)
399
+ >>> B.solve(x)
400
+ array([ 1. , -3. , -1.5])
401
+ >>> A.dot(B.solve(x))
402
+ array([ 1., 2., 3.])
403
+ >>> B.solve(A.dot(x))
404
+ array([ 1., 2., 3.])
405
+ """
406
+
407
+ if is_pydata_spmatrix(A):
408
+ def csc_construct_func(*a, cls=type(A)):
409
+ return cls.from_scipy_sparse(csc_matrix(*a))
410
+ A = A.to_scipy_sparse().tocsc()
411
+ else:
412
+ csc_construct_func = csc_matrix
413
+
414
+ if not (issparse(A) and A.format == "csc"):
415
+ A = csc_matrix(A)
416
+ warn('splu converted its input to CSC format',
417
+ SparseEfficiencyWarning, stacklevel=2)
418
+
419
+ # sum duplicates for non-canonical format
420
+ A.sum_duplicates()
421
+ A = A._asfptype() # upcast to a floating point format
422
+
423
+ M, N = A.shape
424
+ if (M != N):
425
+ raise ValueError("can only factor square matrices") # is this true?
426
+
427
+ indices, indptr = _safe_downcast_indices(A)
428
+
429
+ _options = dict(DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec,
430
+ PanelSize=panel_size, Relax=relax)
431
+ if options is not None:
432
+ _options.update(options)
433
+
434
+ # Ensure that no column permutations are applied
435
+ if (_options["ColPerm"] == "NATURAL"):
436
+ _options["SymmetricMode"] = True
437
+
438
+ return _superlu.gstrf(N, A.nnz, A.data, indices, indptr,
439
+ csc_construct_func=csc_construct_func,
440
+ ilu=False, options=_options)
441
+
442
+
443
+ def spilu(A, drop_tol=None, fill_factor=None, drop_rule=None, permc_spec=None,
444
+ diag_pivot_thresh=None, relax=None, panel_size=None, options=None):
445
+ """
446
+ Compute an incomplete LU decomposition for a sparse, square matrix.
447
+
448
+ The resulting object is an approximation to the inverse of `A`.
449
+
450
+ Parameters
451
+ ----------
452
+ A : (N, N) array_like
453
+ Sparse matrix to factorize. Most efficient when provided in CSC format.
454
+ Other formats will be converted to CSC before factorization.
455
+ drop_tol : float, optional
456
+ Drop tolerance (0 <= tol <= 1) for an incomplete LU decomposition.
457
+ (default: 1e-4)
458
+ fill_factor : float, optional
459
+ Specifies the fill ratio upper bound (>= 1.0) for ILU. (default: 10)
460
+ drop_rule : str, optional
461
+ Comma-separated string of drop rules to use.
462
+ Available rules: ``basic``, ``prows``, ``column``, ``area``,
463
+ ``secondary``, ``dynamic``, ``interp``. (Default: ``basic,area``)
464
+
465
+ See SuperLU documentation for details.
466
+
467
+ Remaining other options
468
+ Same as for `splu`
469
+
470
+ Returns
471
+ -------
472
+ invA_approx : scipy.sparse.linalg.SuperLU
473
+ Object, which has a ``solve`` method.
474
+
475
+ See also
476
+ --------
477
+ splu : complete LU decomposition
478
+
479
+ Notes
480
+ -----
481
+ To improve the better approximation to the inverse, you may need to
482
+ increase `fill_factor` AND decrease `drop_tol`.
483
+
484
+ This function uses the SuperLU library.
485
+
486
+ Examples
487
+ --------
488
+ >>> import numpy as np
489
+ >>> from scipy.sparse import csc_matrix
490
+ >>> from scipy.sparse.linalg import spilu
491
+ >>> A = csc_matrix([[1., 0., 0.], [5., 0., 2.], [0., -1., 0.]], dtype=float)
492
+ >>> B = spilu(A)
493
+ >>> x = np.array([1., 2., 3.], dtype=float)
494
+ >>> B.solve(x)
495
+ array([ 1. , -3. , -1.5])
496
+ >>> A.dot(B.solve(x))
497
+ array([ 1., 2., 3.])
498
+ >>> B.solve(A.dot(x))
499
+ array([ 1., 2., 3.])
500
+ """
501
+
502
+ if is_pydata_spmatrix(A):
503
+ def csc_construct_func(*a, cls=type(A)):
504
+ return cls.from_scipy_sparse(csc_matrix(*a))
505
+ A = A.to_scipy_sparse().tocsc()
506
+ else:
507
+ csc_construct_func = csc_matrix
508
+
509
+ if not (issparse(A) and A.format == "csc"):
510
+ A = csc_matrix(A)
511
+ warn('spilu converted its input to CSC format',
512
+ SparseEfficiencyWarning, stacklevel=2)
513
+
514
+ # sum duplicates for non-canonical format
515
+ A.sum_duplicates()
516
+ A = A._asfptype() # upcast to a floating point format
517
+
518
+ M, N = A.shape
519
+ if (M != N):
520
+ raise ValueError("can only factor square matrices") # is this true?
521
+
522
+ indices, indptr = _safe_downcast_indices(A)
523
+
524
+ _options = dict(ILU_DropRule=drop_rule, ILU_DropTol=drop_tol,
525
+ ILU_FillFactor=fill_factor,
526
+ DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec,
527
+ PanelSize=panel_size, Relax=relax)
528
+ if options is not None:
529
+ _options.update(options)
530
+
531
+ # Ensure that no column permutations are applied
532
+ if (_options["ColPerm"] == "NATURAL"):
533
+ _options["SymmetricMode"] = True
534
+
535
+ return _superlu.gstrf(N, A.nnz, A.data, indices, indptr,
536
+ csc_construct_func=csc_construct_func,
537
+ ilu=True, options=_options)
538
+
539
+
540
+ def factorized(A):
541
+ """
542
+ Return a function for solving a sparse linear system, with A pre-factorized.
543
+
544
+ Parameters
545
+ ----------
546
+ A : (N, N) array_like
547
+ Input. A in CSC format is most efficient. A CSR format matrix will
548
+ be converted to CSC before factorization.
549
+
550
+ Returns
551
+ -------
552
+ solve : callable
553
+ To solve the linear system of equations given in `A`, the `solve`
554
+ callable should be passed an ndarray of shape (N,).
555
+
556
+ Examples
557
+ --------
558
+ >>> import numpy as np
559
+ >>> from scipy.sparse.linalg import factorized
560
+ >>> from scipy.sparse import csc_matrix
561
+ >>> A = np.array([[ 3. , 2. , -1. ],
562
+ ... [ 2. , -2. , 4. ],
563
+ ... [-1. , 0.5, -1. ]])
564
+ >>> solve = factorized(csc_matrix(A)) # Makes LU decomposition.
565
+ >>> rhs1 = np.array([1, -2, 0])
566
+ >>> solve(rhs1) # Uses the LU factors.
567
+ array([ 1., -2., -2.])
568
+
569
+ """
570
+ if is_pydata_spmatrix(A):
571
+ A = A.to_scipy_sparse().tocsc()
572
+
573
+ if useUmfpack:
574
+ if noScikit:
575
+ raise RuntimeError('Scikits.umfpack not installed.')
576
+
577
+ if not (issparse(A) and A.format == "csc"):
578
+ A = csc_matrix(A)
579
+ warn('splu converted its input to CSC format',
580
+ SparseEfficiencyWarning, stacklevel=2)
581
+
582
+ A = A._asfptype() # upcast to a floating point format
583
+
584
+ if A.dtype.char not in 'dD':
585
+ raise ValueError("convert matrix data to double, please, using"
586
+ " .astype(), or set linsolve.useUmfpack = False")
587
+
588
+ umf_family, A = _get_umf_family(A)
589
+ umf = umfpack.UmfpackContext(umf_family)
590
+
591
+ # Make LU decomposition.
592
+ umf.numeric(A)
593
+
594
+ def solve(b):
595
+ with np.errstate(divide="ignore", invalid="ignore"):
596
+ # Ignoring warnings with numpy >= 1.23.0, see gh-16523
597
+ result = umf.solve(umfpack.UMFPACK_A, A, b, autoTranspose=True)
598
+
599
+ return result
600
+
601
+ return solve
602
+ else:
603
+ return splu(A).solve
604
+
605
+
606
+ def spsolve_triangular(A, b, lower=True, overwrite_A=False, overwrite_b=False,
607
+ unit_diagonal=False):
608
+ """
609
+ Solve the equation ``A x = b`` for `x`, assuming A is a triangular matrix.
610
+
611
+ Parameters
612
+ ----------
613
+ A : (M, M) sparse matrix
614
+ A sparse square triangular matrix. Should be in CSR format.
615
+ b : (M,) or (M, N) array_like
616
+ Right-hand side matrix in ``A x = b``
617
+ lower : bool, optional
618
+ Whether `A` is a lower or upper triangular matrix.
619
+ Default is lower triangular matrix.
620
+ overwrite_A : bool, optional
621
+ Allow changing `A`. The indices of `A` are going to be sorted and zero
622
+ entries are going to be removed.
623
+ Enabling gives a performance gain. Default is False.
624
+ overwrite_b : bool, optional
625
+ Allow overwriting data in `b`.
626
+ Enabling gives a performance gain. Default is False.
627
+ If `overwrite_b` is True, it should be ensured that
628
+ `b` has an appropriate dtype to be able to store the result.
629
+ unit_diagonal : bool, optional
630
+ If True, diagonal elements of `a` are assumed to be 1 and will not be
631
+ referenced.
632
+
633
+ .. versionadded:: 1.4.0
634
+
635
+ Returns
636
+ -------
637
+ x : (M,) or (M, N) ndarray
638
+ Solution to the system ``A x = b``. Shape of return matches shape
639
+ of `b`.
640
+
641
+ Raises
642
+ ------
643
+ LinAlgError
644
+ If `A` is singular or not triangular.
645
+ ValueError
646
+ If shape of `A` or shape of `b` do not match the requirements.
647
+
648
+ Notes
649
+ -----
650
+ .. versionadded:: 0.19.0
651
+
652
+ Examples
653
+ --------
654
+ >>> import numpy as np
655
+ >>> from scipy.sparse import csr_matrix
656
+ >>> from scipy.sparse.linalg import spsolve_triangular
657
+ >>> A = csr_matrix([[3, 0, 0], [1, -1, 0], [2, 0, 1]], dtype=float)
658
+ >>> B = np.array([[2, 0], [-1, 0], [2, 0]], dtype=float)
659
+ >>> x = spsolve_triangular(A, B)
660
+ >>> np.allclose(A.dot(x), B)
661
+ True
662
+ """
663
+
664
+ if is_pydata_spmatrix(A):
665
+ A = A.to_scipy_sparse().tocsr()
666
+
667
+ # Check the input for correct type and format.
668
+ if not (issparse(A) and A.format == "csr"):
669
+ warn('CSR matrix format is required. Converting to CSR matrix.',
670
+ SparseEfficiencyWarning, stacklevel=2)
671
+ A = csr_matrix(A)
672
+ elif not overwrite_A:
673
+ A = A.copy()
674
+
675
+ if A.shape[0] != A.shape[1]:
676
+ raise ValueError(
677
+ f'A must be a square matrix but its shape is {A.shape}.')
678
+
679
+ # sum duplicates for non-canonical format
680
+ A.sum_duplicates()
681
+
682
+ b = np.asanyarray(b)
683
+
684
+ if b.ndim not in [1, 2]:
685
+ raise ValueError(
686
+ f'b must have 1 or 2 dims but its shape is {b.shape}.')
687
+ if A.shape[0] != b.shape[0]:
688
+ raise ValueError(
689
+ 'The size of the dimensions of A must be equal to '
690
+ 'the size of the first dimension of b but the shape of A is '
691
+ f'{A.shape} and the shape of b is {b.shape}.'
692
+ )
693
+
694
+ # Init x as (a copy of) b.
695
+ x_dtype = np.result_type(A.data, b, np.float64)
696
+ if overwrite_b:
697
+ if np.can_cast(b.dtype, x_dtype, casting='same_kind'):
698
+ x = b
699
+ else:
700
+ raise ValueError(
701
+ f'Cannot overwrite b (dtype {b.dtype}) with result '
702
+ f'of type {x_dtype}.'
703
+ )
704
+ else:
705
+ x = b.astype(x_dtype, copy=True)
706
+
707
+ # Choose forward or backward order.
708
+ if lower:
709
+ row_indices = range(len(b))
710
+ else:
711
+ row_indices = range(len(b) - 1, -1, -1)
712
+
713
+ # Fill x iteratively.
714
+ for i in row_indices:
715
+
716
+ # Get indices for i-th row.
717
+ indptr_start = A.indptr[i]
718
+ indptr_stop = A.indptr[i + 1]
719
+
720
+ if lower:
721
+ A_diagonal_index_row_i = indptr_stop - 1
722
+ A_off_diagonal_indices_row_i = slice(indptr_start, indptr_stop - 1)
723
+ else:
724
+ A_diagonal_index_row_i = indptr_start
725
+ A_off_diagonal_indices_row_i = slice(indptr_start + 1, indptr_stop)
726
+
727
+ # Check regularity and triangularity of A.
728
+ if not unit_diagonal and (indptr_stop <= indptr_start
729
+ or A.indices[A_diagonal_index_row_i] < i):
730
+ raise LinAlgError(
731
+ f'A is singular: diagonal {i} is zero.')
732
+ if not unit_diagonal and A.indices[A_diagonal_index_row_i] > i:
733
+ raise LinAlgError(
734
+ 'A is not triangular: A[{}, {}] is nonzero.'
735
+ ''.format(i, A.indices[A_diagonal_index_row_i]))
736
+
737
+ # Incorporate off-diagonal entries.
738
+ A_column_indices_in_row_i = A.indices[A_off_diagonal_indices_row_i]
739
+ A_values_in_row_i = A.data[A_off_diagonal_indices_row_i]
740
+ x[i] -= np.dot(x[A_column_indices_in_row_i].T, A_values_in_row_i)
741
+
742
+ # Compute i-th entry of x.
743
+ if not unit_diagonal:
744
+ x[i] /= A.data[A_diagonal_index_row_i]
745
+
746
+ return x
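
The `spilu` docstring above notes that the result only approximates the inverse of A; a common use of that approximation, not shown in this file, is as a preconditioner for the iterative solvers in `scipy.sparse.linalg`. A sketch of that pattern with an illustrative banded test matrix and tolerances:

import numpy as np
from scipy.sparse import csc_matrix, diags
from scipy.sparse.linalg import LinearOperator, gmres, spilu

# A diagonally dominant, slightly non-symmetric banded test matrix.
n = 200
A = csc_matrix(diags([-1.0, 2.5, -0.8], [-1, 0, 1], shape=(n, n)))
b = np.ones(n)

ilu = spilu(A, drop_tol=1e-5, fill_factor=15)
M = LinearOperator(A.shape, matvec=ilu.solve)   # x -> approximate A^{-1} x

x, info = gmres(A, b, M=M)       # info == 0 signals convergence
print(info, np.linalg.norm(A @ x - b))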
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (198 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/__pycache__/test_linsolve.cpython-310.pyc ADDED
Binary file (26 kB). View file
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/test_linsolve.py ADDED
@@ -0,0 +1,805 @@
1
+ import sys
2
+ import threading
3
+
4
+ import numpy as np
5
+ from numpy import array, finfo, arange, eye, all, unique, ones, dot
6
+ import numpy.random as random
7
+ from numpy.testing import (
8
+ assert_array_almost_equal, assert_almost_equal,
9
+ assert_equal, assert_array_equal, assert_, assert_allclose,
10
+ assert_warns, suppress_warnings)
11
+ import pytest
12
+ from pytest import raises as assert_raises
13
+
14
+ import scipy.linalg
15
+ from scipy.linalg import norm, inv
16
+ from scipy.sparse import (spdiags, SparseEfficiencyWarning, csc_matrix,
17
+ csr_matrix, identity, issparse, dok_matrix, lil_matrix, bsr_matrix)
18
+ from scipy.sparse.linalg import SuperLU
19
+ from scipy.sparse.linalg._dsolve import (spsolve, use_solver, splu, spilu,
20
+ MatrixRankWarning, _superlu, spsolve_triangular, factorized)
21
+ import scipy.sparse
22
+
23
+ from scipy._lib._testutils import check_free_memory
24
+ from scipy._lib._util import ComplexWarning
25
+
26
+
27
+ sup_sparse_efficiency = suppress_warnings()
28
+ sup_sparse_efficiency.filter(SparseEfficiencyWarning)
29
+
30
+ # scikits.umfpack is not a SciPy dependency but it is optionally used in
31
+ # dsolve, so check whether it's available
32
+ try:
33
+ import scikits.umfpack as umfpack
34
+ has_umfpack = True
35
+ except ImportError:
36
+ has_umfpack = False
37
+
38
+ def toarray(a):
39
+ if issparse(a):
40
+ return a.toarray()
41
+ else:
42
+ return a
43
+
44
+
45
+ def setup_bug_8278():
46
+ N = 2 ** 6
47
+ h = 1/N
48
+ Ah1D = scipy.sparse.diags([-1, 2, -1], [-1, 0, 1],
49
+ shape=(N-1, N-1))/(h**2)
50
+ eyeN = scipy.sparse.eye(N - 1)
51
+ A = (scipy.sparse.kron(eyeN, scipy.sparse.kron(eyeN, Ah1D))
52
+ + scipy.sparse.kron(eyeN, scipy.sparse.kron(Ah1D, eyeN))
53
+ + scipy.sparse.kron(Ah1D, scipy.sparse.kron(eyeN, eyeN)))
54
+ b = np.random.rand((N-1)**3)
55
+ return A, b
56
+
57
+
58
+ class TestFactorized:
59
+ def setup_method(self):
60
+ n = 5
61
+ d = arange(n) + 1
62
+ self.n = n
63
+ self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n).tocsc()
64
+ random.seed(1234)
65
+
66
+ def _check_singular(self):
67
+ A = csc_matrix((5,5), dtype='d')
68
+ b = ones(5)
69
+ assert_array_almost_equal(0. * b, factorized(A)(b))
70
+
71
+ def _check_non_singular(self):
72
+ # Make a diagonal dominant, to make sure it is not singular
73
+ n = 5
74
+ a = csc_matrix(random.rand(n, n))
75
+ b = ones(n)
76
+
77
+ expected = splu(a).solve(b)
78
+ assert_array_almost_equal(factorized(a)(b), expected)
79
+
80
+ def test_singular_without_umfpack(self):
81
+ use_solver(useUmfpack=False)
82
+ with assert_raises(RuntimeError, match="Factor is exactly singular"):
83
+ self._check_singular()
84
+
85
+ @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
86
+ def test_singular_with_umfpack(self):
87
+ use_solver(useUmfpack=True)
88
+ with suppress_warnings() as sup:
89
+ sup.filter(RuntimeWarning, "divide by zero encountered in double_scalars")
90
+ assert_warns(umfpack.UmfpackWarning, self._check_singular)
91
+
92
+ def test_non_singular_without_umfpack(self):
93
+ use_solver(useUmfpack=False)
94
+ self._check_non_singular()
95
+
96
+ @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
97
+ def test_non_singular_with_umfpack(self):
98
+ use_solver(useUmfpack=True)
99
+ self._check_non_singular()
100
+
101
+ def test_cannot_factorize_nonsquare_matrix_without_umfpack(self):
102
+ use_solver(useUmfpack=False)
103
+ msg = "can only factor square matrices"
104
+ with assert_raises(ValueError, match=msg):
105
+ factorized(self.A[:, :4])
106
+
107
+ @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
108
+ def test_factorizes_nonsquare_matrix_with_umfpack(self):
109
+ use_solver(useUmfpack=True)
110
+ # does not raise
111
+ factorized(self.A[:,:4])
112
+
113
+ def test_call_with_incorrectly_sized_matrix_without_umfpack(self):
114
+ use_solver(useUmfpack=False)
115
+ solve = factorized(self.A)
116
+ b = random.rand(4)
117
+ B = random.rand(4, 3)
118
+ BB = random.rand(self.n, 3, 9)
119
+
120
+ with assert_raises(ValueError, match="is of incompatible size"):
121
+ solve(b)
122
+ with assert_raises(ValueError, match="is of incompatible size"):
123
+ solve(B)
124
+ with assert_raises(ValueError,
125
+ match="object too deep for desired array"):
126
+ solve(BB)
127
+
128
+ @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
129
+ def test_call_with_incorrectly_sized_matrix_with_umfpack(self):
130
+ use_solver(useUmfpack=True)
131
+ solve = factorized(self.A)
132
+ b = random.rand(4)
133
+ B = random.rand(4, 3)
134
+ BB = random.rand(self.n, 3, 9)
135
+
136
+ # does not raise
137
+ solve(b)
138
+ msg = "object too deep for desired array"
139
+ with assert_raises(ValueError, match=msg):
140
+ solve(B)
141
+ with assert_raises(ValueError, match=msg):
142
+ solve(BB)
143
+
144
+ def test_call_with_cast_to_complex_without_umfpack(self):
145
+ use_solver(useUmfpack=False)
146
+ solve = factorized(self.A)
147
+ b = random.rand(4)
148
+ for t in [np.complex64, np.complex128]:
149
+ with assert_raises(TypeError, match="Cannot cast array data"):
150
+ solve(b.astype(t))
151
+
152
+ @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
153
+ def test_call_with_cast_to_complex_with_umfpack(self):
154
+ use_solver(useUmfpack=True)
155
+ solve = factorized(self.A)
156
+ b = random.rand(4)
157
+ for t in [np.complex64, np.complex128]:
158
+ assert_warns(ComplexWarning, solve, b.astype(t))
159
+
160
+ @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
161
+ def test_assume_sorted_indices_flag(self):
162
+ # a sparse matrix with unsorted indices
163
+ unsorted_inds = np.array([2, 0, 1, 0])
164
+ data = np.array([10, 16, 5, 0.4])
165
+ indptr = np.array([0, 1, 2, 4])
166
+ A = csc_matrix((data, unsorted_inds, indptr), (3, 3))
167
+ b = ones(3)
168
+
169
+ # should raise when incorrectly assuming indices are sorted
170
+ use_solver(useUmfpack=True, assumeSortedIndices=True)
171
+ with assert_raises(RuntimeError,
172
+ match="UMFPACK_ERROR_invalid_matrix"):
173
+ factorized(A)
174
+
175
+ # should sort indices and succeed when not assuming indices are sorted
176
+ use_solver(useUmfpack=True, assumeSortedIndices=False)
177
+ expected = splu(A.copy()).solve(b)
178
+
179
+ assert_equal(A.has_sorted_indices, 0)
180
+ assert_array_almost_equal(factorized(A)(b), expected)
181
+
182
+ @pytest.mark.slow
183
+ @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
184
+ def test_bug_8278(self):
185
+ check_free_memory(8000)
186
+ use_solver(useUmfpack=True)
187
+ A, b = setup_bug_8278()
188
+ A = A.tocsc()
189
+ f = factorized(A)
190
+ x = f(b)
191
+ assert_array_almost_equal(A @ x, b)
192
+
193
+
194
+ class TestLinsolve:
195
+ def setup_method(self):
196
+ use_solver(useUmfpack=False)
197
+
198
+ def test_singular(self):
199
+ A = csc_matrix((5,5), dtype='d')
200
+ b = array([1, 2, 3, 4, 5],dtype='d')
201
+ with suppress_warnings() as sup:
202
+ sup.filter(MatrixRankWarning, "Matrix is exactly singular")
203
+ x = spsolve(A, b)
204
+ assert_(not np.isfinite(x).any())
205
+
206
+ def test_singular_gh_3312(self):
207
+ # "Bad" test case that leads SuperLU to call LAPACK with invalid
208
+ # arguments. Check that it fails moderately gracefully.
209
+ ij = np.array([(17, 0), (17, 6), (17, 12), (10, 13)], dtype=np.int32)
210
+ v = np.array([0.284213, 0.94933781, 0.15767017, 0.38797296])
211
+ A = csc_matrix((v, ij.T), shape=(20, 20))
212
+ b = np.arange(20)
213
+
214
+ try:
215
+ # should either raise a runtime error or return value
216
+ # appropriate for singular input (which yields the warning)
217
+ with suppress_warnings() as sup:
218
+ sup.filter(MatrixRankWarning, "Matrix is exactly singular")
219
+ x = spsolve(A, b)
220
+ assert not np.isfinite(x).any()
221
+ except RuntimeError:
222
+ pass
223
+
224
+ @pytest.mark.parametrize('format', ['csc', 'csr'])
225
+ @pytest.mark.parametrize('idx_dtype', [np.int32, np.int64])
226
+ def test_twodiags(self, format: str, idx_dtype: np.dtype):
227
+ A = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5,
228
+ format=format)
229
+ b = array([1, 2, 3, 4, 5])
230
+
231
+ # condition number of A
232
+ cond_A = norm(A.toarray(), 2) * norm(inv(A.toarray()), 2)
233
+
234
+ for t in ['f','d','F','D']:
235
+ eps = finfo(t).eps # floating point epsilon
236
+ b = b.astype(t)
237
+ Asp = A.astype(t)
238
+ Asp.indices = Asp.indices.astype(idx_dtype, copy=False)
239
+ Asp.indptr = Asp.indptr.astype(idx_dtype, copy=False)
240
+
241
+ x = spsolve(Asp, b)
242
+ assert_(norm(b - Asp@x) < 10 * cond_A * eps)
243
+
244
+ def test_bvector_smoketest(self):
245
+ Adense = array([[0., 1., 1.],
246
+ [1., 0., 1.],
247
+ [0., 0., 1.]])
248
+ As = csc_matrix(Adense)
249
+ random.seed(1234)
250
+ x = random.randn(3)
251
+ b = As@x
252
+ x2 = spsolve(As, b)
253
+
254
+ assert_array_almost_equal(x, x2)
255
+
256
+ def test_bmatrix_smoketest(self):
257
+ Adense = array([[0., 1., 1.],
258
+ [1., 0., 1.],
259
+ [0., 0., 1.]])
260
+ As = csc_matrix(Adense)
261
+ random.seed(1234)
262
+ x = random.randn(3, 4)
263
+ Bdense = As.dot(x)
264
+ Bs = csc_matrix(Bdense)
265
+ x2 = spsolve(As, Bs)
266
+ assert_array_almost_equal(x, x2.toarray())
267
+
268
+ @sup_sparse_efficiency
269
+ def test_non_square(self):
270
+ # A is not square.
271
+ A = ones((3, 4))
272
+ b = ones((4, 1))
273
+ assert_raises(ValueError, spsolve, A, b)
274
+ # A2 and b2 have incompatible shapes.
275
+ A2 = csc_matrix(eye(3))
276
+ b2 = array([1.0, 2.0])
277
+ assert_raises(ValueError, spsolve, A2, b2)
278
+
279
+ @sup_sparse_efficiency
280
+ def test_example_comparison(self):
281
+ row = array([0,0,1,2,2,2])
282
+ col = array([0,2,2,0,1,2])
283
+ data = array([1,2,3,-4,5,6])
284
+ sM = csr_matrix((data,(row,col)), shape=(3,3), dtype=float)
285
+ M = sM.toarray()
286
+
287
+ row = array([0,0,1,1,0,0])
288
+ col = array([0,2,1,1,0,0])
289
+ data = array([1,1,1,1,1,1])
290
+ sN = csr_matrix((data, (row,col)), shape=(3,3), dtype=float)
291
+ N = sN.toarray()
292
+
293
+ sX = spsolve(sM, sN)
294
+ X = scipy.linalg.solve(M, N)
295
+
296
+ assert_array_almost_equal(X, sX.toarray())
297
+
298
+ @sup_sparse_efficiency
299
+ @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
300
+ def test_shape_compatibility(self):
301
+ use_solver(useUmfpack=True)
302
+ A = csc_matrix([[1., 0], [0, 2]])
303
+ bs = [
304
+ [1, 6],
305
+ array([1, 6]),
306
+ [[1], [6]],
307
+ array([[1], [6]]),
308
+ csc_matrix([[1], [6]]),
309
+ csr_matrix([[1], [6]]),
310
+ dok_matrix([[1], [6]]),
311
+ bsr_matrix([[1], [6]]),
312
+ array([[1., 2., 3.], [6., 8., 10.]]),
313
+ csc_matrix([[1., 2., 3.], [6., 8., 10.]]),
314
+ csr_matrix([[1., 2., 3.], [6., 8., 10.]]),
315
+ dok_matrix([[1., 2., 3.], [6., 8., 10.]]),
316
+ bsr_matrix([[1., 2., 3.], [6., 8., 10.]]),
317
+ ]
318
+
319
+ for b in bs:
320
+ x = np.linalg.solve(A.toarray(), toarray(b))
321
+ for spmattype in [csc_matrix, csr_matrix, dok_matrix, lil_matrix]:
322
+ x1 = spsolve(spmattype(A), b, use_umfpack=True)
323
+ x2 = spsolve(spmattype(A), b, use_umfpack=False)
324
+
325
+ # check solution
326
+ if x.ndim == 2 and x.shape[1] == 1:
327
+ # interprets also these as "vectors"
328
+ x = x.ravel()
329
+
330
+ assert_array_almost_equal(toarray(x1), x,
331
+ err_msg=repr((b, spmattype, 1)))
332
+ assert_array_almost_equal(toarray(x2), x,
333
+ err_msg=repr((b, spmattype, 2)))
334
+
335
+ # dense vs. sparse output ("vectors" are always dense)
336
+ if issparse(b) and x.ndim > 1:
337
+ assert_(issparse(x1), repr((b, spmattype, 1)))
338
+ assert_(issparse(x2), repr((b, spmattype, 2)))
339
+ else:
340
+ assert_(isinstance(x1, np.ndarray), repr((b, spmattype, 1)))
341
+ assert_(isinstance(x2, np.ndarray), repr((b, spmattype, 2)))
342
+
343
+ # check output shape
344
+ if x.ndim == 1:
345
+ # "vector"
346
+ assert_equal(x1.shape, (A.shape[1],))
347
+ assert_equal(x2.shape, (A.shape[1],))
348
+ else:
349
+ # "matrix"
350
+ assert_equal(x1.shape, x.shape)
351
+ assert_equal(x2.shape, x.shape)
352
+
353
+ A = csc_matrix((3, 3))
354
+ b = csc_matrix((1, 3))
355
+ assert_raises(ValueError, spsolve, A, b)
356
+
357
+ @sup_sparse_efficiency
358
+ def test_ndarray_support(self):
359
+ A = array([[1., 2.], [2., 0.]])
360
+ x = array([[1., 1.], [0.5, -0.5]])
361
+ b = array([[2., 0.], [2., 2.]])
362
+
363
+ assert_array_almost_equal(x, spsolve(A, b))
364
+
365
+ def test_gssv_badinput(self):
366
+ N = 10
367
+ d = arange(N) + 1.0
368
+ A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), N, N)
369
+
370
+ for spmatrix in (csc_matrix, csr_matrix):
371
+ A = spmatrix(A)
372
+ b = np.arange(N)
373
+
374
+ def not_c_contig(x):
375
+ return x.repeat(2)[::2]
376
+
377
+ def not_1dim(x):
378
+ return x[:,None]
379
+
380
+ def bad_type(x):
381
+ return x.astype(bool)
382
+
383
+ def too_short(x):
384
+ return x[:-1]
385
+
386
+ badops = [not_c_contig, not_1dim, bad_type, too_short]
387
+
388
+ for badop in badops:
389
+ msg = f"{spmatrix!r} {badop!r}"
390
+ # Not C-contiguous
391
+ assert_raises((ValueError, TypeError), _superlu.gssv,
392
+ N, A.nnz, badop(A.data), A.indices, A.indptr,
393
+ b, int(spmatrix == csc_matrix), err_msg=msg)
394
+ assert_raises((ValueError, TypeError), _superlu.gssv,
395
+ N, A.nnz, A.data, badop(A.indices), A.indptr,
396
+ b, int(spmatrix == csc_matrix), err_msg=msg)
397
+ assert_raises((ValueError, TypeError), _superlu.gssv,
398
+ N, A.nnz, A.data, A.indices, badop(A.indptr),
399
+ b, int(spmatrix == csc_matrix), err_msg=msg)
400
+
401
+ def test_sparsity_preservation(self):
402
+ ident = csc_matrix([
403
+ [1, 0, 0],
404
+ [0, 1, 0],
405
+ [0, 0, 1]])
406
+ b = csc_matrix([
407
+ [0, 1],
408
+ [1, 0],
409
+ [0, 0]])
410
+ x = spsolve(ident, b)
411
+ assert_equal(ident.nnz, 3)
412
+ assert_equal(b.nnz, 2)
413
+ assert_equal(x.nnz, 2)
414
+ assert_allclose(x.A, b.A, atol=1e-12, rtol=1e-12)
415
+
416
+ def test_dtype_cast(self):
417
+ A_real = scipy.sparse.csr_matrix([[1, 2, 0],
418
+ [0, 0, 3],
419
+ [4, 0, 5]])
420
+ A_complex = scipy.sparse.csr_matrix([[1, 2, 0],
421
+ [0, 0, 3],
422
+ [4, 0, 5 + 1j]])
423
+ b_real = np.array([1,1,1])
424
+ b_complex = np.array([1,1,1]) + 1j*np.array([1,1,1])
425
+ x = spsolve(A_real, b_real)
426
+ assert_(np.issubdtype(x.dtype, np.floating))
427
+ x = spsolve(A_real, b_complex)
428
+ assert_(np.issubdtype(x.dtype, np.complexfloating))
429
+ x = spsolve(A_complex, b_real)
430
+ assert_(np.issubdtype(x.dtype, np.complexfloating))
431
+ x = spsolve(A_complex, b_complex)
432
+ assert_(np.issubdtype(x.dtype, np.complexfloating))
433
+
434
+ @pytest.mark.slow
435
+ @pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
436
+ def test_bug_8278(self):
437
+ check_free_memory(8000)
438
+ use_solver(useUmfpack=True)
439
+ A, b = setup_bug_8278()
440
+ x = spsolve(A, b)
441
+ assert_array_almost_equal(A @ x, b)
442
+
443
+
444
+ class TestSplu:
445
+ def setup_method(self):
446
+ use_solver(useUmfpack=False)
447
+ n = 40
448
+ d = arange(n) + 1
449
+ self.n = n
450
+ self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n, format='csc')
451
+ random.seed(1234)
452
+
453
+ def _smoketest(self, spxlu, check, dtype, idx_dtype):
454
+ if np.issubdtype(dtype, np.complexfloating):
455
+ A = self.A + 1j*self.A.T
456
+ else:
457
+ A = self.A
458
+
459
+ A = A.astype(dtype)
460
+ A.indices = A.indices.astype(idx_dtype, copy=False)
461
+ A.indptr = A.indptr.astype(idx_dtype, copy=False)
462
+ lu = spxlu(A)
463
+
464
+ rng = random.RandomState(1234)
465
+
466
+ # Input shapes
467
+ for k in [None, 1, 2, self.n, self.n+2]:
468
+ msg = f"k={k!r}"
469
+
470
+ if k is None:
471
+ b = rng.rand(self.n)
472
+ else:
473
+ b = rng.rand(self.n, k)
474
+
475
+ if np.issubdtype(dtype, np.complexfloating):
476
+ b = b + 1j*rng.rand(*b.shape)
477
+ b = b.astype(dtype)
478
+
479
+ x = lu.solve(b)
480
+ check(A, b, x, msg)
481
+
482
+ x = lu.solve(b, 'T')
483
+ check(A.T, b, x, msg)
484
+
485
+ x = lu.solve(b, 'H')
486
+ check(A.T.conj(), b, x, msg)
487
+
488
+ @sup_sparse_efficiency
489
+ def test_splu_smoketest(self):
490
+ self._internal_test_splu_smoketest()
491
+
492
+ def _internal_test_splu_smoketest(self):
493
+ # Check that splu works at all
494
+ def check(A, b, x, msg=""):
495
+ eps = np.finfo(A.dtype).eps
496
+ r = A @ x
497
+ assert_(abs(r - b).max() < 1e3*eps, msg)
498
+
499
+ for dtype in [np.float32, np.float64, np.complex64, np.complex128]:
500
+ for idx_dtype in [np.int32, np.int64]:
501
+ self._smoketest(splu, check, dtype, idx_dtype)
502
+
503
+ @sup_sparse_efficiency
504
+ def test_spilu_smoketest(self):
505
+ self._internal_test_spilu_smoketest()
506
+
507
+ def _internal_test_spilu_smoketest(self):
508
+ errors = []
509
+
510
+ def check(A, b, x, msg=""):
511
+ r = A @ x
512
+ err = abs(r - b).max()
513
+ assert_(err < 1e-2, msg)
514
+ if b.dtype in (np.float64, np.complex128):
515
+ errors.append(err)
516
+
517
+ for dtype in [np.float32, np.float64, np.complex64, np.complex128]:
518
+ for idx_dtype in [np.int32, np.int64]:
519
+ self._smoketest(spilu, check, dtype, idx_dtype)
520
+
521
+ assert_(max(errors) > 1e-5)
522
+
523
+ @sup_sparse_efficiency
524
+ def test_spilu_drop_rule(self):
525
+ # Test passing in the drop_rule argument to spilu.
526
+ A = identity(2)
527
+
528
+ rules = [
529
+ b'basic,area'.decode('ascii'), # unicode
530
+ b'basic,area', # ascii
531
+ [b'basic', b'area'.decode('ascii')]
532
+ ]
533
+ for rule in rules:
534
+ # Argument should be accepted
535
+ assert_(isinstance(spilu(A, drop_rule=rule), SuperLU))
536
+
537
+ def test_splu_nnz0(self):
538
+ A = csc_matrix((5,5), dtype='d')
539
+ assert_raises(RuntimeError, splu, A)
540
+
541
+ def test_spilu_nnz0(self):
542
+ A = csc_matrix((5,5), dtype='d')
543
+ assert_raises(RuntimeError, spilu, A)
544
+
545
+ def test_splu_basic(self):
546
+ # Test basic splu functionality.
547
+ n = 30
548
+ rng = random.RandomState(12)
549
+ a = rng.rand(n, n)
550
+ a[a < 0.95] = 0
551
+ # First test with a singular matrix
552
+ a[:, 0] = 0
553
+ a_ = csc_matrix(a)
554
+ # Matrix is exactly singular
555
+ assert_raises(RuntimeError, splu, a_)
556
+
557
+ # Make a diagonal dominant, to make sure it is not singular
558
+ a += 4*eye(n)
559
+ a_ = csc_matrix(a)
560
+ lu = splu(a_)
561
+ b = ones(n)
562
+ x = lu.solve(b)
563
+ assert_almost_equal(dot(a, x), b)
564
+
565
+ def test_splu_perm(self):
566
+ # Test the permutation vectors exposed by splu.
567
+ n = 30
568
+ a = random.random((n, n))
569
+ a[a < 0.95] = 0
570
+ # Make a diagonal dominant, to make sure it is not singular
571
+ a += 4*eye(n)
572
+ a_ = csc_matrix(a)
573
+ lu = splu(a_)
574
+ # Check that the permutation indices do belong to [0, n-1].
575
+ for perm in (lu.perm_r, lu.perm_c):
576
+ assert_(all(perm > -1))
577
+ assert_(all(perm < n))
578
+ assert_equal(len(unique(perm)), len(perm))
579
+
580
+ # Now make a symmetric, and test that the two permutation vectors are
581
+ # the same
582
+ # Note: a += a.T relies on undefined behavior.
583
+ a = a + a.T
584
+ a_ = csc_matrix(a)
585
+ lu = splu(a_)
586
+ assert_array_equal(lu.perm_r, lu.perm_c)
587
+
588
+ @pytest.mark.parametrize("splu_fun, rtol", [(splu, 1e-7), (spilu, 1e-1)])
589
+ def test_natural_permc(self, splu_fun, rtol):
590
+ # Test that the "NATURAL" permc_spec does not permute the matrix
591
+ np.random.seed(42)
592
+ n = 500
593
+ p = 0.01
594
+ A = scipy.sparse.random(n, n, p)
595
+ x = np.random.rand(n)
596
+ # Make A diagonal dominant to make sure it is not singular
597
+ A += (n+1)*scipy.sparse.identity(n)
598
+ A_ = csc_matrix(A)
599
+ b = A_ @ x
600
+
601
+ # without permc_spec, permutation is not identity
602
+ lu = splu_fun(A_)
603
+ assert_(np.any(lu.perm_c != np.arange(n)))
604
+
605
+ # with permc_spec="NATURAL", permutation is identity
606
+ lu = splu_fun(A_, permc_spec="NATURAL")
607
+ assert_array_equal(lu.perm_c, np.arange(n))
608
+
609
+ # Also, lu decomposition is valid
610
+ x2 = lu.solve(b)
611
+ assert_allclose(x, x2, rtol=rtol)
612
+
613
+ @pytest.mark.skipif(not hasattr(sys, 'getrefcount'), reason="no sys.getrefcount")
614
+ def test_lu_refcount(self):
615
+ # Test that we are keeping track of the reference count with splu.
616
+ n = 30
617
+ a = random.random((n, n))
618
+ a[a < 0.95] = 0
619
+ # Make a diagonal dominant, to make sure it is not singular
620
+ a += 4*eye(n)
621
+ a_ = csc_matrix(a)
622
+ lu = splu(a_)
623
+
624
+ # And now test that we don't have a refcount bug
625
+ rc = sys.getrefcount(lu)
626
+ for attr in ('perm_r', 'perm_c'):
627
+ perm = getattr(lu, attr)
628
+ assert_equal(sys.getrefcount(lu), rc + 1)
629
+ del perm
630
+ assert_equal(sys.getrefcount(lu), rc)
631
+
632
+ def test_bad_inputs(self):
633
+ A = self.A.tocsc()
634
+
635
+ assert_raises(ValueError, splu, A[:,:4])
636
+ assert_raises(ValueError, spilu, A[:,:4])
637
+
638
+ for lu in [splu(A), spilu(A)]:
639
+ b = random.rand(42)
640
+ B = random.rand(42, 3)
641
+ BB = random.rand(self.n, 3, 9)
642
+ assert_raises(ValueError, lu.solve, b)
643
+ assert_raises(ValueError, lu.solve, B)
644
+ assert_raises(ValueError, lu.solve, BB)
645
+ assert_raises(TypeError, lu.solve,
646
+ b.astype(np.complex64))
647
+ assert_raises(TypeError, lu.solve,
648
+ b.astype(np.complex128))
649
+
650
+ @sup_sparse_efficiency
651
+ def test_superlu_dlamch_i386_nan(self):
652
+ # SuperLU 4.3 calls some functions returning floats without
653
+ # declaring them. On i386@linux call convention, this fails to
654
+ # clear floating point registers after call. As a result, NaN
655
+ # can appear in the next floating point operation made.
656
+ #
657
+ # Here's a test case that triggered the issue.
658
+ n = 8
659
+ d = np.arange(n) + 1
660
+ A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n)
661
+ A = A.astype(np.float32)
662
+ spilu(A)
663
+ A = A + 1j*A
664
+ B = A.A
665
+ assert_(not np.isnan(B).any())
666
+
667
+ @sup_sparse_efficiency
668
+ def test_lu_attr(self):
669
+
670
+ def check(dtype, complex_2=False):
671
+ A = self.A.astype(dtype)
672
+
673
+ if complex_2:
674
+ A = A + 1j*A.T
675
+
676
+ n = A.shape[0]
677
+ lu = splu(A)
678
+
679
+ # Check that the decomposition is as advertised
680
+
681
+ Pc = np.zeros((n, n))
682
+ Pc[np.arange(n), lu.perm_c] = 1
683
+
684
+ Pr = np.zeros((n, n))
685
+ Pr[lu.perm_r, np.arange(n)] = 1
686
+
687
+ Ad = A.toarray()
688
+ lhs = Pr.dot(Ad).dot(Pc)
689
+ rhs = (lu.L @ lu.U).toarray()
690
+
691
+ eps = np.finfo(dtype).eps
692
+
693
+ assert_allclose(lhs, rhs, atol=100*eps)
694
+
695
+ check(np.float32)
696
+ check(np.float64)
697
+ check(np.complex64)
698
+ check(np.complex128)
699
+ check(np.complex64, True)
700
+ check(np.complex128, True)
701
+
702
+ @pytest.mark.slow
703
+ @sup_sparse_efficiency
704
+ def test_threads_parallel(self):
705
+ oks = []
706
+
707
+ def worker():
708
+ try:
709
+ self.test_splu_basic()
710
+ self._internal_test_splu_smoketest()
711
+ self._internal_test_spilu_smoketest()
712
+ oks.append(True)
713
+ except Exception:
714
+ pass
715
+
716
+ threads = [threading.Thread(target=worker)
717
+ for k in range(20)]
718
+ for t in threads:
719
+ t.start()
720
+ for t in threads:
721
+ t.join()
722
+
723
+ assert_equal(len(oks), 20)
724
+
725
+
726
+ class TestSpsolveTriangular:
727
+ def setup_method(self):
728
+ use_solver(useUmfpack=False)
729
+
730
+ def test_zero_diagonal(self):
731
+ n = 5
732
+ rng = np.random.default_rng(43876432987)
733
+ A = rng.standard_normal((n, n))
734
+ b = np.arange(n)
735
+ A = scipy.sparse.tril(A, k=0, format='csr')
736
+
737
+ x = spsolve_triangular(A, b, unit_diagonal=True, lower=True)
738
+
739
+ A.setdiag(1)
740
+ assert_allclose(A.dot(x), b)
741
+
742
+ # Regression test from gh-15199
743
+ A = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0]], dtype=np.float64)
744
+ b = np.array([1., 2., 3.])
745
+ with suppress_warnings() as sup:
746
+ sup.filter(SparseEfficiencyWarning, "CSR matrix format is")
747
+ spsolve_triangular(A, b, unit_diagonal=True)
748
+
749
+ def test_singular(self):
750
+ n = 5
751
+ A = csr_matrix((n, n))
752
+ b = np.arange(n)
753
+ for lower in (True, False):
754
+ assert_raises(scipy.linalg.LinAlgError,
755
+ spsolve_triangular, A, b, lower=lower)
756
+
757
+ @sup_sparse_efficiency
758
+ def test_bad_shape(self):
759
+ # A is not square.
760
+ A = np.zeros((3, 4))
761
+ b = ones((4, 1))
762
+ assert_raises(ValueError, spsolve_triangular, A, b)
763
+ # A2 and b2 have incompatible shapes.
764
+ A2 = csr_matrix(eye(3))
765
+ b2 = array([1.0, 2.0])
766
+ assert_raises(ValueError, spsolve_triangular, A2, b2)
767
+
768
+ @sup_sparse_efficiency
769
+ def test_input_types(self):
770
+ A = array([[1., 0.], [1., 2.]])
771
+ b = array([[2., 0.], [2., 2.]])
772
+ for matrix_type in (array, csc_matrix, csr_matrix):
773
+ x = spsolve_triangular(matrix_type(A), b, lower=True)
774
+ assert_array_almost_equal(A.dot(x), b)
775
+
776
+ @pytest.mark.slow
777
+ @pytest.mark.timeout(120) # prerelease_deps_coverage_64bit_blas job
778
+ @sup_sparse_efficiency
779
+ def test_random(self):
780
+ def random_triangle_matrix(n, lower=True):
781
+ A = scipy.sparse.random(n, n, density=0.1, format='coo')
782
+ if lower:
783
+ A = scipy.sparse.tril(A)
784
+ else:
785
+ A = scipy.sparse.triu(A)
786
+ A = A.tocsr(copy=False)
787
+ for i in range(n):
788
+ A[i, i] = np.random.rand() + 1
789
+ return A
790
+
791
+ np.random.seed(1234)
792
+ for lower in (True, False):
793
+ for n in (10, 10**2, 10**3):
794
+ A = random_triangle_matrix(n, lower=lower)
795
+ for m in (1, 10):
796
+ for b in (np.random.rand(n, m),
797
+ np.random.randint(-9, 9, (n, m)),
798
+ np.random.randint(-9, 9, (n, m)) +
799
+ np.random.randint(-9, 9, (n, m)) * 1j):
800
+ x = spsolve_triangular(A, b, lower=lower)
801
+ assert_array_almost_equal(A.dot(x), b)
802
+ x = spsolve_triangular(A, b, lower=lower,
803
+ unit_diagonal=True)
804
+ A.setdiag(1)
805
+ assert_array_almost_equal(A.dot(x), b)
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_special_sparse_arrays.py ADDED
@@ -0,0 +1,948 @@
1
+ import numpy as np
2
+ from scipy.sparse.linalg import LinearOperator
3
+ from scipy.sparse import kron, eye, dia_array
4
+
5
+ __all__ = ['LaplacianNd']
6
+ # Sakurai and Mikota classes are intended for tests and benchmarks
7
+ # and explicitly not included in the public API of this module.
8
+
9
+
10
+ class LaplacianNd(LinearOperator):
11
+ """
12
+ The grid Laplacian in ``N`` dimensions and its eigenvalues/eigenvectors.
13
+
14
+ Construct Laplacian on a uniform rectangular grid in `N` dimensions
15
+ and output its eigenvalues and eigenvectors.
16
+ The Laplacian ``L`` is a square, negative definite, real symmetric array
17
+ with signed integer entries and zeros otherwise.
18
+
19
+ Parameters
20
+ ----------
21
+ grid_shape : tuple
22
+ A tuple of integers of length ``N`` (corresponding to the dimension of
23
+ the Laplacian), where each entry gives the size of that dimension. The
24
+ Laplacian matrix is square of the size ``np.prod(grid_shape)``.
25
+ boundary_conditions : {'neumann', 'dirichlet', 'periodic'}, optional
26
+ The type of the boundary conditions on the boundaries of the grid.
27
+ Valid values are ``'dirichlet'`` or ``'neumann'`` (default) or
28
+ ``'periodic'``.
29
+ dtype : dtype
30
+ Numerical type of the array. Default is ``np.int8``.
31
+
32
+ Methods
33
+ -------
34
+ toarray()
35
+ Construct a dense array from Laplacian data
36
+ tosparse()
37
+ Construct a sparse array from Laplacian data
38
+ eigenvalues(m=None)
39
+ Construct a 1D array of `m` largest (smallest in absolute value)
40
+ eigenvalues of the Laplacian matrix in ascending order.
41
+ eigenvectors(m=None)
42
+ Construct the array with columns made of `m` eigenvectors (``float``)
43
+ of the ``Nd`` Laplacian corresponding to the `m` ordered eigenvalues.
44
+
45
+ .. versionadded:: 1.12.0
46
+
47
+ Notes
48
+ -----
49
+ Compared to the MATLAB/Octave implementation [1] of 1-, 2-, and 3-D
50
+ Laplacian, this code allows the arbitrary N-D case and the matrix-free
51
+ callable option, but is currently limited to pure Dirichlet, Neumann or
52
+ Periodic boundary conditions only.
53
+
54
+ The Laplacian matrix of a graph (`scipy.sparse.csgraph.laplacian`) of a
55
+ rectangular grid corresponds to the negative Laplacian with the Neumann
56
+ conditions, i.e., ``boundary_conditions = 'neumann'``.
57
+
58
+ All eigenvalues and eigenvectors of the discrete Laplacian operator for
59
+ an ``N``-dimensional regular grid of shape `grid_shape` with the grid
60
+ step size ``h=1`` are analytically known [2].
61
+
62
+ References
63
+ ----------
64
+ .. [1] https://github.com/lobpcg/blopex/blob/master/blopex_\
65
+ tools/matlab/laplacian/laplacian.m
66
+ .. [2] "Eigenvalues and eigenvectors of the second derivative", Wikipedia
67
+ https://en.wikipedia.org/wiki/Eigenvalues_and_eigenvectors_\
68
+ of_the_second_derivative
69
+
70
+ Examples
71
+ --------
72
+ >>> import numpy as np
73
+ >>> from scipy.sparse.linalg import LaplacianNd
74
+ >>> from scipy.sparse import diags, csgraph
75
+ >>> from scipy.linalg import eigvalsh
76
+
77
+ The one-dimensional Laplacian demonstrated below for pure Neumann boundary
78
+ conditions on a regular grid with ``n=6`` grid points is exactly the
79
+ negative graph Laplacian for the undirected linear graph with ``n``
80
+ vertices using the sparse adjacency matrix ``G`` represented by the
81
+ famous tri-diagonal matrix:
82
+
83
+ >>> n = 6
84
+ >>> G = diags(np.ones(n - 1), 1, format='csr')
85
+ >>> Lf = csgraph.laplacian(G, symmetrized=True, form='function')
86
+ >>> grid_shape = (n, )
87
+ >>> lap = LaplacianNd(grid_shape, boundary_conditions='neumann')
88
+ >>> np.array_equal(lap.matmat(np.eye(n)), -Lf(np.eye(n)))
89
+ True
90
+
91
+ Since all matrix entries of the Laplacian are integers, ``'int8'`` is
92
+ the default dtype for storing matrix representations.
93
+
94
+ >>> lap.tosparse()
95
+ <6x6 sparse array of type '<class 'numpy.int8'>'
96
+ with 16 stored elements (3 diagonals) in DIAgonal format>
97
+ >>> lap.toarray()
98
+ array([[-1, 1, 0, 0, 0, 0],
99
+ [ 1, -2, 1, 0, 0, 0],
100
+ [ 0, 1, -2, 1, 0, 0],
101
+ [ 0, 0, 1, -2, 1, 0],
102
+ [ 0, 0, 0, 1, -2, 1],
103
+ [ 0, 0, 0, 0, 1, -1]], dtype=int8)
104
+ >>> np.array_equal(lap.matmat(np.eye(n)), lap.toarray())
105
+ True
106
+ >>> np.array_equal(lap.tosparse().toarray(), lap.toarray())
107
+ True
108
+
109
+ Any number of extreme eigenvalues and/or eigenvectors can be computed.
110
+
111
+ >>> lap = LaplacianNd(grid_shape, boundary_conditions='periodic')
112
+ >>> lap.eigenvalues()
113
+ array([-4., -3., -3., -1., -1., 0.])
114
+ >>> lap.eigenvalues()[-2:]
115
+ array([-1., 0.])
116
+ >>> lap.eigenvalues(2)
117
+ array([-1., 0.])
118
+ >>> lap.eigenvectors(1)
119
+ array([[0.40824829],
120
+ [0.40824829],
121
+ [0.40824829],
122
+ [0.40824829],
123
+ [0.40824829],
124
+ [0.40824829]])
125
+ >>> lap.eigenvectors(2)
126
+ array([[ 0.5 , 0.40824829],
127
+ [ 0. , 0.40824829],
128
+ [-0.5 , 0.40824829],
129
+ [-0.5 , 0.40824829],
130
+ [ 0. , 0.40824829],
131
+ [ 0.5 , 0.40824829]])
132
+ >>> lap.eigenvectors()
133
+ array([[ 0.40824829, 0.28867513, 0.28867513, 0.5 , 0.5 ,
134
+ 0.40824829],
135
+ [-0.40824829, -0.57735027, -0.57735027, 0. , 0. ,
136
+ 0.40824829],
137
+ [ 0.40824829, 0.28867513, 0.28867513, -0.5 , -0.5 ,
138
+ 0.40824829],
139
+ [-0.40824829, 0.28867513, 0.28867513, -0.5 , -0.5 ,
140
+ 0.40824829],
141
+ [ 0.40824829, -0.57735027, -0.57735027, 0. , 0. ,
142
+ 0.40824829],
143
+ [-0.40824829, 0.28867513, 0.28867513, 0.5 , 0.5 ,
144
+ 0.40824829]])
145
+
146
+ The two-dimensional Laplacian is illustrated on a regular grid with
147
+ ``grid_shape = (2, 3)`` points in each dimension.
148
+
149
+ >>> grid_shape = (2, 3)
150
+ >>> n = np.prod(grid_shape)
151
+
152
+ Numeration of grid points is as follows:
153
+
154
+ >>> np.arange(n).reshape(grid_shape + (-1,))
155
+ array([[[0],
156
+ [1],
157
+ [2]],
158
+ <BLANKLINE>
159
+ [[3],
160
+ [4],
161
+ [5]]])
162
+
163
+ Each of the boundary conditions ``'dirichlet'``, ``'periodic'``, and
164
+ ``'neumann'`` is illustrated separately; with ``'dirichlet'``
165
+
166
+ >>> lap = LaplacianNd(grid_shape, boundary_conditions='dirichlet')
167
+ >>> lap.tosparse()
168
+ <6x6 sparse array of type '<class 'numpy.int8'>'
169
+ with 20 stored elements in Compressed Sparse Row format>
170
+ >>> lap.toarray()
171
+ array([[-4, 1, 0, 1, 0, 0],
172
+ [ 1, -4, 1, 0, 1, 0],
173
+ [ 0, 1, -4, 0, 0, 1],
174
+ [ 1, 0, 0, -4, 1, 0],
175
+ [ 0, 1, 0, 1, -4, 1],
176
+ [ 0, 0, 1, 0, 1, -4]], dtype=int8)
177
+ >>> np.array_equal(lap.matmat(np.eye(n)), lap.toarray())
178
+ True
179
+ >>> np.array_equal(lap.tosparse().toarray(), lap.toarray())
180
+ True
181
+ >>> lap.eigenvalues()
182
+ array([-6.41421356, -5. , -4.41421356, -3.58578644, -3. ,
183
+ -1.58578644])
184
+ >>> eigvals = eigvalsh(lap.toarray().astype(np.float64))
185
+ >>> np.allclose(lap.eigenvalues(), eigvals)
186
+ True
187
+ >>> np.allclose(lap.toarray() @ lap.eigenvectors(),
188
+ ... lap.eigenvectors() @ np.diag(lap.eigenvalues()))
189
+ True
190
+
191
+ with ``'periodic'``
192
+
193
+ >>> lap = LaplacianNd(grid_shape, boundary_conditions='periodic')
194
+ >>> lap.tosparse()
195
+ <6x6 sparse array of type '<class 'numpy.int8'>'
196
+ with 24 stored elements in Compressed Sparse Row format>
197
+ >>> lap.toarray()
198
+ array([[-4, 1, 1, 2, 0, 0],
199
+ [ 1, -4, 1, 0, 2, 0],
200
+ [ 1, 1, -4, 0, 0, 2],
201
+ [ 2, 0, 0, -4, 1, 1],
202
+ [ 0, 2, 0, 1, -4, 1],
203
+ [ 0, 0, 2, 1, 1, -4]], dtype=int8)
204
+ >>> np.array_equal(lap.matmat(np.eye(n)), lap.toarray())
205
+ True
206
+ >>> np.array_equal(lap.tosparse().toarray(), lap.toarray())
207
+ True
208
+ >>> lap.eigenvalues()
209
+ array([-7., -7., -4., -3., -3., 0.])
210
+ >>> eigvals = eigvalsh(lap.toarray().astype(np.float64))
211
+ >>> np.allclose(lap.eigenvalues(), eigvals)
212
+ True
213
+ >>> np.allclose(lap.toarray() @ lap.eigenvectors(),
214
+ ... lap.eigenvectors() @ np.diag(lap.eigenvalues()))
215
+ True
216
+
217
+ and with ``'neumann'``
218
+
219
+ >>> lap = LaplacianNd(grid_shape, boundary_conditions='neumann')
220
+ >>> lap.tosparse()
221
+ <6x6 sparse array of type '<class 'numpy.int8'>'
222
+ with 20 stored elements in Compressed Sparse Row format>
223
+ >>> lap.toarray()
224
+ array([[-2, 1, 0, 1, 0, 0],
225
+ [ 1, -3, 1, 0, 1, 0],
226
+ [ 0, 1, -2, 0, 0, 1],
227
+ [ 1, 0, 0, -2, 1, 0],
228
+ [ 0, 1, 0, 1, -3, 1],
229
+ [ 0, 0, 1, 0, 1, -2]])
230
+ >>> np.array_equal(lap.matmat(np.eye(n)), lap.toarray())
231
+ True
232
+ >>> np.array_equal(lap.tosparse().toarray(), lap.toarray())
233
+ True
234
+ >>> lap.eigenvalues()
235
+ array([-5., -3., -3., -2., -1., 0.])
236
+ >>> eigvals = eigvalsh(lap.toarray().astype(np.float64))
237
+ >>> np.allclose(lap.eigenvalues(), eigvals)
238
+ True
239
+ >>> np.allclose(lap.toarray() @ lap.eigenvectors(),
240
+ ... lap.eigenvectors() @ np.diag(lap.eigenvalues()))
241
+ True
242
+
243
+ """
244
+
245
+ def __init__(self, grid_shape, *,
246
+ boundary_conditions='neumann',
247
+ dtype=np.int8):
248
+
249
+ if boundary_conditions not in ('dirichlet', 'neumann', 'periodic'):
250
+ raise ValueError(
251
+ f"Unknown value {boundary_conditions!r} is given for "
252
+ "'boundary_conditions' parameter. The valid options are "
253
+ "'dirichlet', 'periodic', and 'neumann' (default)."
254
+ )
255
+
256
+ self.grid_shape = grid_shape
257
+ self.boundary_conditions = boundary_conditions
258
+ # LaplacianNd folds all dimensions in `grid_shape` into a single one
259
+ N = np.prod(grid_shape)
260
+ super().__init__(dtype=dtype, shape=(N, N))
261
+
262
+ def _eigenvalue_ordering(self, m):
263
+ """Compute `m` largest eigenvalues in each of the ``N`` directions,
264
+ i.e., up to ``m * N`` total, order them and return `m` largest.
265
+ """
266
+ grid_shape = self.grid_shape
267
+ if m is None:
268
+ indices = np.indices(grid_shape)
269
+ Leig = np.zeros(grid_shape)
270
+ else:
271
+ grid_shape_min = min(grid_shape,
272
+ tuple(np.ones_like(grid_shape) * m))
273
+ indices = np.indices(grid_shape_min)
274
+ Leig = np.zeros(grid_shape_min)
275
+
276
+ for j, n in zip(indices, grid_shape):
277
+ if self.boundary_conditions == 'dirichlet':
278
+ Leig += -4 * np.sin(np.pi * (j + 1) / (2 * (n + 1))) ** 2
279
+ elif self.boundary_conditions == 'neumann':
280
+ Leig += -4 * np.sin(np.pi * j / (2 * n)) ** 2
281
+ else: # boundary_conditions == 'periodic'
282
+ Leig += -4 * np.sin(np.pi * np.floor((j + 1) / 2) / n) ** 2
283
+
284
+ Leig_ravel = Leig.ravel()
285
+ ind = np.argsort(Leig_ravel)
286
+ eigenvalues = Leig_ravel[ind]
287
+ if m is not None:
288
+ eigenvalues = eigenvalues[-m:]
289
+ ind = ind[-m:]
290
+
291
+ return eigenvalues, ind
292
+
293
+ def eigenvalues(self, m=None):
294
+ """Return the requested number of eigenvalues.
295
+
296
+ Parameters
297
+ ----------
298
+ m : int, optional
299
+ The positive number of smallest eigenvalues to return.
300
+ If not provided, then all eigenvalues will be returned.
301
+
302
+ Returns
303
+ -------
304
+ eigenvalues : float array
305
+ The requested `m` smallest or all eigenvalues, in ascending order.
306
+ """
307
+ eigenvalues, _ = self._eigenvalue_ordering(m)
308
+ return eigenvalues
309
+
310
+ def _ev1d(self, j, n):
311
+ """Return 1 eigenvector in 1d with index `j`
312
+ and number of grid points `n` where ``j < n``.
313
+ """
314
+ if self.boundary_conditions == 'dirichlet':
315
+ i = np.pi * (np.arange(n) + 1) / (n + 1)
316
+ ev = np.sqrt(2. / (n + 1.)) * np.sin(i * (j + 1))
317
+ elif self.boundary_conditions == 'neumann':
318
+ i = np.pi * (np.arange(n) + 0.5) / n
319
+ ev = np.sqrt((1. if j == 0 else 2.) / n) * np.cos(i * j)
320
+ else: # boundary_conditions == 'periodic'
321
+ if j == 0:
322
+ ev = np.sqrt(1. / n) * np.ones(n)
323
+ elif j + 1 == n and n % 2 == 0:
324
+ ev = np.sqrt(1. / n) * np.tile([1, -1], n//2)
325
+ else:
326
+ i = 2. * np.pi * (np.arange(n) + 0.5) / n
327
+ ev = np.sqrt(2. / n) * np.cos(i * np.floor((j + 1) / 2))
328
+ # make small values exact zeros, correcting round-off errors;
329
+ # by the symmetry of the eigenvectors the exact 0. is correct
330
+ ev[np.abs(ev) < np.finfo(np.float64).eps] = 0.
331
+ return ev
332
+
333
+ def _one_eve(self, k):
334
+ """Return 1 eigenvector in Nd with multi-index `j`
335
+ as a tensor product of the corresponding 1d eigenvectors.
336
+ """
337
+ phi = [self._ev1d(j, n) for j, n in zip(k, self.grid_shape)]
338
+ result = phi[0]
339
+ for phi in phi[1:]:
340
+ result = np.tensordot(result, phi, axes=0)
341
+ return np.asarray(result).ravel()
342
+
343
+ def eigenvectors(self, m=None):
344
+ """Return the requested number of eigenvectors for ordered eigenvalues.
345
+
346
+ Parameters
347
+ ----------
348
+ m : int, optional
349
+ The positive number of eigenvectors to return. If not provided,
350
+ then all eigenvectors will be returned.
351
+
352
+ Returns
353
+ -------
354
+ eigenvectors : float array
355
+ An array with columns made of the requested `m` or all eigenvectors.
356
+ The columns are ordered according to the `m` ordered eigenvalues.
357
+ """
358
+ _, ind = self._eigenvalue_ordering(m)
359
+ if m is None:
360
+ grid_shape_min = self.grid_shape
361
+ else:
362
+ grid_shape_min = min(self.grid_shape,
363
+ tuple(np.ones_like(self.grid_shape) * m))
364
+
365
+ N_indices = np.unravel_index(ind, grid_shape_min)
366
+ N_indices = [tuple(x) for x in zip(*N_indices)]
367
+ eigenvectors_list = [self._one_eve(k) for k in N_indices]
368
+ return np.column_stack(eigenvectors_list)
369
+
370
+ def toarray(self):
371
+ """
372
+ Converts the Laplacian data to a dense array.
373
+
374
+ Returns
375
+ -------
376
+ L : ndarray
377
+ The shape is ``(N, N)`` where ``N = np.prod(grid_shape)``.
378
+
379
+ """
380
+ grid_shape = self.grid_shape
381
+ n = np.prod(grid_shape)
382
+ L = np.zeros([n, n], dtype=np.int8)
383
+ # Scratch arrays
384
+ L_i = np.empty_like(L)
385
+ Ltemp = np.empty_like(L)
386
+
387
+ for ind, dim in enumerate(grid_shape):
388
+ # Start zeroing out L_i
389
+ L_i[:] = 0
390
+ # Allocate the top left corner with the kernel of L_i
391
+ # Einsum returns writable view of arrays
392
+ np.einsum("ii->i", L_i[:dim, :dim])[:] = -2
393
+ np.einsum("ii->i", L_i[: dim - 1, 1:dim])[:] = 1
394
+ np.einsum("ii->i", L_i[1:dim, : dim - 1])[:] = 1
395
+
396
+ if self.boundary_conditions == 'neumann':
397
+ L_i[0, 0] = -1
398
+ L_i[dim - 1, dim - 1] = -1
399
+ elif self.boundary_conditions == 'periodic':
400
+ if dim > 1:
401
+ L_i[0, dim - 1] += 1
402
+ L_i[dim - 1, 0] += 1
403
+ else:
404
+ L_i[0, 0] += 1
405
+
406
+ # kron is too slow for large matrices hence the next two tricks
407
+ # 1- kron(eye, mat) is block_diag(mat, mat, ...)
408
+ # 2- kron(mat, eye) can be performed by 4d stride trick
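+ # For example, with a 2x2 block B: kron(eye(3), B) is the 6x6
+ # block-diagonal matrix block_diag(B, B, B), while kron(B, eye(3))
+ # places B[i, j] * eye(3) at block position (i, j); the reshape-and-index
+ # assignment below reproduces the latter without calling kron.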
409
+
410
+ # 1-
411
+ new_dim = dim
412
+ # for block_diag we tile the top left portion on the diagonal
413
+ if ind > 0:
414
+ tiles = np.prod(grid_shape[:ind])
415
+ for j in range(1, tiles):
416
+ L_i[j*dim:(j+1)*dim, j*dim:(j+1)*dim] = L_i[:dim, :dim]
417
+ new_dim += dim
418
+ # 2-
419
+ # we need to keep L_i, but reset the array
420
+ Ltemp[:new_dim, :new_dim] = L_i[:new_dim, :new_dim]
421
+ tiles = int(np.prod(grid_shape[ind+1:]))
422
+ # Zero out the top left, the rest is already 0
423
+ L_i[:new_dim, :new_dim] = 0
424
+ idx = [x for x in range(tiles)]
425
+ L_i.reshape(
426
+ (new_dim, tiles,
427
+ new_dim, tiles)
428
+ )[:, idx, :, idx] = Ltemp[:new_dim, :new_dim]
429
+
430
+ L += L_i
431
+
432
+ return L.astype(self.dtype)
433
+
434
+ def tosparse(self):
435
+ """
436
+ Constructs a sparse array from the Laplacian data. The returned sparse
437
+ array format is dependent on the selected boundary conditions.
438
+
439
+ Returns
440
+ -------
441
+ L : scipy.sparse.sparray
442
+ The shape is ``(N, N)`` where ``N = np.prod(grid_shape)``.
443
+
444
+ """
445
+ N = len(self.grid_shape)
446
+ p = np.prod(self.grid_shape)
447
+ L = dia_array((p, p), dtype=np.int8)
448
+
449
+ for i in range(N):
450
+ dim = self.grid_shape[i]
451
+ data = np.ones([3, dim], dtype=np.int8)
452
+ data[1, :] *= -2
453
+
454
+ if self.boundary_conditions == 'neumann':
455
+ data[1, 0] = -1
456
+ data[1, -1] = -1
457
+
458
+ L_i = dia_array((data, [-1, 0, 1]), shape=(dim, dim),
459
+ dtype=np.int8
460
+ )
461
+
462
+ if self.boundary_conditions == 'periodic':
463
+ t = dia_array((dim, dim), dtype=np.int8)
464
+ t.setdiag([1], k=-dim+1)
465
+ t.setdiag([1], k=dim-1)
466
+ L_i += t
467
+
468
+ for j in range(i):
469
+ L_i = kron(eye(self.grid_shape[j], dtype=np.int8), L_i)
470
+ for j in range(i + 1, N):
471
+ L_i = kron(L_i, eye(self.grid_shape[j], dtype=np.int8))
472
+ L += L_i
473
+ return L.astype(self.dtype)
474
+
475
+ def _matvec(self, x):
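+ # Matrix-free application of the N-d Laplacian stencil: np.roll adds the
+ # neighbouring values along each axis (periodic by construction); for
+ # 'dirichlet' and 'neumann' the wrapped-around boundary contributions are
+ # subtracted again, and 'neumann' re-adds the boundary value itself so
+ # that axis contributes -1 instead of -2 on the boundary diagonal.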
476
+ grid_shape = self.grid_shape
477
+ N = len(grid_shape)
478
+ X = x.reshape(grid_shape + (-1,))
479
+ Y = -2 * N * X
480
+ for i in range(N):
481
+ Y += np.roll(X, 1, axis=i)
482
+ Y += np.roll(X, -1, axis=i)
483
+ if self.boundary_conditions in ('neumann', 'dirichlet'):
484
+ Y[(slice(None),)*i + (0,) + (slice(None),)*(N-i-1)
485
+ ] -= np.roll(X, 1, axis=i)[
486
+ (slice(None),) * i + (0,) + (slice(None),) * (N-i-1)
487
+ ]
488
+ Y[
489
+ (slice(None),) * i + (-1,) + (slice(None),) * (N-i-1)
490
+ ] -= np.roll(X, -1, axis=i)[
491
+ (slice(None),) * i + (-1,) + (slice(None),) * (N-i-1)
492
+ ]
493
+
494
+ if self.boundary_conditions == 'neumann':
495
+ Y[
496
+ (slice(None),) * i + (0,) + (slice(None),) * (N-i-1)
497
+ ] += np.roll(X, 0, axis=i)[
498
+ (slice(None),) * i + (0,) + (slice(None),) * (N-i-1)
499
+ ]
500
+ Y[
501
+ (slice(None),) * i + (-1,) + (slice(None),) * (N-i-1)
502
+ ] += np.roll(X, 0, axis=i)[
503
+ (slice(None),) * i + (-1,) + (slice(None),) * (N-i-1)
504
+ ]
505
+
506
+ return Y.reshape(-1, X.shape[-1])
507
+
508
+ def _matmat(self, x):
509
+ return self._matvec(x)
510
+
511
+ def _adjoint(self):
512
+ return self
513
+
514
+ def _transpose(self):
515
+ return self
516
+
517
+
518
+ class Sakurai(LinearOperator):
519
+ """
520
+ Construct a Sakurai matrix in various formats and its eigenvalues.
521
+
522
+ Constructs the "Sakurai" matrix motivated by reference [1]_:
523
+ square real symmetric positive definite and 5-diagonal
524
+ with the main diagonal ``[5, 6, 6, ..., 6, 6, 5]``, the ``+1`` and ``-1``
525
+ diagonals filled with ``-4``, and the ``+2`` and ``-2`` diagonals
526
+ made of ``1``. Its eigenvalues are analytically known to be
527
+ ``16. * np.power(np.cos(0.5 * k * np.pi / (n + 1)), 4)``.
528
+ The matrix gets ill-conditioned with its size growing.
529
+ It is useful for testing and benchmarking sparse eigenvalue solvers
530
+ especially those taking advantage of its banded 5-diagonal structure.
531
+ See the notes below for details.
532
+
533
+ Parameters
534
+ ----------
535
+ n : int
536
+ The size of the matrix.
537
+ dtype : dtype
538
+ Numerical type of the array. Default is ``np.int8``.
539
+
540
+ Methods
541
+ -------
542
+ toarray()
543
+ Construct a dense array from the Sakurai matrix data
544
+ tosparse()
545
+ Construct a sparse array from the Sakurai matrix data
546
+ tobanded()
547
+ The Sakurai matrix in the format for banded symmetric matrices,
548
+ i.e., (3, n) ndarray with 3 upper diagonals
549
+ placing the main diagonal at the bottom.
550
+ eigenvalues
551
+ All eigenvalues of the Sakurai matrix ordered ascending.
552
+
553
+ Notes
554
+ -----
555
+ Reference [1]_ introduces a generalized eigenproblem for the matrix pair
556
+ `A` and `B` where `A` is the identity, so we turn it into an eigenproblem
557
+ just for the matrix `B`, which this class outputs in various formats
558
+ together with its eigenvalues.
559
+
560
+ .. versionadded:: 1.12.0
561
+
562
+ References
563
+ ----------
564
+ .. [1] T. Sakurai, H. Tadano, Y. Inadomi, and U. Nagashima,
565
+ "A moment-based method for large-scale generalized
566
+ eigenvalue problems",
567
+ Appl. Num. Anal. Comp. Math. Vol. 1 No. 2 (2004).
568
+
569
+ Examples
570
+ --------
571
+ >>> import numpy as np
572
+ >>> from scipy.sparse.linalg._special_sparse_arrays import Sakurai
573
+ >>> from scipy.linalg import eig_banded
574
+ >>> n = 6
575
+ >>> sak = Sakurai(n)
576
+
577
+ Since all matrix entries are small integers, ``'int8'`` is
578
+ the default dtype for storing matrix representations.
579
+
580
+ >>> sak.toarray()
581
+ array([[ 5, -4, 1, 0, 0, 0],
582
+ [-4, 6, -4, 1, 0, 0],
583
+ [ 1, -4, 6, -4, 1, 0],
584
+ [ 0, 1, -4, 6, -4, 1],
585
+ [ 0, 0, 1, -4, 6, -4],
586
+ [ 0, 0, 0, 1, -4, 5]], dtype=int8)
587
+ >>> sak.tobanded()
588
+ array([[ 1, 1, 1, 1, 1, 1],
589
+ [-4, -4, -4, -4, -4, -4],
590
+ [ 5, 6, 6, 6, 6, 5]], dtype=int8)
591
+ >>> sak.tosparse()
592
+ <6x6 sparse matrix of type '<class 'numpy.int8'>'
593
+ with 24 stored elements (5 diagonals) in DIAgonal format>
594
+ >>> np.array_equal(sak.dot(np.eye(n)), sak.tosparse().toarray())
595
+ True
596
+ >>> sak.eigenvalues()
597
+ array([0.03922866, 0.56703972, 2.41789479, 5.97822974,
598
+ 10.54287655, 14.45473055])
599
+ >>> sak.eigenvalues(2)
600
+ array([0.03922866, 0.56703972])
601
+
602
+ The banded form can be used in scipy functions for banded matrices, e.g.,
603
+
604
+ >>> e = eig_banded(sak.tobanded(), eigvals_only=True)
605
+ >>> np.allclose(sak.eigenvalues(), e, atol=n * n * n * np.finfo(float).eps)
606
+ True
607
+
608
+ """
609
+ def __init__(self, n, dtype=np.int8):
610
+ self.n = n
611
+ self.dtype = dtype
612
+ shape = (n, n)
613
+ super().__init__(dtype, shape)
614
+
615
+ def eigenvalues(self, m=None):
616
+ """Return the requested number of eigenvalues.
617
+
618
+ Parameters
619
+ ----------
620
+ m : int, optional
621
+ The positive number of smallest eigenvalues to return.
622
+ If not provided, then all eigenvalues will be returned.
623
+
624
+ Returns
625
+ -------
626
+ eigenvalues : `np.float64` array
627
+ The requested `m` smallest or all eigenvalues, in ascending order.
628
+ """
629
+ if m is None:
630
+ m = self.n
631
+ k = np.arange(self.n + 1 -m, self.n + 1)
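+ # cos(0.5 * k * pi / (n + 1)) decreases as k grows, so the largest k give
+ # the smallest eigenvalues; flip to return them in ascending order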
632
+ return np.flip(16. * np.power(np.cos(0.5 * k * np.pi / (self.n + 1)), 4))
633
+
634
+ def tobanded(self):
635
+ """
636
+ Construct the Sakurai matrix as a banded array.
637
+ """
638
+ d0 = np.r_[5, 6 * np.ones(self.n - 2, dtype=self.dtype), 5]
639
+ d1 = -4 * np.ones(self.n, dtype=self.dtype)
640
+ d2 = np.ones(self.n, dtype=self.dtype)
641
+ return np.array([d2, d1, d0]).astype(self.dtype)
642
+
643
+ def tosparse(self):
644
+ """
645
+ Construct the Sakurai matrix in a sparse format.
646
+ """
647
+ from scipy.sparse import spdiags
648
+ d = self.tobanded()
649
+ # the banded format has the main diagonal at the bottom
650
+ # `spdiags` has no `dtype` parameter so inherits dtype from banded
651
+ return spdiags([d[0], d[1], d[2], d[1], d[0]], [-2, -1, 0, 1, 2],
652
+ self.n, self.n)
653
+
654
+ def toarray(self):
655
+ return self.tosparse().toarray()
656
+
657
+ def _matvec(self, x):
658
+ """
659
+ Construct matrix-free callable banded-matrix-vector multiplication by
660
+ the Sakurai matrix without constructing or storing the matrix itself
661
+ using the knowledge of its entries and the 5-diagonal format.
662
+ """
663
+ x = x.reshape(self.n, -1)
664
+ result_dtype = np.promote_types(x.dtype, self.dtype)
665
+ sx = np.zeros_like(x, dtype=result_dtype)
666
+ sx[0, :] = 5 * x[0, :] - 4 * x[1, :] + x[2, :]
667
+ sx[-1, :] = 5 * x[-1, :] - 4 * x[-2, :] + x[-3, :]
668
+ sx[1: -1, :] = (6 * x[1: -1, :] - 4 * (x[:-2, :] + x[2:, :])
669
+ + np.pad(x[:-3, :], ((1, 0), (0, 0)))
670
+ + np.pad(x[3:, :], ((0, 1), (0, 0))))
671
+ return sx
672
+
673
+ def _matmat(self, x):
674
+ """
675
+ Construct matrix-free callable matrix-matrix multiplication by
676
+ the Sakurai matrix without constructing or storing the matrix itself
677
+ by reusing the ``_matvec(x)`` that supports both 1D and 2D arrays ``x``.
678
+ """
679
+ return self._matvec(x)
680
+
681
+ def _adjoint(self):
682
+ return self
683
+
684
+ def _transpose(self):
685
+ return self
686
+
687
+
688
+ class MikotaM(LinearOperator):
689
+ """
690
+ Construct a mass matrix in various formats of Mikota pair.
691
+
692
+ The mass matrix `M` is square real diagonal
693
+ positive definite with entries that are the reciprocals of positive integers.
694
+
695
+ Parameters
696
+ ----------
697
+ shape : tuple of int
698
+ The shape of the matrix.
699
+ dtype : dtype
700
+ Numerical type of the array. Default is ``np.float64``.
701
+
702
+ Methods
703
+ -------
704
+ toarray()
705
+ Construct a dense array from Mikota data
706
+ tosparse()
707
+ Construct a sparse array from Mikota data
708
+ tobanded()
709
+ The format for banded symmetric matrices,
710
+ i.e., (1, n) ndarray with the main diagonal.
711
+ """
712
+ def __init__(self, shape, dtype=np.float64):
713
+ self.shape = shape
714
+ self.dtype = dtype
715
+ super().__init__(dtype, shape)
716
+
717
+ def _diag(self):
718
+ # The matrix is constructed from its diagonal 1 / [1, 2, ..., N];
719
+ # compute in a function to avoid duplicated code & storage footprint
720
+ return (1. / np.arange(1, self.shape[0] + 1)).astype(self.dtype)
721
+
722
+ def tobanded(self):
723
+ return self._diag()
724
+
725
+ def tosparse(self):
726
+ from scipy.sparse import diags
727
+ return diags([self._diag()], [0], shape=self.shape, dtype=self.dtype)
728
+
729
+ def toarray(self):
730
+ return np.diag(self._diag()).astype(self.dtype)
731
+
732
+ def _matvec(self, x):
733
+ """
734
+ Construct matrix-free callable banded-matrix-vector multiplication by
735
+ the Mikota mass matrix without constructing or storing the matrix itself
736
+ using the knowledge of its entries and the diagonal format.
737
+ """
738
+ x = x.reshape(self.shape[0], -1)
739
+ return self._diag()[:, np.newaxis] * x
740
+
741
+ def _matmat(self, x):
742
+ """
743
+ Construct matrix-free callable matrix-matrix multiplication by
744
+ the Mikota mass matrix without constructing or storing the matrix itself
745
+ by reusing the ``_matvec(x)`` that supports both 1D and 2D arrays ``x``.
746
+ """
747
+ return self._matvec(x)
748
+
749
+ def _adjoint(self):
750
+ return self
751
+
752
+ def _transpose(self):
753
+ return self
754
+
755
+
756
+ class MikotaK(LinearOperator):
757
+ """
758
+ Construct a stiffness matrix in various formats of Mikota pair.
759
+
760
+ The stiffness matrix `K` is square real tri-diagonal symmetric
761
+ positive definite with integer entries.
762
+
763
+ Parameters
764
+ ----------
765
+ shape : tuple of int
766
+ The shape of the matrix.
767
+ dtype : dtype
768
+ Numerical type of the array. Default is ``np.int32``.
769
+
770
+ Methods
771
+ -------
772
+ toarray()
773
+ Construct a dense array from Mikota data
774
+ tosparse()
775
+ Construct a sparse array from Mikota data
776
+ tobanded()
777
+ The format for banded symmetric matrices,
778
+ i.e., (2, n) ndarray with 2 upper diagonals
779
+ placing the main diagonal at the bottom.
780
+ """
781
+ def __init__(self, shape, dtype=np.int32):
782
+ self.shape = shape
783
+ self.dtype = dtype
784
+ super().__init__(dtype, shape)
785
+ # The matrix is constructed from its diagonals;
786
+ # we precompute these to avoid duplicating the computation
787
+ n = shape[0]
788
+ self._diag0 = np.arange(2 * n - 1, 0, -2, dtype=self.dtype)
789
+ self._diag1 = - np.arange(n - 1, 0, -1, dtype=self.dtype)
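+ # e.g. for n = 4: _diag0 = [7, 5, 3, 1] and _diag1 = [-3, -2, -1]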
790
+
791
+ def tobanded(self):
792
+ return np.array([np.pad(self._diag1, (1, 0), 'constant'), self._diag0])
793
+
794
+ def tosparse(self):
795
+ from scipy.sparse import diags
796
+ return diags([self._diag1, self._diag0, self._diag1], [-1, 0, 1],
797
+ shape=self.shape, dtype=self.dtype)
798
+
799
+ def toarray(self):
800
+ return self.tosparse().toarray()
801
+
802
+ def _matvec(self, x):
803
+ """
804
+ Construct matrix-free callable banded-matrix-vector multiplication by
805
+ the Mikota stiffness matrix without constructing or storing the matrix
806
+ itself using the knowledge of its entries and the 3-diagonal format.
807
+ """
808
+ x = x.reshape(self.shape[0], -1)
809
+ result_dtype = np.promote_types(x.dtype, self.dtype)
810
+ kx = np.zeros_like(x, dtype=result_dtype)
811
+ d1 = self._diag1
812
+ d0 = self._diag0
813
+ kx[0, :] = d0[0] * x[0, :] + d1[0] * x[1, :]
814
+ kx[-1, :] = d1[-1] * x[-2, :] + d0[-1] * x[-1, :]
815
+ kx[1: -1, :] = (d1[:-1, None] * x[: -2, :]
816
+ + d0[1: -1, None] * x[1: -1, :]
817
+ + d1[1:, None] * x[2:, :])
818
+ return kx
819
+
820
+ def _matmat(self, x):
821
+ """
822
+ Construct matrix-free callable matrix-matrix multiplication by
823
+ the Mikota stiffness matrix without constructing or storing the matrix itself
824
+ by reusing the ``_matvec(x)`` that supports both 1D and 2D arrays ``x``.
825
+ """
826
+ return self._matvec(x)
827
+
828
+ def _adjoint(self):
829
+ return self
830
+
831
+ def _transpose(self):
832
+ return self
833
+
834
+
835
+ class MikotaPair:
836
+ """
837
+ Construct the Mikota pair of matrices in various formats and
838
+ eigenvalues of the generalized eigenproblem with them.
839
+
840
+ The Mikota pair of matrices [1, 2]_ models a vibration problem
841
+ of a linear mass-spring system with the ends attached where
842
+ the stiffness of the springs and the masses increase along
843
+ the system length such that vibration frequencies are subsequent
844
+ integers 1, 2, ..., `n` where `n` is the number of the masses. Thus,
845
+ eigenvalues of the generalized eigenvalue problem for
846
+ the matrix pair `K` and `M` where `K` is he system stiffness matrix
847
+ and `M` is the system mass matrix are the squares of the integers,
848
+ i.e., 1, 4, 9, ..., ``n * n``.
849
+
850
+ The stiffness matrix `K` is square real tri-diagonal symmetric
851
+ positive definite. The mass matrix `M` is diagonal with diagonal
852
+ entries 1, 1/2, 1/3, ...., ``1/n``. Both matrices get
853
+ ill-conditioned with `n` growing.
854
+
855
+ Parameters
856
+ ----------
857
+ n : int
858
+ The size of the matrices of the Mikota pair.
859
+ dtype : dtype
860
+ Numerical type of the array. Default is ``np.float64``.
861
+
862
+ Attributes
863
+ ----------
864
+ eigenvalues : 1D ndarray, ``np.uint64``
865
+ All eigenvalues of the Mikota pair ordered ascending.
866
+
867
+ Methods
868
+ -------
869
+ MikotaK()
870
+ A `LinearOperator` custom object for the stiffness matrix.
871
+ MikotaM()
872
+ A `LinearOperator` custom object for the mass matrix.
873
+
874
+ .. versionadded:: 1.12.0
875
+
876
+ References
877
+ ----------
878
+ .. [1] J. Mikota, "Frequency tuning of chain structure multibody oscillators
879
+ to place the natural frequencies at omega1 and N-1 integer multiples
880
+ omega2,..., omegaN", Z. Angew. Math. Mech. 81 (2001), S2, S201-S202.
881
882
+ .. [2] Peter C. Muller and Metin Gurgoze,
883
+ "Natural frequencies of a multi-degree-of-freedom vibration system",
884
+ Proc. Appl. Math. Mech. 6, 319-320 (2006).
885
+ http://dx.doi.org/10.1002/pamm.200610141.
886
+
887
+ Examples
888
+ --------
889
+ >>> import numpy as np
890
+ >>> from scipy.sparse.linalg._special_sparse_arrays import MikotaPair
891
+ >>> n = 6
892
+ >>> mik = MikotaPair(n)
893
+ >>> mik_k = mik.k
894
+ >>> mik_m = mik.m
895
+ >>> mik_k.toarray()
896
+ array([[11., -5., 0., 0., 0., 0.],
897
+ [-5., 9., -4., 0., 0., 0.],
898
+ [ 0., -4., 7., -3., 0., 0.],
899
+ [ 0., 0., -3., 5., -2., 0.],
900
+ [ 0., 0., 0., -2., 3., -1.],
901
+ [ 0., 0., 0., 0., -1., 1.]])
902
+ >>> mik_k.tobanded()
903
+ array([[ 0., -5., -4., -3., -2., -1.],
904
+ [11., 9., 7., 5., 3., 1.]])
905
+ >>> mik_m.tobanded()
906
+ array([1. , 0.5 , 0.33333333, 0.25 , 0.2 ,
907
+ 0.16666667])
908
+ >>> mik_k.tosparse()
909
+ <6x6 sparse matrix of type '<class 'numpy.float64'>'
910
+ with 16 stored elements (3 diagonals) in DIAgonal format>
911
+ >>> mik_m.tosparse()
912
+ <6x6 sparse matrix of type '<class 'numpy.float64'>'
913
+ with 6 stored elements (1 diagonals) in DIAgonal format>
914
+ >>> np.array_equal(mik_k(np.eye(n)), mik_k.toarray())
915
+ True
916
+ >>> np.array_equal(mik_m(np.eye(n)), mik_m.toarray())
917
+ True
918
+ >>> mik.eigenvalues()
919
+ array([ 1, 4, 9, 16, 25, 36])
920
+ >>> mik.eigenvalues(2)
921
+ array([ 1, 4])
922
+
923
+ """
924
+ def __init__(self, n, dtype=np.float64):
925
+ self.n = n
926
+ self.dtype = dtype
927
+ self.shape = (n, n)
928
+ self.m = MikotaM(self.shape, self.dtype)
929
+ self.k = MikotaK(self.shape, self.dtype)
930
+
931
+ def eigenvalues(self, m=None):
932
+ """Return the requested number of eigenvalues.
933
+
934
+ Parameters
935
+ ----------
936
+ m : int, optional
937
+ The positive number of smallest eigenvalues to return.
938
+ If not provided, then all eigenvalues will be returned.
939
+
940
+ Returns
941
+ -------
942
+ eigenvalues : `np.uint64` array
943
+ The requested `m` smallest or all eigenvalues, in ascending order.
944
+ """
945
+ if m is None:
946
+ m = self.n
947
+ arange_plus1 = np.arange(1, m + 1, dtype=np.uint64)
948
+ return arange_plus1 * arange_plus1
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/dsolve.py ADDED
@@ -0,0 +1,24 @@
1
+ # This file is not meant for public use and will be removed in SciPy v2.0.0.
2
+ # Use the `scipy.sparse.linalg` namespace for importing the functions
3
+ # included below.
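+ # For example, prefer ``from scipy.sparse.linalg import spsolve, splu``
+ # over importing them from ``scipy.sparse.linalg.dsolve``.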
4
+
5
+ from scipy._lib.deprecation import _sub_module_deprecation
6
+
7
+
8
+ __all__ = [ # noqa: F822
9
+ 'MatrixRankWarning', 'SuperLU', 'factorized',
10
+ 'spilu', 'splu', 'spsolve',
11
+ 'spsolve_triangular', 'use_solver', 'linsolve', 'test'
12
+ ]
13
+
14
+ dsolve_modules = ['linsolve']
15
+
16
+
17
+ def __dir__():
18
+ return __all__
19
+
20
+
21
+ def __getattr__(name):
22
+ return _sub_module_deprecation(sub_package="sparse.linalg", module="dsolve",
23
+ private_modules=["_dsolve"], all=__all__,
24
+ attribute=name)
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/eigen.py ADDED
@@ -0,0 +1,23 @@
1
+ # This file is not meant for public use and will be removed in SciPy v2.0.0.
2
+ # Use the `scipy.sparse.linalg` namespace for importing the functions
3
+ # included below.
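+ # For example, prefer ``from scipy.sparse.linalg import eigs, eigsh, lobpcg, svds``
+ # over importing them from ``scipy.sparse.linalg.eigen``.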
4
+
5
+ from scipy._lib.deprecation import _sub_module_deprecation
6
+
7
+
8
+ __all__ = [ # noqa: F822
9
+ 'ArpackError', 'ArpackNoConvergence', 'ArpackError',
10
+ 'eigs', 'eigsh', 'lobpcg', 'svds', 'arpack', 'test'
11
+ ]
12
+
13
+ eigen_modules = ['arpack']
14
+
15
+
16
+ def __dir__():
17
+ return __all__
18
+
19
+
20
+ def __getattr__(name):
21
+ return _sub_module_deprecation(sub_package="sparse.linalg", module="eigen",
22
+ private_modules=["_eigen"], all=__all__,
23
+ attribute=name)
env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (183 Bytes).
 
env-llmeval/lib/python3.10/site-packages/scipy/sparse/tests/__pycache__/test_array_api.cpython-310.pyc ADDED
Binary file (15.1 kB).