Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/__init__.py +146 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_expm_multiply.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_interface.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_matfuncs.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_norm.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_onenormest.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_special_sparse_arrays.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_svdp.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/dsolve.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/eigen.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/interface.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/isolve.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/matfuncs.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/_svds.py +545 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_expm_multiply.py +810 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_interface.py +896 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/_gcrotmk.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/iterative.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/lgmres.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/lsmr.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/lsqr.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/minres.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/tfqmr.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/utils.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/lgmres.py +242 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_gcrotmk.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_iterative.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_lgmres.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_lsmr.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_lsqr.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_minres.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_utils.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/test_gcrotmk.py +165 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/test_iterative.py +796 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/test_lgmres.py +211 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/test_lsmr.py +185 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/test_lsqr.py +120 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/test_minres.py +97 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/test_utils.py +9 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tfqmr.py +191 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/utils.py +127 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_matfuncs.py +940 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_norm.py +193 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_onenormest.py +467 -0
- llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_propack/_cpropack.cpython-310-x86_64-linux-gnu.so +0 -0
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/__init__.py
ADDED
@@ -0,0 +1,146 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Sparse linear algebra (:mod:`scipy.sparse.linalg`)
|
3 |
+
==================================================
|
4 |
+
|
5 |
+
.. currentmodule:: scipy.sparse.linalg
|
6 |
+
|
7 |
+
Abstract linear operators
|
8 |
+
-------------------------
|
9 |
+
|
10 |
+
.. autosummary::
|
11 |
+
:toctree: generated/
|
12 |
+
|
13 |
+
LinearOperator -- abstract representation of a linear operator
|
14 |
+
aslinearoperator -- convert an object to an abstract linear operator
|
15 |
+
|
16 |
+
Matrix Operations
|
17 |
+
-----------------
|
18 |
+
|
19 |
+
.. autosummary::
|
20 |
+
:toctree: generated/
|
21 |
+
|
22 |
+
inv -- compute the sparse matrix inverse
|
23 |
+
expm -- compute the sparse matrix exponential
|
24 |
+
expm_multiply -- compute the product of a matrix exponential and a matrix
|
25 |
+
matrix_power -- compute the matrix power by raising a matrix to an exponent
|
26 |
+
|
27 |
+
Matrix norms
|
28 |
+
------------
|
29 |
+
|
30 |
+
.. autosummary::
|
31 |
+
:toctree: generated/
|
32 |
+
|
33 |
+
norm -- Norm of a sparse matrix
|
34 |
+
onenormest -- Estimate the 1-norm of a sparse matrix
|
35 |
+
|
36 |
+
Solving linear problems
|
37 |
+
-----------------------
|
38 |
+
|
39 |
+
Direct methods for linear equation systems:
|
40 |
+
|
41 |
+
.. autosummary::
|
42 |
+
:toctree: generated/
|
43 |
+
|
44 |
+
spsolve -- Solve the sparse linear system Ax=b
|
45 |
+
spsolve_triangular -- Solve sparse linear system Ax=b for a triangular A.
|
46 |
+
factorized -- Pre-factorize matrix to a function solving a linear system
|
47 |
+
MatrixRankWarning -- Warning on exactly singular matrices
|
48 |
+
use_solver -- Select direct solver to use
|
49 |
+
|
50 |
+
Iterative methods for linear equation systems:
|
51 |
+
|
52 |
+
.. autosummary::
|
53 |
+
:toctree: generated/
|
54 |
+
|
55 |
+
bicg -- Use BIConjugate Gradient iteration to solve Ax = b
|
56 |
+
bicgstab -- Use BIConjugate Gradient STABilized iteration to solve Ax = b
|
57 |
+
cg -- Use Conjugate Gradient iteration to solve Ax = b
|
58 |
+
cgs -- Use Conjugate Gradient Squared iteration to solve Ax = b
|
59 |
+
gmres -- Use Generalized Minimal RESidual iteration to solve Ax = b
|
60 |
+
lgmres -- Solve a matrix equation using the LGMRES algorithm
|
61 |
+
minres -- Use MINimum RESidual iteration to solve Ax = b
|
62 |
+
qmr -- Use Quasi-Minimal Residual iteration to solve Ax = b
|
63 |
+
gcrotmk -- Solve a matrix equation using the GCROT(m,k) algorithm
|
64 |
+
tfqmr -- Use Transpose-Free Quasi-Minimal Residual iteration to solve Ax = b
|
65 |
+
|
66 |
+
Iterative methods for least-squares problems:
|
67 |
+
|
68 |
+
.. autosummary::
|
69 |
+
:toctree: generated/
|
70 |
+
|
71 |
+
lsqr -- Find the least-squares solution to a sparse linear equation system
|
72 |
+
lsmr -- Find the least-squares solution to a sparse linear equation system
|
73 |
+
|
74 |
+
Matrix factorizations
|
75 |
+
---------------------
|
76 |
+
|
77 |
+
Eigenvalue problems:
|
78 |
+
|
79 |
+
.. autosummary::
|
80 |
+
:toctree: generated/
|
81 |
+
|
82 |
+
eigs -- Find k eigenvalues and eigenvectors of the square matrix A
|
83 |
+
eigsh -- Find k eigenvalues and eigenvectors of a symmetric matrix
|
84 |
+
lobpcg -- Solve symmetric partial eigenproblems with optional preconditioning
|
85 |
+
|
86 |
+
Singular values problems:
|
87 |
+
|
88 |
+
.. autosummary::
|
89 |
+
:toctree: generated/
|
90 |
+
|
91 |
+
svds -- Compute k singular values/vectors for a sparse matrix
|
92 |
+
|
93 |
+
The `svds` function supports the following solvers:
|
94 |
+
|
95 |
+
.. toctree::
|
96 |
+
|
97 |
+
sparse.linalg.svds-arpack
|
98 |
+
sparse.linalg.svds-lobpcg
|
99 |
+
sparse.linalg.svds-propack
|
100 |
+
|
101 |
+
Complete or incomplete LU factorizations
|
102 |
+
|
103 |
+
.. autosummary::
|
104 |
+
:toctree: generated/
|
105 |
+
|
106 |
+
splu -- Compute a LU decomposition for a sparse matrix
|
107 |
+
spilu -- Compute an incomplete LU decomposition for a sparse matrix
|
108 |
+
SuperLU -- Object representing an LU factorization
|
109 |
+
|
110 |
+
Sparse arrays with structure
|
111 |
+
----------------------------
|
112 |
+
|
113 |
+
.. autosummary::
|
114 |
+
:toctree: generated/
|
115 |
+
|
116 |
+
LaplacianNd -- Laplacian on a uniform rectangular grid in ``N`` dimensions
|
117 |
+
|
118 |
+
Exceptions
|
119 |
+
----------
|
120 |
+
|
121 |
+
.. autosummary::
|
122 |
+
:toctree: generated/
|
123 |
+
|
124 |
+
ArpackNoConvergence
|
125 |
+
ArpackError
|
126 |
+
|
127 |
+
"""
|
128 |
+
|
129 |
+
from ._isolve import *
|
130 |
+
from ._dsolve import *
|
131 |
+
from ._interface import *
|
132 |
+
from ._eigen import *
|
133 |
+
from ._matfuncs import *
|
134 |
+
from ._onenormest import *
|
135 |
+
from ._norm import *
|
136 |
+
from ._expm_multiply import *
|
137 |
+
from ._special_sparse_arrays import *
|
138 |
+
|
139 |
+
# Deprecated namespaces, to be removed in v2.0.0
|
140 |
+
from . import isolve, dsolve, interface, eigen, matfuncs
|
141 |
+
|
142 |
+
__all__ = [s for s in dir() if not s.startswith('_')]
|
143 |
+
|
144 |
+
from scipy._lib._testutils import PytestTester
|
145 |
+
test = PytestTester(__name__)
|
146 |
+
del PytestTester
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (4.27 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_expm_multiply.cpython-310.pyc
ADDED
Binary file (23.1 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_interface.cpython-310.pyc
ADDED
Binary file (30 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_matfuncs.cpython-310.pyc
ADDED
Binary file (25.5 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_norm.cpython-310.pyc
ADDED
Binary file (5.08 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_onenormest.cpython-310.pyc
ADDED
Binary file (11.9 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_special_sparse_arrays.cpython-310.pyc
ADDED
Binary file (33.9 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/_svdp.cpython-310.pyc
ADDED
Binary file (9.19 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/dsolve.cpython-310.pyc
ADDED
Binary file (750 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/eigen.cpython-310.pyc
ADDED
Binary file (716 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/interface.cpython-310.pyc
ADDED
Binary file (744 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/isolve.cpython-310.pyc
ADDED
Binary file (716 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/__pycache__/matfuncs.cpython-310.pyc
ADDED
Binary file (754 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/tests/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (206 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/_svds.py
ADDED
@@ -0,0 +1,545 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
|
3 |
+
from .arpack import _arpack # type: ignore[attr-defined]
|
4 |
+
from . import eigsh
|
5 |
+
|
6 |
+
from scipy._lib._util import check_random_state
|
7 |
+
from scipy.sparse.linalg._interface import LinearOperator, aslinearoperator
|
8 |
+
from scipy.sparse.linalg._eigen.lobpcg import lobpcg # type: ignore[no-redef]
|
9 |
+
from scipy.sparse.linalg._svdp import _svdp
|
10 |
+
from scipy.linalg import svd
|
11 |
+
|
12 |
+
arpack_int = _arpack.timing.nbx.dtype
|
13 |
+
__all__ = ['svds']
|
14 |
+
|
15 |
+
|
16 |
+
def _herm(x):
|
17 |
+
return x.T.conj()
|
18 |
+
|
19 |
+
|
20 |
+
def _iv(A, k, ncv, tol, which, v0, maxiter,
|
21 |
+
return_singular, solver, random_state):
|
22 |
+
|
23 |
+
# input validation/standardization for `solver`
|
24 |
+
# out of order because it's needed for other parameters
|
25 |
+
solver = str(solver).lower()
|
26 |
+
solvers = {"arpack", "lobpcg", "propack"}
|
27 |
+
if solver not in solvers:
|
28 |
+
raise ValueError(f"solver must be one of {solvers}.")
|
29 |
+
|
30 |
+
# input validation/standardization for `A`
|
31 |
+
A = aslinearoperator(A) # this takes care of some input validation
|
32 |
+
if not (np.issubdtype(A.dtype, np.complexfloating)
|
33 |
+
or np.issubdtype(A.dtype, np.floating)):
|
34 |
+
message = "`A` must be of floating or complex floating data type."
|
35 |
+
raise ValueError(message)
|
36 |
+
if np.prod(A.shape) == 0:
|
37 |
+
message = "`A` must not be empty."
|
38 |
+
raise ValueError(message)
|
39 |
+
|
40 |
+
# input validation/standardization for `k`
|
41 |
+
kmax = min(A.shape) if solver == 'propack' else min(A.shape) - 1
|
42 |
+
if int(k) != k or not (0 < k <= kmax):
|
43 |
+
message = "`k` must be an integer satisfying `0 < k < min(A.shape)`."
|
44 |
+
raise ValueError(message)
|
45 |
+
k = int(k)
|
46 |
+
|
47 |
+
# input validation/standardization for `ncv`
|
48 |
+
if solver == "arpack" and ncv is not None:
|
49 |
+
if int(ncv) != ncv or not (k < ncv < min(A.shape)):
|
50 |
+
message = ("`ncv` must be an integer satisfying "
|
51 |
+
"`k < ncv < min(A.shape)`.")
|
52 |
+
raise ValueError(message)
|
53 |
+
ncv = int(ncv)
|
54 |
+
|
55 |
+
# input validation/standardization for `tol`
|
56 |
+
if tol < 0 or not np.isfinite(tol):
|
57 |
+
message = "`tol` must be a non-negative floating point value."
|
58 |
+
raise ValueError(message)
|
59 |
+
tol = float(tol)
|
60 |
+
|
61 |
+
# input validation/standardization for `which`
|
62 |
+
which = str(which).upper()
|
63 |
+
whichs = {'LM', 'SM'}
|
64 |
+
if which not in whichs:
|
65 |
+
raise ValueError(f"`which` must be in {whichs}.")
|
66 |
+
|
67 |
+
# input validation/standardization for `v0`
|
68 |
+
if v0 is not None:
|
69 |
+
v0 = np.atleast_1d(v0)
|
70 |
+
if not (np.issubdtype(v0.dtype, np.complexfloating)
|
71 |
+
or np.issubdtype(v0.dtype, np.floating)):
|
72 |
+
message = ("`v0` must be of floating or complex floating "
|
73 |
+
"data type.")
|
74 |
+
raise ValueError(message)
|
75 |
+
|
76 |
+
shape = (A.shape[0],) if solver == 'propack' else (min(A.shape),)
|
77 |
+
if v0.shape != shape:
|
78 |
+
message = f"`v0` must have shape {shape}."
|
79 |
+
raise ValueError(message)
|
80 |
+
|
81 |
+
# input validation/standardization for `maxiter`
|
82 |
+
if maxiter is not None and (int(maxiter) != maxiter or maxiter <= 0):
|
83 |
+
message = "`maxiter` must be a positive integer."
|
84 |
+
raise ValueError(message)
|
85 |
+
maxiter = int(maxiter) if maxiter is not None else maxiter
|
86 |
+
|
87 |
+
# input validation/standardization for `return_singular_vectors`
|
88 |
+
# not going to be flexible with this; too complicated for little gain
|
89 |
+
rs_options = {True, False, "vh", "u"}
|
90 |
+
if return_singular not in rs_options:
|
91 |
+
raise ValueError(f"`return_singular_vectors` must be in {rs_options}.")
|
92 |
+
|
93 |
+
random_state = check_random_state(random_state)
|
94 |
+
|
95 |
+
return (A, k, ncv, tol, which, v0, maxiter,
|
96 |
+
return_singular, solver, random_state)
|
97 |
+
|
98 |
+
|
99 |
+
def svds(A, k=6, ncv=None, tol=0, which='LM', v0=None,
|
100 |
+
maxiter=None, return_singular_vectors=True,
|
101 |
+
solver='arpack', random_state=None, options=None):
|
102 |
+
"""
|
103 |
+
Partial singular value decomposition of a sparse matrix.
|
104 |
+
|
105 |
+
Compute the largest or smallest `k` singular values and corresponding
|
106 |
+
singular vectors of a sparse matrix `A`. The order in which the singular
|
107 |
+
values are returned is not guaranteed.
|
108 |
+
|
109 |
+
In the descriptions below, let ``M, N = A.shape``.
|
110 |
+
|
111 |
+
Parameters
|
112 |
+
----------
|
113 |
+
A : ndarray, sparse matrix, or LinearOperator
|
114 |
+
Matrix to decompose of a floating point numeric dtype.
|
115 |
+
k : int, default: 6
|
116 |
+
Number of singular values and singular vectors to compute.
|
117 |
+
Must satisfy ``1 <= k <= kmax``, where ``kmax=min(M, N)`` for
|
118 |
+
``solver='propack'`` and ``kmax=min(M, N) - 1`` otherwise.
|
119 |
+
ncv : int, optional
|
120 |
+
When ``solver='arpack'``, this is the number of Lanczos vectors
|
121 |
+
generated. See :ref:`'arpack' <sparse.linalg.svds-arpack>` for details.
|
122 |
+
When ``solver='lobpcg'`` or ``solver='propack'``, this parameter is
|
123 |
+
ignored.
|
124 |
+
tol : float, optional
|
125 |
+
Tolerance for singular values. Zero (default) means machine precision.
|
126 |
+
which : {'LM', 'SM'}
|
127 |
+
Which `k` singular values to find: either the largest magnitude ('LM')
|
128 |
+
or smallest magnitude ('SM') singular values.
|
129 |
+
v0 : ndarray, optional
|
130 |
+
The starting vector for iteration; see method-specific
|
131 |
+
documentation (:ref:`'arpack' <sparse.linalg.svds-arpack>`,
|
132 |
+
:ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`), or
|
133 |
+
:ref:`'propack' <sparse.linalg.svds-propack>` for details.
|
134 |
+
maxiter : int, optional
|
135 |
+
Maximum number of iterations; see method-specific
|
136 |
+
documentation (:ref:`'arpack' <sparse.linalg.svds-arpack>`,
|
137 |
+
:ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`), or
|
138 |
+
:ref:`'propack' <sparse.linalg.svds-propack>` for details.
|
139 |
+
return_singular_vectors : {True, False, "u", "vh"}
|
140 |
+
Singular values are always computed and returned; this parameter
|
141 |
+
controls the computation and return of singular vectors.
|
142 |
+
|
143 |
+
- ``True``: return singular vectors.
|
144 |
+
- ``False``: do not return singular vectors.
|
145 |
+
- ``"u"``: if ``M <= N``, compute only the left singular vectors and
|
146 |
+
return ``None`` for the right singular vectors. Otherwise, compute
|
147 |
+
all singular vectors.
|
148 |
+
- ``"vh"``: if ``M > N``, compute only the right singular vectors and
|
149 |
+
return ``None`` for the left singular vectors. Otherwise, compute
|
150 |
+
all singular vectors.
|
151 |
+
|
152 |
+
If ``solver='propack'``, the option is respected regardless of the
|
153 |
+
matrix shape.
|
154 |
+
|
155 |
+
solver : {'arpack', 'propack', 'lobpcg'}, optional
|
156 |
+
The solver used.
|
157 |
+
:ref:`'arpack' <sparse.linalg.svds-arpack>`,
|
158 |
+
:ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`, and
|
159 |
+
:ref:`'propack' <sparse.linalg.svds-propack>` are supported.
|
160 |
+
Default: `'arpack'`.
|
161 |
+
random_state : {None, int, `numpy.random.Generator`,
|
162 |
+
`numpy.random.RandomState`}, optional
|
163 |
+
|
164 |
+
Pseudorandom number generator state used to generate resamples.
|
165 |
+
|
166 |
+
If `random_state` is ``None`` (or `np.random`), the
|
167 |
+
`numpy.random.RandomState` singleton is used.
|
168 |
+
If `random_state` is an int, a new ``RandomState`` instance is used,
|
169 |
+
seeded with `random_state`.
|
170 |
+
If `random_state` is already a ``Generator`` or ``RandomState``
|
171 |
+
instance then that instance is used.
|
172 |
+
options : dict, optional
|
173 |
+
A dictionary of solver-specific options. No solver-specific options
|
174 |
+
are currently supported; this parameter is reserved for future use.
|
175 |
+
|
176 |
+
Returns
|
177 |
+
-------
|
178 |
+
u : ndarray, shape=(M, k)
|
179 |
+
Unitary matrix having left singular vectors as columns.
|
180 |
+
s : ndarray, shape=(k,)
|
181 |
+
The singular values.
|
182 |
+
vh : ndarray, shape=(k, N)
|
183 |
+
Unitary matrix having right singular vectors as rows.
|
184 |
+
|
185 |
+
Notes
|
186 |
+
-----
|
187 |
+
This is a naive implementation using ARPACK or LOBPCG as an eigensolver
|
188 |
+
on the matrix ``A.conj().T @ A`` or ``A @ A.conj().T``, depending on
|
189 |
+
which one is smaller size, followed by the Rayleigh-Ritz method
|
190 |
+
as postprocessing; see
|
191 |
+
Using the normal matrix, in Rayleigh-Ritz method, (2022, Nov. 19),
|
192 |
+
Wikipedia, https://w.wiki/4zms.
|
193 |
+
|
194 |
+
Alternatively, the PROPACK solver can be called.
|
195 |
+
|
196 |
+
Choices of the input matrix `A` numeric dtype may be limited.
|
197 |
+
Only ``solver="lobpcg"`` supports all floating point dtypes
|
198 |
+
real: 'np.float32', 'np.float64', 'np.longdouble' and
|
199 |
+
complex: 'np.complex64', 'np.complex128', 'np.clongdouble'.
|
200 |
+
The ``solver="arpack"`` supports only
|
201 |
+
'np.float32', 'np.float64', and 'np.complex128'.
|
202 |
+
|
203 |
+
Examples
|
204 |
+
--------
|
205 |
+
Construct a matrix `A` from singular values and vectors.
|
206 |
+
|
207 |
+
>>> import numpy as np
|
208 |
+
>>> from scipy import sparse, linalg, stats
|
209 |
+
>>> from scipy.sparse.linalg import svds, aslinearoperator, LinearOperator
|
210 |
+
|
211 |
+
Construct a dense matrix `A` from singular values and vectors.
|
212 |
+
|
213 |
+
>>> rng = np.random.default_rng(258265244568965474821194062361901728911)
|
214 |
+
>>> orthogonal = stats.ortho_group.rvs(10, random_state=rng)
|
215 |
+
>>> s = [1e-3, 1, 2, 3, 4] # non-zero singular values
|
216 |
+
>>> u = orthogonal[:, :5] # left singular vectors
|
217 |
+
>>> vT = orthogonal[:, 5:].T # right singular vectors
|
218 |
+
>>> A = u @ np.diag(s) @ vT
|
219 |
+
|
220 |
+
With only four singular values/vectors, the SVD approximates the original
|
221 |
+
matrix.
|
222 |
+
|
223 |
+
>>> u4, s4, vT4 = svds(A, k=4)
|
224 |
+
>>> A4 = u4 @ np.diag(s4) @ vT4
|
225 |
+
>>> np.allclose(A4, A, atol=1e-3)
|
226 |
+
True
|
227 |
+
|
228 |
+
With all five non-zero singular values/vectors, we can reproduce
|
229 |
+
the original matrix more accurately.
|
230 |
+
|
231 |
+
>>> u5, s5, vT5 = svds(A, k=5)
|
232 |
+
>>> A5 = u5 @ np.diag(s5) @ vT5
|
233 |
+
>>> np.allclose(A5, A)
|
234 |
+
True
|
235 |
+
|
236 |
+
The singular values match the expected singular values.
|
237 |
+
|
238 |
+
>>> np.allclose(s5, s)
|
239 |
+
True
|
240 |
+
|
241 |
+
Since the singular values are not close to each other in this example,
|
242 |
+
every singular vector matches as expected up to a difference in sign.
|
243 |
+
|
244 |
+
>>> (np.allclose(np.abs(u5), np.abs(u)) and
|
245 |
+
... np.allclose(np.abs(vT5), np.abs(vT)))
|
246 |
+
True
|
247 |
+
|
248 |
+
The singular vectors are also orthogonal.
|
249 |
+
|
250 |
+
>>> (np.allclose(u5.T @ u5, np.eye(5)) and
|
251 |
+
... np.allclose(vT5 @ vT5.T, np.eye(5)))
|
252 |
+
True
|
253 |
+
|
254 |
+
If there are (nearly) multiple singular values, the corresponding
|
255 |
+
individual singular vectors may be unstable, but the whole invariant
|
256 |
+
subspace containing all such singular vectors is computed accurately
|
257 |
+
as can be measured by angles between subspaces via 'subspace_angles'.
|
258 |
+
|
259 |
+
>>> rng = np.random.default_rng(178686584221410808734965903901790843963)
|
260 |
+
>>> s = [1, 1 + 1e-6] # non-zero singular values
|
261 |
+
>>> u, _ = np.linalg.qr(rng.standard_normal((99, 2)))
|
262 |
+
>>> v, _ = np.linalg.qr(rng.standard_normal((99, 2)))
|
263 |
+
>>> vT = v.T
|
264 |
+
>>> A = u @ np.diag(s) @ vT
|
265 |
+
>>> A = A.astype(np.float32)
|
266 |
+
>>> u2, s2, vT2 = svds(A, k=2, random_state=rng)
|
267 |
+
>>> np.allclose(s2, s)
|
268 |
+
True
|
269 |
+
|
270 |
+
The angles between the individual exact and computed singular vectors
|
271 |
+
may not be so small. To check use:
|
272 |
+
|
273 |
+
>>> (linalg.subspace_angles(u2[:, :1], u[:, :1]) +
|
274 |
+
... linalg.subspace_angles(u2[:, 1:], u[:, 1:]))
|
275 |
+
array([0.06562513]) # may vary
|
276 |
+
>>> (linalg.subspace_angles(vT2[:1, :].T, vT[:1, :].T) +
|
277 |
+
... linalg.subspace_angles(vT2[1:, :].T, vT[1:, :].T))
|
278 |
+
array([0.06562507]) # may vary
|
279 |
+
|
280 |
+
As opposed to the angles between the 2-dimensional invariant subspaces
|
281 |
+
that these vectors span, which are small for rights singular vectors
|
282 |
+
|
283 |
+
>>> linalg.subspace_angles(u2, u).sum() < 1e-6
|
284 |
+
True
|
285 |
+
|
286 |
+
as well as for left singular vectors.
|
287 |
+
|
288 |
+
>>> linalg.subspace_angles(vT2.T, vT.T).sum() < 1e-6
|
289 |
+
True
|
290 |
+
|
291 |
+
The next example follows that of 'sklearn.decomposition.TruncatedSVD'.
|
292 |
+
|
293 |
+
>>> rng = np.random.RandomState(0)
|
294 |
+
>>> X_dense = rng.random(size=(100, 100))
|
295 |
+
>>> X_dense[:, 2 * np.arange(50)] = 0
|
296 |
+
>>> X = sparse.csr_matrix(X_dense)
|
297 |
+
>>> _, singular_values, _ = svds(X, k=5, random_state=rng)
|
298 |
+
>>> print(singular_values)
|
299 |
+
[ 4.3293... 4.4491... 4.5420... 4.5987... 35.2410...]
|
300 |
+
|
301 |
+
The function can be called without the transpose of the input matrix
|
302 |
+
ever explicitly constructed.
|
303 |
+
|
304 |
+
>>> rng = np.random.default_rng(102524723947864966825913730119128190974)
|
305 |
+
>>> G = sparse.rand(8, 9, density=0.5, random_state=rng)
|
306 |
+
>>> Glo = aslinearoperator(G)
|
307 |
+
>>> _, singular_values_svds, _ = svds(Glo, k=5, random_state=rng)
|
308 |
+
>>> _, singular_values_svd, _ = linalg.svd(G.toarray())
|
309 |
+
>>> np.allclose(singular_values_svds, singular_values_svd[-4::-1])
|
310 |
+
True
|
311 |
+
|
312 |
+
The most memory efficient scenario is where neither
|
313 |
+
the original matrix, nor its transpose, is explicitly constructed.
|
314 |
+
Our example computes the smallest singular values and vectors
|
315 |
+
of 'LinearOperator' constructed from the numpy function 'np.diff' used
|
316 |
+
column-wise to be consistent with 'LinearOperator' operating on columns.
|
317 |
+
|
318 |
+
>>> diff0 = lambda a: np.diff(a, axis=0)
|
319 |
+
|
320 |
+
Let us create the matrix from 'diff0' to be used for validation only.
|
321 |
+
|
322 |
+
>>> n = 5 # The dimension of the space.
|
323 |
+
>>> M_from_diff0 = diff0(np.eye(n))
|
324 |
+
>>> print(M_from_diff0.astype(int))
|
325 |
+
[[-1 1 0 0 0]
|
326 |
+
[ 0 -1 1 0 0]
|
327 |
+
[ 0 0 -1 1 0]
|
328 |
+
[ 0 0 0 -1 1]]
|
329 |
+
|
330 |
+
The matrix 'M_from_diff0' is bi-diagonal and could be alternatively
|
331 |
+
created directly by
|
332 |
+
|
333 |
+
>>> M = - np.eye(n - 1, n, dtype=int)
|
334 |
+
>>> np.fill_diagonal(M[:,1:], 1)
|
335 |
+
>>> np.allclose(M, M_from_diff0)
|
336 |
+
True
|
337 |
+
|
338 |
+
Its transpose
|
339 |
+
|
340 |
+
>>> print(M.T)
|
341 |
+
[[-1 0 0 0]
|
342 |
+
[ 1 -1 0 0]
|
343 |
+
[ 0 1 -1 0]
|
344 |
+
[ 0 0 1 -1]
|
345 |
+
[ 0 0 0 1]]
|
346 |
+
|
347 |
+
can be viewed as the incidence matrix; see
|
348 |
+
Incidence matrix, (2022, Nov. 19), Wikipedia, https://w.wiki/5YXU,
|
349 |
+
of a linear graph with 5 vertices and 4 edges. The 5x5 normal matrix
|
350 |
+
``M.T @ M`` thus is
|
351 |
+
|
352 |
+
>>> print(M.T @ M)
|
353 |
+
[[ 1 -1 0 0 0]
|
354 |
+
[-1 2 -1 0 0]
|
355 |
+
[ 0 -1 2 -1 0]
|
356 |
+
[ 0 0 -1 2 -1]
|
357 |
+
[ 0 0 0 -1 1]]
|
358 |
+
|
359 |
+
the graph Laplacian, while the actually used in 'svds' smaller size
|
360 |
+
4x4 normal matrix ``M @ M.T``
|
361 |
+
|
362 |
+
>>> print(M @ M.T)
|
363 |
+
[[ 2 -1 0 0]
|
364 |
+
[-1 2 -1 0]
|
365 |
+
[ 0 -1 2 -1]
|
366 |
+
[ 0 0 -1 2]]
|
367 |
+
|
368 |
+
is the so-called edge-based Laplacian; see
|
369 |
+
Symmetric Laplacian via the incidence matrix, in Laplacian matrix,
|
370 |
+
(2022, Nov. 19), Wikipedia, https://w.wiki/5YXW.
|
371 |
+
|
372 |
+
The 'LinearOperator' setup needs the options 'rmatvec' and 'rmatmat'
|
373 |
+
of multiplication by the matrix transpose ``M.T``, but we want to be
|
374 |
+
matrix-free to save memory, so knowing how ``M.T`` looks like, we
|
375 |
+
manually construct the following function to be
|
376 |
+
used in ``rmatmat=diff0t``.
|
377 |
+
|
378 |
+
>>> def diff0t(a):
|
379 |
+
... if a.ndim == 1:
|
380 |
+
... a = a[:,np.newaxis] # Turn 1D into 2D array
|
381 |
+
... d = np.zeros((a.shape[0] + 1, a.shape[1]), dtype=a.dtype)
|
382 |
+
... d[0, :] = - a[0, :]
|
383 |
+
... d[1:-1, :] = a[0:-1, :] - a[1:, :]
|
384 |
+
... d[-1, :] = a[-1, :]
|
385 |
+
... return d
|
386 |
+
|
387 |
+
We check that our function 'diff0t' for the matrix transpose is valid.
|
388 |
+
|
389 |
+
>>> np.allclose(M.T, diff0t(np.eye(n-1)))
|
390 |
+
True
|
391 |
+
|
392 |
+
Now we setup our matrix-free 'LinearOperator' called 'diff0_func_aslo'
|
393 |
+
and for validation the matrix-based 'diff0_matrix_aslo'.
|
394 |
+
|
395 |
+
>>> def diff0_func_aslo_def(n):
|
396 |
+
... return LinearOperator(matvec=diff0,
|
397 |
+
... matmat=diff0,
|
398 |
+
... rmatvec=diff0t,
|
399 |
+
... rmatmat=diff0t,
|
400 |
+
... shape=(n - 1, n))
|
401 |
+
>>> diff0_func_aslo = diff0_func_aslo_def(n)
|
402 |
+
>>> diff0_matrix_aslo = aslinearoperator(M_from_diff0)
|
403 |
+
|
404 |
+
And validate both the matrix and its transpose in 'LinearOperator'.
|
405 |
+
|
406 |
+
>>> np.allclose(diff0_func_aslo(np.eye(n)),
|
407 |
+
... diff0_matrix_aslo(np.eye(n)))
|
408 |
+
True
|
409 |
+
>>> np.allclose(diff0_func_aslo.T(np.eye(n-1)),
|
410 |
+
... diff0_matrix_aslo.T(np.eye(n-1)))
|
411 |
+
True
|
412 |
+
|
413 |
+
Having the 'LinearOperator' setup validated, we run the solver.
|
414 |
+
|
415 |
+
>>> n = 100
|
416 |
+
>>> diff0_func_aslo = diff0_func_aslo_def(n)
|
417 |
+
>>> u, s, vT = svds(diff0_func_aslo, k=3, which='SM')
|
418 |
+
|
419 |
+
The singular values squared and the singular vectors are known
|
420 |
+
explicitly; see
|
421 |
+
Pure Dirichlet boundary conditions, in
|
422 |
+
Eigenvalues and eigenvectors of the second derivative,
|
423 |
+
(2022, Nov. 19), Wikipedia, https://w.wiki/5YX6,
|
424 |
+
since 'diff' corresponds to first
|
425 |
+
derivative, and its smaller size n-1 x n-1 normal matrix
|
426 |
+
``M @ M.T`` represent the discrete second derivative with the Dirichlet
|
427 |
+
boundary conditions. We use these analytic expressions for validation.
|
428 |
+
|
429 |
+
>>> se = 2. * np.sin(np.pi * np.arange(1, 4) / (2. * n))
|
430 |
+
>>> ue = np.sqrt(2 / n) * np.sin(np.pi * np.outer(np.arange(1, n),
|
431 |
+
... np.arange(1, 4)) / n)
|
432 |
+
>>> np.allclose(s, se, atol=1e-3)
|
433 |
+
True
|
434 |
+
>>> print(np.allclose(np.abs(u), np.abs(ue), atol=1e-6))
|
435 |
+
True
|
436 |
+
|
437 |
+
"""
|
438 |
+
args = _iv(A, k, ncv, tol, which, v0, maxiter, return_singular_vectors,
|
439 |
+
solver, random_state)
|
440 |
+
(A, k, ncv, tol, which, v0, maxiter,
|
441 |
+
return_singular_vectors, solver, random_state) = args
|
442 |
+
|
443 |
+
largest = (which == 'LM')
|
444 |
+
n, m = A.shape
|
445 |
+
|
446 |
+
if n >= m:
|
447 |
+
X_dot = A.matvec
|
448 |
+
X_matmat = A.matmat
|
449 |
+
XH_dot = A.rmatvec
|
450 |
+
XH_mat = A.rmatmat
|
451 |
+
transpose = False
|
452 |
+
else:
|
453 |
+
X_dot = A.rmatvec
|
454 |
+
X_matmat = A.rmatmat
|
455 |
+
XH_dot = A.matvec
|
456 |
+
XH_mat = A.matmat
|
457 |
+
transpose = True
|
458 |
+
|
459 |
+
dtype = getattr(A, 'dtype', None)
|
460 |
+
if dtype is None:
|
461 |
+
dtype = A.dot(np.zeros([m, 1])).dtype
|
462 |
+
|
463 |
+
def matvec_XH_X(x):
|
464 |
+
return XH_dot(X_dot(x))
|
465 |
+
|
466 |
+
def matmat_XH_X(x):
|
467 |
+
return XH_mat(X_matmat(x))
|
468 |
+
|
469 |
+
XH_X = LinearOperator(matvec=matvec_XH_X, dtype=A.dtype,
|
470 |
+
matmat=matmat_XH_X,
|
471 |
+
shape=(min(A.shape), min(A.shape)))
|
472 |
+
|
473 |
+
# Get a low rank approximation of the implicitly defined gramian matrix.
|
474 |
+
# This is not a stable way to approach the problem.
|
475 |
+
if solver == 'lobpcg':
|
476 |
+
|
477 |
+
if k == 1 and v0 is not None:
|
478 |
+
X = np.reshape(v0, (-1, 1))
|
479 |
+
else:
|
480 |
+
X = random_state.standard_normal(size=(min(A.shape), k))
|
481 |
+
|
482 |
+
_, eigvec = lobpcg(XH_X, X, tol=tol ** 2, maxiter=maxiter,
|
483 |
+
largest=largest)
|
484 |
+
|
485 |
+
elif solver == 'propack':
|
486 |
+
jobu = return_singular_vectors in {True, 'u'}
|
487 |
+
jobv = return_singular_vectors in {True, 'vh'}
|
488 |
+
irl_mode = (which == 'SM')
|
489 |
+
res = _svdp(A, k=k, tol=tol**2, which=which, maxiter=None,
|
490 |
+
compute_u=jobu, compute_v=jobv, irl_mode=irl_mode,
|
491 |
+
kmax=maxiter, v0=v0, random_state=random_state)
|
492 |
+
|
493 |
+
u, s, vh, _ = res # but we'll ignore bnd, the last output
|
494 |
+
|
495 |
+
# PROPACK order appears to be largest first. `svds` output order is not
|
496 |
+
# guaranteed, according to documentation, but for ARPACK and LOBPCG
|
497 |
+
# they actually are ordered smallest to largest, so reverse for
|
498 |
+
# consistency.
|
499 |
+
s = s[::-1]
|
500 |
+
u = u[:, ::-1]
|
501 |
+
vh = vh[::-1]
|
502 |
+
|
503 |
+
u = u if jobu else None
|
504 |
+
vh = vh if jobv else None
|
505 |
+
|
506 |
+
if return_singular_vectors:
|
507 |
+
return u, s, vh
|
508 |
+
else:
|
509 |
+
return s
|
510 |
+
|
511 |
+
elif solver == 'arpack' or solver is None:
|
512 |
+
if v0 is None:
|
513 |
+
v0 = random_state.standard_normal(size=(min(A.shape),))
|
514 |
+
_, eigvec = eigsh(XH_X, k=k, tol=tol ** 2, maxiter=maxiter,
|
515 |
+
ncv=ncv, which=which, v0=v0)
|
516 |
+
# arpack do not guarantee exactly orthonormal eigenvectors
|
517 |
+
# for clustered eigenvalues, especially in complex arithmetic
|
518 |
+
eigvec, _ = np.linalg.qr(eigvec)
|
519 |
+
|
520 |
+
# the eigenvectors eigvec must be orthonomal here; see gh-16712
|
521 |
+
Av = X_matmat(eigvec)
|
522 |
+
if not return_singular_vectors:
|
523 |
+
s = svd(Av, compute_uv=False, overwrite_a=True)
|
524 |
+
return s[::-1]
|
525 |
+
|
526 |
+
# compute the left singular vectors of X and update the right ones
|
527 |
+
# accordingly
|
528 |
+
u, s, vh = svd(Av, full_matrices=False, overwrite_a=True)
|
529 |
+
u = u[:, ::-1]
|
530 |
+
s = s[::-1]
|
531 |
+
vh = vh[::-1]
|
532 |
+
|
533 |
+
jobu = return_singular_vectors in {True, 'u'}
|
534 |
+
jobv = return_singular_vectors in {True, 'vh'}
|
535 |
+
|
536 |
+
if transpose:
|
537 |
+
u_tmp = eigvec @ _herm(vh) if jobu else None
|
538 |
+
vh = _herm(u) if jobv else None
|
539 |
+
u = u_tmp
|
540 |
+
else:
|
541 |
+
if not jobu:
|
542 |
+
u = None
|
543 |
+
vh = vh @ _herm(eigvec) if jobv else None
|
544 |
+
|
545 |
+
return u, s, vh
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_expm_multiply.py
ADDED
@@ -0,0 +1,810 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Compute the action of the matrix exponential."""
|
2 |
+
from warnings import warn
|
3 |
+
|
4 |
+
import numpy as np
|
5 |
+
|
6 |
+
import scipy.linalg
|
7 |
+
import scipy.sparse.linalg
|
8 |
+
from scipy.linalg._decomp_qr import qr
|
9 |
+
from scipy.sparse._sputils import is_pydata_spmatrix
|
10 |
+
from scipy.sparse.linalg import aslinearoperator
|
11 |
+
from scipy.sparse.linalg._interface import IdentityOperator
|
12 |
+
from scipy.sparse.linalg._onenormest import onenormest
|
13 |
+
|
14 |
+
__all__ = ['expm_multiply']
|
15 |
+
|
16 |
+
|
17 |
+
def _exact_inf_norm(A):
|
18 |
+
# A compatibility function which should eventually disappear.
|
19 |
+
if scipy.sparse.issparse(A):
|
20 |
+
return max(abs(A).sum(axis=1).flat)
|
21 |
+
elif is_pydata_spmatrix(A):
|
22 |
+
return max(abs(A).sum(axis=1))
|
23 |
+
else:
|
24 |
+
return np.linalg.norm(A, np.inf)
|
25 |
+
|
26 |
+
|
27 |
+
def _exact_1_norm(A):
|
28 |
+
# A compatibility function which should eventually disappear.
|
29 |
+
if scipy.sparse.issparse(A):
|
30 |
+
return max(abs(A).sum(axis=0).flat)
|
31 |
+
elif is_pydata_spmatrix(A):
|
32 |
+
return max(abs(A).sum(axis=0))
|
33 |
+
else:
|
34 |
+
return np.linalg.norm(A, 1)
|
35 |
+
|
36 |
+
|
37 |
+
def _trace(A):
|
38 |
+
# A compatibility function which should eventually disappear.
|
39 |
+
if is_pydata_spmatrix(A):
|
40 |
+
return A.to_scipy_sparse().trace()
|
41 |
+
else:
|
42 |
+
return A.trace()
|
43 |
+
|
44 |
+
|
45 |
+
def traceest(A, m3, seed=None):
    """Estimate `np.trace(A)` using `3*m3` matrix-vector products.

    The result is not deterministic unless a seed is given.

    Parameters
    ----------
    A : LinearOperator
        Linear operator whose trace will be estimated. Has to be square.
    m3 : int
        Number of matrix-vector products divided by 3 used to estimate the
        trace.
    seed : optional
        Seed for `numpy.random.default_rng`.
        Can be provided to obtain deterministic results.

    Returns
    -------
    trace : LinearOperator.dtype
        Estimate of the trace

    Notes
    -----
    This is the Hutch++ algorithm given in [1]_.

    References
    ----------
    .. [1] Meyer, Raphael A., Cameron Musco, Christopher Musco, and David P.
       Woodruff. "Hutch++: Optimal Stochastic Trace Estimation." In Symposium
       on Simplicity in Algorithms (SOSA), pp. 142-155. Society for Industrial
       and Applied Mathematics, 2021
       https://doi.org/10.1137/1.9781611976496.16

    """
    rng = np.random.default_rng(seed)
    if len(A.shape) != 2 or A.shape[-1] != A.shape[-2]:
        raise ValueError("Expected A to be like a square matrix.")
    dim = A.shape[-1]
    # Sketch the dominant subspace with a random sign matrix, then
    # orthonormalize it.  (Same RNG call sequence as the reference code.)
    sketch = rng.choice([-1.0, +1.0], [dim, m3])
    Q, _ = qr(A.matmat(sketch), overwrite_a=True, mode='economic')
    # Exact trace contribution of the captured low-rank part.
    tr_low_rank = np.trace(Q.conj().T @ A.matmat(Q))
    # Hutchinson estimate on the deflated remainder.
    probes = rng.choice([-1, +1], [dim, m3])
    deflated = probes - Q @ (Q.conj().T @ probes)
    tr_residual = np.trace(deflated.conj().T @ A.matmat(deflated))
    return tr_low_rank + tr_residual / m3
|
90 |
+
|
91 |
+
|
92 |
+
def _ident_like(A):
|
93 |
+
# A compatibility function which should eventually disappear.
|
94 |
+
if scipy.sparse.issparse(A):
|
95 |
+
# Creates a sparse matrix in dia format
|
96 |
+
out = scipy.sparse.eye(A.shape[0], A.shape[1], dtype=A.dtype)
|
97 |
+
if isinstance(A, scipy.sparse.spmatrix):
|
98 |
+
return out.asformat(A.format)
|
99 |
+
return scipy.sparse.dia_array(out).asformat(A.format)
|
100 |
+
elif is_pydata_spmatrix(A):
|
101 |
+
import sparse
|
102 |
+
return sparse.eye(A.shape[0], A.shape[1], dtype=A.dtype)
|
103 |
+
elif isinstance(A, scipy.sparse.linalg.LinearOperator):
|
104 |
+
return IdentityOperator(A.shape, dtype=A.dtype)
|
105 |
+
else:
|
106 |
+
return np.eye(A.shape[0], A.shape[1], dtype=A.dtype)
|
107 |
+
|
108 |
+
|
109 |
+
def expm_multiply(A, B, start=None, stop=None, num=None,
                  endpoint=None, traceA=None):
    """
    Compute the action of the matrix exponential of A on B.

    Parameters
    ----------
    A : transposable linear operator
        The operator whose exponential is of interest.
    B : ndarray
        The matrix or vector to be multiplied by the matrix exponential of A.
    start : scalar, optional
        The starting time point of the sequence.
    stop : scalar, optional
        The end time point of the sequence, unless `endpoint` is set to False.
        In that case, the sequence consists of all but the last of ``num + 1``
        evenly spaced time points, so that `stop` is excluded.
        Note that the step size changes when `endpoint` is False.
    num : int, optional
        Number of time points to use.
    endpoint : bool, optional
        If True, `stop` is the last time point. Otherwise, it is not included.
    traceA : scalar, optional
        Trace of `A`. If not given the trace is estimated for linear
        operators, or calculated exactly for sparse matrices. It is used to
        precondition `A`, thus an approximate trace is acceptable.
        For linear operators, `traceA` should be provided to ensure
        performance as the estimation is not guaranteed to be reliable for
        all cases.

        .. versionadded:: 1.9.0

    Returns
    -------
    expm_A_B : ndarray
        The result of the action :math:`e^{t_k A} B`.

    Warns
    -----
    UserWarning
        If `A` is a linear operator and ``traceA=None`` (default).

    Notes
    -----
    The time-point arguments mirror those of `numpy.linspace`.  The output
    ndim is 1 for a single vector at a single time point, 2 for a vector at
    multiple time points or a matrix at a single time point, and 3 for a
    matrix at multiple time points; with multiple time points, ``result[k]``
    is the action at the k-th time point.

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2011)
           "Computing the Action of the Matrix Exponential,
           with an Application to Exponential Integrators."
           SIAM Journal on Scientific Computing,
           33 (2). pp. 488-511. ISSN 1064-8275
           http://eprints.ma.man.ac.uk/1591/

    .. [2] Nicholas J. Higham and Awad H. Al-Mohy (2010)
           "Computing Matrix Functions."
           Acta Numerica,
           19. 159-208. ISSN 0962-4929
           http://eprints.ma.man.ac.uk/1451/

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import expm, expm_multiply
    >>> A = csc_matrix([[1, 0], [0, 1]])
    >>> B = np.array([np.exp(-1.), np.exp(-2.)])
    >>> expm_multiply(A, B, start=1, stop=2, num=3, endpoint=True)
    array([[ 1.        ,  0.36787944],
           [ 1.64872127,  0.60653066],
           [ 2.71828183,  1.        ]])
    """
    # Any explicit time-point argument selects the multi-point algorithm;
    # otherwise compute the action at the single default time t = 1.
    time_args = (start, stop, num, endpoint)
    if any(arg is not None for arg in time_args):
        X, _ = _expm_multiply_interval(A, B, start, stop, num,
                                       endpoint, traceA=traceA)
    else:
        X = _expm_multiply_simple(A, B, traceA=traceA)
    return X
|
212 |
+
|
213 |
+
|
214 |
+
def _expm_multiply_simple(A, B, t=1.0, traceA=None, balance=False):
    """
    Compute the action of the matrix exponential at a single time point.

    Parameters
    ----------
    A : transposable linear operator
        The operator whose exponential is of interest.
    B : ndarray
        The matrix to be multiplied by the matrix exponential of A.
    t : float
        A time point.
    traceA : scalar, optional
        Trace of `A`. If not given the trace is estimated for linear
        operators, or calculated exactly for sparse matrices. It is used to
        precondition `A`, thus an approximate trace is acceptable.
    balance : bool
        Indicates whether or not to apply balancing.
        Balancing is not implemented; passing True raises.

    Returns
    -------
    F : ndarray
        :math:`e^{t A} B`

    Raises
    ------
    NotImplementedError
        If ``balance`` is True.
    ValueError
        If `A` is not square or the shapes of `A` and `B` are incompatible.

    Notes
    -----
    This is algorithm (3.2) in Al-Mohy and Higham (2011).

    """
    if balance:
        raise NotImplementedError
    # Validate shapes before any expensive work.
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected A to be like a square matrix')
    if A.shape[1] != B.shape[0]:
        raise ValueError('shapes of matrices A {} and B {} are incompatible'
                         .format(A.shape, B.shape))
    ident = _ident_like(A)
    is_linear_operator = isinstance(A, scipy.sparse.linalg.LinearOperator)
    n = A.shape[0]
    # n0 is the number of columns of B in the notation of the paper.
    if len(B.shape) == 1:
        n0 = 1
    elif len(B.shape) == 2:
        n0 = B.shape[1]
    else:
        raise ValueError('expected B to be like a matrix or a vector')
    # Tolerance is double-precision unit roundoff.
    u_d = 2**-53
    tol = u_d
    if traceA is None:
        if is_linear_operator:
            warn("Trace of LinearOperator not available, it will be estimated."
                 " Provide `traceA` to ensure performance.", stacklevel=3)
        # m3=1 is bit arbitrary choice, a more accurate trace (larger m3) might
        # speed up exponential calculation, but trace estimation is more costly
        traceA = traceest(A, m3=1) if is_linear_operator else _trace(A)
    # Shift A by mu*I so that the shifted operator has zero trace; this
    # preconditioning reduces the norm and hence the required work.
    mu = traceA / float(n)
    A = A - mu * ident
    # Exact 1-norm for concrete matrices, an estimate for operators.
    A_1_norm = onenormest(A) if is_linear_operator else _exact_1_norm(A)
    if t*A_1_norm == 0:
        # The shifted operator is (numerically) zero: no Taylor terms needed.
        m_star, s = 0, 1
    else:
        ell = 2
        norm_info = LazyOperatorNormInfo(t*A, A_1_norm=t*A_1_norm, ell=ell)
        # Choose Taylor degree m_star and scaling s per fragment (3.1).
        m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)
    return _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol, balance)
|
278 |
+
|
279 |
+
|
280 |
+
def _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol=None, balance=False):
    """
    A helper function implementing the scaled truncated-Taylor recurrence.

    Computes :math:`e^{t(A + \\mu I)} B` given a pre-shifted `A`, by applying
    the degree-`m_star` truncated Taylor series of :math:`e^{tA/s}` a total
    of `s` times, multiplying by ``exp(t*mu/s)`` after each stage to undo the
    trace shift.  This is the core of algorithm (3.2) in Al-Mohy and Higham
    (2011).

    Parameters
    ----------
    A : transposable linear operator
        The (shifted) operator.
    B : ndarray
        The matrix or vector being acted on.
    t : float
        The time point.
    mu : scalar
        The trace shift that was subtracted from the original operator.
    m_star : int
        Taylor truncation degree.
    s : int
        Number of scaling steps.
    tol : float, optional
        Early-termination tolerance; defaults to double unit roundoff.
    balance : bool
        Balancing is not implemented; passing True raises.

    Returns
    -------
    F : ndarray
        The computed action.
    """
    if balance:
        raise NotImplementedError
    if tol is None:
        # Default to double-precision unit roundoff.
        u_d = 2 ** -53
        tol = u_d
    F = B
    # Per-stage factor undoing the mu*I trace shift.
    eta = np.exp(t*mu / float(s))
    for i in range(s):
        c1 = _exact_inf_norm(B)
        for j in range(m_star):
            # B becomes the next Taylor term: (t/s)^(j+1) A^(j+1) B0 / (j+1)!
            coeff = t / float(s*(j+1))
            B = coeff * A.dot(B)
            c2 = _exact_inf_norm(B)
            F = F + B
            # Stop early once two consecutive terms are negligible relative
            # to the accumulated sum (criterion from the paper).
            if c1 + c2 <= tol * _exact_inf_norm(F):
                break
            c1 = c2
        # Undo the shift for this stage and restart the series from F.
        F = eta * F
        B = F
    return F
|
304 |
+
|
305 |
+
|
306 |
+
# This table helps to compute bounds.
# They seem to have been difficult to calculate, involving symbolic
# manipulation of equations, followed by numerical root finding.
# Keys are Taylor truncation degrees m; values are the corresponding
# theta_m bounds used to pick (m_star, s) in _fragment_3_1.
_theta = {
    # The first 30 values are from table A.3 of Computing Matrix Functions.
    1: 2.29e-16,
    2: 2.58e-8,
    3: 1.39e-5,
    4: 3.40e-4,
    5: 2.40e-3,
    6: 9.07e-3,
    7: 2.38e-2,
    8: 5.00e-2,
    9: 8.96e-2,
    10: 1.44e-1,
    # 11
    11: 2.14e-1,
    12: 3.00e-1,
    13: 4.00e-1,
    14: 5.14e-1,
    15: 6.41e-1,
    16: 7.81e-1,
    17: 9.31e-1,
    18: 1.09,
    19: 1.26,
    20: 1.44,
    # 21
    21: 1.62,
    22: 1.82,
    23: 2.01,
    24: 2.22,
    25: 2.43,
    26: 2.64,
    27: 2.86,
    28: 3.08,
    29: 3.31,
    30: 3.54,
    # The rest are from table 3.1 of
    # Computing the Action of the Matrix Exponential.
    35: 4.7,
    40: 6.0,
    45: 7.2,
    50: 8.5,
    55: 9.9,
}
|
351 |
+
|
352 |
+
|
353 |
+
def _onenormest_matrix_power(A, p,
|
354 |
+
t=2, itmax=5, compute_v=False, compute_w=False):
|
355 |
+
"""
|
356 |
+
Efficiently estimate the 1-norm of A^p.
|
357 |
+
|
358 |
+
Parameters
|
359 |
+
----------
|
360 |
+
A : ndarray
|
361 |
+
Matrix whose 1-norm of a power is to be computed.
|
362 |
+
p : int
|
363 |
+
Non-negative integer power.
|
364 |
+
t : int, optional
|
365 |
+
A positive parameter controlling the tradeoff between
|
366 |
+
accuracy versus time and memory usage.
|
367 |
+
Larger values take longer and use more memory
|
368 |
+
but give more accurate output.
|
369 |
+
itmax : int, optional
|
370 |
+
Use at most this many iterations.
|
371 |
+
compute_v : bool, optional
|
372 |
+
Request a norm-maximizing linear operator input vector if True.
|
373 |
+
compute_w : bool, optional
|
374 |
+
Request a norm-maximizing linear operator output vector if True.
|
375 |
+
|
376 |
+
Returns
|
377 |
+
-------
|
378 |
+
est : float
|
379 |
+
An underestimate of the 1-norm of the sparse matrix.
|
380 |
+
v : ndarray, optional
|
381 |
+
The vector such that ||Av||_1 == est*||v||_1.
|
382 |
+
It can be thought of as an input to the linear operator
|
383 |
+
that gives an output with particularly large norm.
|
384 |
+
w : ndarray, optional
|
385 |
+
The vector Av which has relatively large 1-norm.
|
386 |
+
It can be thought of as an output of the linear operator
|
387 |
+
that is relatively large in norm compared to the input.
|
388 |
+
|
389 |
+
"""
|
390 |
+
#XXX Eventually turn this into an API function in the _onenormest module,
|
391 |
+
#XXX and remove its underscore,
|
392 |
+
#XXX but wait until expm_multiply goes into scipy.
|
393 |
+
from scipy.sparse.linalg._onenormest import onenormest
|
394 |
+
return onenormest(aslinearoperator(A) ** p)
|
395 |
+
|
396 |
+
class LazyOperatorNormInfo:
    """
    Lazily computed norm information about an operator.

    Caches the exact 1-norm of the operator together with estimates of
    the 1-norms of its powers, following the notation of Computing the
    Action (2011).  This class is specialized enough to probably not be
    of general interest outside of this module.
    """

    def __init__(self, A, A_1_norm=None, ell=2, scale=1):
        """
        Provide the operator and some norm-related information.

        Parameters
        ----------
        A : linear operator
            The operator of interest.
        A_1_norm : float, optional
            The exact 1-norm of A, if already known.
        ell : int, optional
            A technical parameter controlling norm estimation quality.
        scale : int, optional
            If specified, report the norms of scale*A instead of A.
        """
        self._A = A
        self._A_1_norm = A_1_norm
        self._ell = ell
        self._d = {}
        self._scale = scale

    def set_scale(self, scale):
        """Set the scale factor applied to all reported norms."""
        self._scale = scale

    def onenorm(self):
        """Return the (scaled) exact 1-norm, computing it on first use."""
        if self._A_1_norm is None:
            self._A_1_norm = _exact_1_norm(self._A)
        return self._scale * self._A_1_norm

    def d(self, p):
        """
        Lazily estimate :math:`d_p(A) ~= || A^p ||^(1/p)` where
        :math:`||.||` is the 1-norm.  Estimates are cached per power.
        """
        cached = self._d.get(p)
        if cached is None:
            cached = _onenormest_matrix_power(self._A, p,
                                              self._ell) ** (1.0 / p)
            self._d[p] = cached
        return self._scale * cached

    def alpha(self, p):
        """Lazily compute max(d(p), d(p+1))."""
        return max(self.d(p), self.d(p + 1))
|
458 |
+
|
459 |
+
def _compute_cost_div_m(m, p, norm_info):
    """
    A helper function for computing bounds.

    This is equation (3.10) of Al-Mohy and Higham (2011); it measures
    cost in terms of the number of required matrix products.

    Parameters
    ----------
    m : int
        A valid key of _theta.
    p : int
        A matrix power.
    norm_info : LazyOperatorNormInfo
        Information about 1-norms of related operators.

    Returns
    -------
    cost_div_m : int
        Required number of matrix products divided by m.
    """
    ratio = norm_info.alpha(p) / _theta[m]
    return int(np.ceil(ratio))
|
482 |
+
|
483 |
+
|
484 |
+
def _compute_p_max(m_max):
|
485 |
+
"""
|
486 |
+
Compute the largest positive integer p such that p*(p-1) <= m_max + 1.
|
487 |
+
|
488 |
+
Do this in a slightly dumb way, but safe and not too slow.
|
489 |
+
|
490 |
+
Parameters
|
491 |
+
----------
|
492 |
+
m_max : int
|
493 |
+
A count related to bounds.
|
494 |
+
|
495 |
+
"""
|
496 |
+
sqrt_m_max = np.sqrt(m_max)
|
497 |
+
p_low = int(np.floor(sqrt_m_max))
|
498 |
+
p_high = int(np.ceil(sqrt_m_max + 1))
|
499 |
+
return max(p for p in range(p_low, p_high+1) if p*(p-1) <= m_max + 1)
|
500 |
+
|
501 |
+
|
502 |
+
def _fragment_3_1(norm_info, n0, tol, m_max=55, ell=2):
    """
    A helper function for the _expm_multiply_* functions.

    Selects the Taylor truncation degree ``best_m`` and scaling ``best_s``
    that minimize the estimated number of matrix products ``m * s``.

    Parameters
    ----------
    norm_info : LazyOperatorNormInfo
        Information about norms of certain linear operators of interest.
    n0 : int
        Number of columns in the _expm_multiply_* B matrix.
    tol : float
        Expected to be
        :math:`2^{-24}` for single precision or
        :math:`2^{-53}` for double precision.
    m_max : int
        A value related to a bound.
    ell : int
        The number of columns used in the 1-norm approximation.
        This is usually taken to be small, maybe between 1 and 5.

    Returns
    -------
    best_m : int
        Related to bounds for error control.
    best_s : int
        Amount of scaling.

    Raises
    ------
    ValueError
        If ``ell`` is not a positive integer.

    Notes
    -----
    This is code fragment (3.1) in Al-Mohy and Higham (2011).
    The discussion of default values for m_max and ell
    is given between the definitions of equation (3.11)
    and the definition of equation (3.12).

    """
    if ell < 1:
        raise ValueError('expected ell to be a positive integer')
    best_m = None
    best_s = None
    if _condition_3_13(norm_info.onenorm(), n0, m_max, ell):
        # Small-norm regime: derive s directly from the exact 1-norm for
        # each candidate degree m, keeping the cheapest m*s product.
        for m, theta in _theta.items():
            s = int(np.ceil(norm_info.onenorm() / theta))
            if best_m is None or m * s < best_m * best_s:
                best_m = m
                best_s = s
    else:
        # Equation (3.11): use estimated norms of operator powers.
        for p in range(2, _compute_p_max(m_max) + 1):
            for m in range(p*(p-1)-1, m_max+1):
                if m in _theta:
                    s = _compute_cost_div_m(m, p, norm_info)
                    if best_m is None or m * s < best_m * best_s:
                        best_m = m
                        best_s = s
        # At least one scaling step is always required.
        best_s = max(best_s, 1)
    return best_m, best_s
|
558 |
+
|
559 |
+
|
560 |
+
def _condition_3_13(A_1_norm, n0, m_max, ell):
    """
    A helper function for the _expm_multiply_* functions.

    Evaluates condition (3.13) in Al-Mohy and Higham (2011), which decides
    whether the exact 1-norm alone suffices for choosing (m_star, s).

    Parameters
    ----------
    A_1_norm : float
        The precomputed 1-norm of A.
    n0 : int
        Number of columns in the _expm_multiply_* B matrix.
    m_max : int
        A value related to a bound.
    ell : int
        The number of columns used in the 1-norm approximation.
        This is usually taken to be small, maybe between 1 and 5.

    Returns
    -------
    value : bool
        Indicates whether or not the condition has been met.
    """
    # Right-hand side of equation (3.12).
    p_max = _compute_p_max(m_max)
    bound = 2 * ell * p_max * (p_max + 3)

    # Condition (3.13) itself.
    scaled_theta = _theta[m_max] / float(n0 * m_max)
    return A_1_norm <= bound * scaled_theta
|
594 |
+
|
595 |
+
|
596 |
+
def _expm_multiply_interval(A, B, start=None, stop=None, num=None,
                            endpoint=None, traceA=None, balance=False,
                            status_only=False):
    """
    Compute the action of the matrix exponential at multiple time points.

    Parameters
    ----------
    A : transposable linear operator
        The operator whose exponential is of interest.
    B : ndarray
        The matrix to be multiplied by the matrix exponential of A.
    start : scalar, optional
        The starting time point of the sequence.
    stop : scalar, optional
        The end time point of the sequence, unless `endpoint` is set to False.
        In that case, the sequence consists of all but the last of ``num + 1``
        evenly spaced time points, so that `stop` is excluded.
        Note that the step size changes when `endpoint` is False.
    num : int, optional
        Number of time points to use.
    traceA : scalar, optional
        Trace of `A`. If not given the trace is estimated for linear
        operators, or calculated exactly for sparse matrices. It is used to
        precondition `A`, thus an approximate trace is acceptable.
    endpoint : bool, optional
        If True, `stop` is the last time point. Otherwise, it is not included.
    balance : bool
        Indicates whether or not to apply balancing (not implemented).
    status_only : bool
        A flag that is set to True for some debugging and testing operations.

    Returns
    -------
    F : ndarray
        :math:`e^{t_k A} B`
    status : int
        An integer status for testing and debugging.

    Notes
    -----
    This is algorithm (5.2) in Al-Mohy and Higham (2011).

    There seems to be a typo, where line 15 of the algorithm should be
    moved to line 6.5 (between lines 6 and 7).

    """
    if balance:
        raise NotImplementedError
    # Validate shapes before any expensive work.
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected A to be like a square matrix')
    if A.shape[1] != B.shape[0]:
        raise ValueError('shapes of matrices A {} and B {} are incompatible'
                         .format(A.shape, B.shape))
    ident = _ident_like(A)
    is_linear_operator = isinstance(A, scipy.sparse.linalg.LinearOperator)
    n = A.shape[0]
    # n0 is the number of columns of B in the notation of the paper.
    if len(B.shape) == 1:
        n0 = 1
    elif len(B.shape) == 2:
        n0 = B.shape[1]
    else:
        raise ValueError('expected B to be like a matrix or a vector')
    # Tolerance is double-precision unit roundoff.
    u_d = 2**-53
    tol = u_d
    if traceA is None:
        if is_linear_operator:
            warn("Trace of LinearOperator not available, it will be estimated."
                 " Provide `traceA` to ensure performance.", stacklevel=3)
        # m3=5 is bit arbitrary choice, a more accurate trace (larger m3) might
        # speed up exponential calculation, but trace estimation is also costly
        # an educated guess would need to consider the number of time points
        traceA = traceest(A, m3=5) if is_linear_operator else _trace(A)
    mu = traceA / float(n)

    # Get the linspace samples, attempting to preserve the linspace defaults.
    linspace_kwargs = {'retstep': True}
    if num is not None:
        linspace_kwargs['num'] = num
    if endpoint is not None:
        linspace_kwargs['endpoint'] = endpoint
    samples, step = np.linspace(start, stop, **linspace_kwargs)

    # Convert the linspace output to the notation used by the publication.
    nsamples = len(samples)
    if nsamples < 2:
        raise ValueError('at least two time points are required')
    q = nsamples - 1
    h = step
    t_0 = samples[0]
    t_q = samples[q]

    # Define the output ndarray.
    # Use an ndim=3 shape, such that the last two indices
    # are the ones that may be involved in level 3 BLAS operations.
    X_shape = (nsamples,) + B.shape
    X = np.empty(X_shape, dtype=np.result_type(A.dtype, B.dtype, float))
    t = t_q - t_0
    # Shift A by mu*I (trace preconditioning) and measure its 1-norm:
    # exact for concrete matrices, estimated for operators.
    A = A - mu * ident
    A_1_norm = onenormest(A) if is_linear_operator else _exact_1_norm(A)
    ell = 2
    norm_info = LazyOperatorNormInfo(t*A, A_1_norm=t*A_1_norm, ell=ell)
    if t*A_1_norm == 0:
        # Shifted operator is (numerically) zero over the interval.
        m_star, s = 0, 1
    else:
        # Choose Taylor degree m_star and scaling s per fragment (3.1).
        m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)

    # Compute the expm action up to the initial time point.
    X[0] = _expm_multiply_simple_core(A, B, t_0, mu, m_star, s)

    # Compute the expm action at the rest of the time points,
    # dispatching on how the q interval steps relate to the scaling s.
    if q <= s:
        if status_only:
            return 0
        else:
            return _expm_multiply_interval_core_0(A, X,
                    h, mu, q, norm_info, tol, ell, n0)
    elif not (q % s):
        if status_only:
            return 1
        else:
            return _expm_multiply_interval_core_1(A, X,
                    h, mu, m_star, s, q, tol)
    elif (q % s):
        if status_only:
            return 2
        else:
            return _expm_multiply_interval_core_2(A, X,
                    h, mu, m_star, s, q, tol)
    else:
        # Unreachable: the three branches above are exhaustive.
        raise Exception('internal error')
|
727 |
+
|
728 |
+
|
729 |
+
def _expm_multiply_interval_core_0(A, X, h, mu, q, norm_info, tol, ell, n0):
    """
    A helper function, for the case q <= s.
    """
    # Re-derive (m_star, s) for sub-intervals of length t/q.  The zero-norm
    # case is handled separately because the fragment-3.1 parameter search
    # is not meaningful for a zero operator.
    if norm_info.onenorm() != 0:
        norm_info.set_scale(1./q)
        m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)
        norm_info.set_scale(1)
    else:
        m_star, s = 0, 1

    # March across the time grid one step of size h at a time, each step
    # seeded with the result of the previous one.
    for step in range(q):
        X[step + 1] = _expm_multiply_simple_core(A, X[step], h, mu, m_star, s)
    return X, 0
|
746 |
+
|
747 |
+
|
748 |
+
def _expm_multiply_interval_core_1(A, X, h, mu, m_star, s, q, tol):
    """
    A helper function, for the case q > s and q % s == 0.

    The q time steps are processed in s segments of d = q // s steps each.
    Within one segment the Taylor-like terms ``K[p] = (h*A)^p Z / p!`` depend
    only on the segment's starting vector ``Z``, so they are computed once
    and reused for every step ``k`` of the segment.
    """
    d = q // s
    input_shape = X.shape[1:]
    K_shape = (m_star + 1, ) + input_shape
    K = np.empty(K_shape, dtype=X.dtype)
    for i in range(s):
        Z = X[i*d]
        K[0] = Z
        # Highest p for which K[p] currently holds a valid term.
        high_p = 0
        for k in range(1, d+1):
            F = K[0]
            c1 = _exact_inf_norm(F)
            for p in range(1, m_star+1):
                if p > high_p:
                    # First time this order is needed for the current
                    # segment: extend the cache.  Recording high_p here
                    # avoids recomputing the (identical) term on every
                    # later step k, saving redundant A.dot products.
                    K[p] = h * A.dot(K[p-1]) / float(p)
                    high_p = p
                coeff = float(pow(k, p))
                F = F + coeff * K[p]
                inf_norm_K_p_1 = _exact_inf_norm(K[p])
                c2 = coeff * inf_norm_K_p_1
                # Stop once the two most recent terms are negligible
                # relative to the accumulated sum.
                if c1 + c2 <= tol * _exact_inf_norm(F):
                    break
                c1 = c2
            X[k + i*d] = np.exp(k*h*mu) * F
    return X, 1
|
775 |
+
|
776 |
+
|
777 |
+
def _expm_multiply_interval_core_2(A, X, h, mu, m_star, s, q, tol):
    """
    A helper function, for the case q > s and q % s > 0.
    """
    # Split the q steps into j full segments of length d plus one final,
    # shorter segment of length r (r > 0 because q % s > 0 here).
    d = q // s
    j = q // d
    r = q - d * j
    input_shape = X.shape[1:]
    # K[p] caches the Taylor-like term (h*A)^p Z / p! for the current
    # segment; terms depend only on the segment start Z, so they can be
    # reused across all steps k of the segment.
    K_shape = (m_star + 1, ) + input_shape
    K = np.empty(K_shape, dtype=X.dtype)
    for i in range(j + 1):
        Z = X[i*d]
        K[0] = Z
        # Highest p for which K[p] currently holds a valid term.
        high_p = 0
        if i < j:
            effective_d = d
        else:
            # The last iteration covers the remainder segment.
            effective_d = r
        for k in range(1, effective_d+1):
            F = K[0]
            c1 = _exact_inf_norm(F)
            for p in range(1, m_star+1):
                if p == high_p + 1:
                    # First time this order is needed: extend the cache.
                    K[p] = h * A.dot(K[p-1]) / float(p)
                    high_p = p
                coeff = float(pow(k, p))
                F = F + coeff * K[p]
                inf_norm_K_p_1 = _exact_inf_norm(K[p])
                c2 = coeff * inf_norm_K_p_1
                # Stop once the two most recent terms are negligible
                # relative to the accumulated sum.
                if c1 + c2 <= tol * _exact_inf_norm(F):
                    break
                c1 = c2
            X[k + i*d] = np.exp(k*h*mu) * F
    return X, 2
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_interface.py
ADDED
@@ -0,0 +1,896 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Abstract linear algebra library.
|
2 |
+
|
3 |
+
This module defines a class hierarchy that implements a kind of "lazy"
|
4 |
+
matrix representation, called the ``LinearOperator``. It can be used to do
|
5 |
+
linear algebra with extremely large sparse or structured matrices, without
|
6 |
+
representing those explicitly in memory. Such matrices can be added,
|
7 |
+
multiplied, transposed, etc.
|
8 |
+
|
9 |
+
As a motivating example, suppose you want have a matrix where almost all of
|
10 |
+
the elements have the value one. The standard sparse matrix representation
|
11 |
+
skips the storage of zeros, but not ones. By contrast, a LinearOperator is
|
12 |
+
able to represent such matrices efficiently. First, we need a compact way to
|
13 |
+
represent an all-ones matrix::
|
14 |
+
|
15 |
+
>>> import numpy as np
|
16 |
+
>>> from scipy.sparse.linalg._interface import LinearOperator
|
17 |
+
>>> class Ones(LinearOperator):
|
18 |
+
... def __init__(self, shape):
|
19 |
+
... super().__init__(dtype=None, shape=shape)
|
20 |
+
... def _matvec(self, x):
|
21 |
+
... return np.repeat(x.sum(), self.shape[0])
|
22 |
+
|
23 |
+
Instances of this class emulate ``np.ones(shape)``, but using a constant
|
24 |
+
amount of storage, independent of ``shape``. The ``_matvec`` method specifies
|
25 |
+
how this linear operator multiplies with (operates on) a vector. We can now
|
26 |
+
add this operator to a sparse matrix that stores only offsets from one::
|
27 |
+
|
28 |
+
>>> from scipy.sparse.linalg._interface import aslinearoperator
|
29 |
+
>>> from scipy.sparse import csr_matrix
|
30 |
+
>>> offsets = csr_matrix([[1, 0, 2], [0, -1, 0], [0, 0, 3]])
|
31 |
+
>>> A = aslinearoperator(offsets) + Ones(offsets.shape)
|
32 |
+
>>> A.dot([1, 2, 3])
|
33 |
+
array([13, 4, 15])
|
34 |
+
|
35 |
+
The result is the same as that given by its dense, explicitly-stored
|
36 |
+
counterpart::
|
37 |
+
|
38 |
+
>>> (np.ones(A.shape, A.dtype) + offsets.toarray()).dot([1, 2, 3])
|
39 |
+
array([13, 4, 15])
|
40 |
+
|
41 |
+
Several algorithms in the ``scipy.sparse`` library are able to operate on
|
42 |
+
``LinearOperator`` instances.
|
43 |
+
"""
|
44 |
+
|
45 |
+
import warnings
|
46 |
+
|
47 |
+
import numpy as np
|
48 |
+
|
49 |
+
from scipy.sparse import issparse
|
50 |
+
from scipy.sparse._sputils import isshape, isintlike, asmatrix, is_pydata_spmatrix
|
51 |
+
|
52 |
+
__all__ = ['LinearOperator', 'aslinearoperator']
|
53 |
+
|
54 |
+
|
55 |
+
class LinearOperator:
    """Common interface for performing matrix vector products

    Many iterative methods (e.g. cg, gmres) do not need to know the
    individual entries of a matrix to solve a linear system A*x=b.
    Such solvers only require the computation of matrix vector
    products, A*v where v is a dense vector. This class serves as
    an abstract interface between iterative solvers and matrix-like
    objects.

    To construct a concrete LinearOperator, either pass appropriate
    callables to the constructor of this class, or subclass it.

    A subclass must implement either one of the methods ``_matvec``
    and ``_matmat``, and the attributes/properties ``shape`` (pair of
    integers) and ``dtype`` (may be None). It may call the ``__init__``
    on this class to have these attributes validated. Implementing
    ``_matvec`` automatically implements ``_matmat`` (using a naive
    algorithm) and vice-versa.

    Optionally, a subclass may implement ``_rmatvec`` or ``_adjoint``
    to implement the Hermitian adjoint (conjugate transpose). As with
    ``_matvec`` and ``_matmat``, implementing either ``_rmatvec`` or
    ``_adjoint`` implements the other automatically. Implementing
    ``_adjoint`` is preferable; ``_rmatvec`` is mostly there for
    backwards compatibility.

    Parameters
    ----------
    shape : tuple
        Matrix dimensions (M, N).
    matvec : callable f(v)
        Returns A * v.
    rmatvec : callable f(v)
        Returns A^H * v, where A^H is the conjugate transpose of A.
    matmat : callable f(V)
        Returns A * V, where V is a dense matrix with dimensions (N, K).
    dtype : dtype
        Data type of the matrix.
    rmatmat : callable f(V)
        Returns A^H * V, where V is a dense matrix with dimensions (M, K).

    Attributes
    ----------
    args : tuple
        For linear operators describing products etc. of other linear
        operators, the operands of the binary operation.
    ndim : int
        Number of dimensions (this is always 2)

    See Also
    --------
    aslinearoperator : Construct LinearOperators

    Notes
    -----
    The user-defined matvec() function must properly handle the case
    where v has shape (N,) as well as the (N,1) case. The shape of
    the return type is handled internally by LinearOperator.

    LinearOperator instances can also be multiplied, added with each
    other and exponentiated, all lazily: the result of these operations
    is always a new, composite LinearOperator, that defers linear
    operations to the original operators and combines the results.

    More details regarding how to subclass a LinearOperator and several
    examples of concrete LinearOperator instances can be found in the
    external project `PyLops <https://pylops.readthedocs.io>`_.


    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse.linalg import LinearOperator
    >>> def mv(v):
    ...     return np.array([2*v[0], 3*v[1]])
    ...
    >>> A = LinearOperator((2,2), matvec=mv)
    >>> A
    <2x2 _CustomLinearOperator with dtype=float64>
    >>> A.matvec(np.ones(2))
    array([ 2.,  3.])
    >>> A * np.ones(2)
    array([ 2.,  3.])

    """

    ndim = 2
    # Necessary for right matmul with numpy arrays.
    __array_ufunc__ = None

    def __new__(cls, *args, **kwargs):
        if cls is LinearOperator:
            # Operate as _CustomLinearOperator factory.
            return super().__new__(_CustomLinearOperator)
        else:
            obj = super().__new__(cls)

            # Warn subclass authors who overrode neither core operation;
            # the two defaults are defined in terms of each other, so an
            # instance like that would recurse at call time.
            if (type(obj)._matvec == LinearOperator._matvec
                    and type(obj)._matmat == LinearOperator._matmat):
                warnings.warn("LinearOperator subclass should implement"
                              " at least one of _matvec and _matmat.",
                              category=RuntimeWarning, stacklevel=2)

            return obj

    def __init__(self, dtype, shape):
        """Initialize this LinearOperator.

        To be called by subclasses. ``dtype`` may be None; ``shape`` should
        be convertible to a length-2 tuple.
        """
        if dtype is not None:
            dtype = np.dtype(dtype)

        shape = tuple(shape)
        if not isshape(shape):
            raise ValueError(f"invalid shape {shape!r} (must be 2-d)")

        self.dtype = dtype
        self.shape = shape

    def _init_dtype(self):
        """Called from subclasses at the end of the __init__ routine.
        """
        # Probe with a zero vector to discover the result dtype when the
        # caller did not specify one.
        if self.dtype is None:
            v = np.zeros(self.shape[-1])
            self.dtype = np.asarray(self.matvec(v)).dtype

    def _matmat(self, X):
        """Default matrix-matrix multiplication handler.

        Falls back on the user-defined _matvec method, so defining that will
        define matrix multiplication (though in a very suboptimal way).
        """

        return np.hstack([self.matvec(col.reshape(-1,1)) for col in X.T])

    def _matvec(self, x):
        """Default matrix-vector multiplication handler.

        If self is a linear operator of shape (M, N), then this method will
        be called on a shape (N,) or (N, 1) ndarray, and should return a
        shape (M,) or (M, 1) ndarray.

        This default implementation falls back on _matmat, so defining that
        will define matrix-vector multiplication as well.
        """
        return self.matmat(x.reshape(-1, 1))

    def matvec(self, x):
        """Matrix-vector multiplication.

        Performs the operation y=A*x where A is an MxN linear
        operator and x is a column vector or 1-d array.

        Parameters
        ----------
        x : {matrix, ndarray}
            An array with shape (N,) or (N,1).

        Returns
        -------
        y : {matrix, ndarray}
            A matrix or ndarray with shape (M,) or (M,1) depending
            on the type and shape of the x argument.

        Notes
        -----
        This matvec wraps the user-specified matvec routine or overridden
        _matvec method to ensure that y has the correct shape and type.

        """

        x = np.asanyarray(x)

        M,N = self.shape

        if x.shape != (N,) and x.shape != (N,1):
            raise ValueError('dimension mismatch')

        y = self._matvec(x)

        # Mirror the container type of the input (np.matrix vs ndarray).
        if isinstance(x, np.matrix):
            y = asmatrix(y)
        else:
            y = np.asarray(y)

        # Normalize the output shape to match the input's dimensionality.
        if x.ndim == 1:
            y = y.reshape(M)
        elif x.ndim == 2:
            y = y.reshape(M,1)
        else:
            raise ValueError('invalid shape returned by user-defined matvec()')

        return y

    def rmatvec(self, x):
        """Adjoint matrix-vector multiplication.

        Performs the operation y = A^H * x where A is an MxN linear
        operator and x is a column vector or 1-d array.

        Parameters
        ----------
        x : {matrix, ndarray}
            An array with shape (M,) or (M,1).

        Returns
        -------
        y : {matrix, ndarray}
            A matrix or ndarray with shape (N,) or (N,1) depending
            on the type and shape of the x argument.

        Notes
        -----
        This rmatvec wraps the user-specified rmatvec routine or overridden
        _rmatvec method to ensure that y has the correct shape and type.

        """

        x = np.asanyarray(x)

        M,N = self.shape

        if x.shape != (M,) and x.shape != (M,1):
            raise ValueError('dimension mismatch')

        y = self._rmatvec(x)

        # Mirror the container type of the input (np.matrix vs ndarray).
        if isinstance(x, np.matrix):
            y = asmatrix(y)
        else:
            y = np.asarray(y)

        # Normalize the output shape to match the input's dimensionality.
        if x.ndim == 1:
            y = y.reshape(N)
        elif x.ndim == 2:
            y = y.reshape(N,1)
        else:
            raise ValueError('invalid shape returned by user-defined rmatvec()')

        return y

    def _rmatvec(self, x):
        """Default implementation of _rmatvec; defers to adjoint."""
        if type(self)._adjoint == LinearOperator._adjoint:
            # _adjoint not overridden, prevent infinite recursion
            raise NotImplementedError
        else:
            return self.H.matvec(x)

    def matmat(self, X):
        """Matrix-matrix multiplication.

        Performs the operation y=A*X where A is an MxN linear
        operator and X dense N*K matrix or ndarray.

        Parameters
        ----------
        X : {matrix, ndarray}
            An array with shape (N,K).

        Returns
        -------
        Y : {matrix, ndarray}
            A matrix or ndarray with shape (M,K) depending on
            the type of the X argument.

        Notes
        -----
        This matmat wraps any user-specified matmat routine or overridden
        _matmat method to ensure that y has the correct type.

        """
        if not (issparse(X) or is_pydata_spmatrix(X)):
            X = np.asanyarray(X)

        if X.ndim != 2:
            raise ValueError(f'expected 2-d ndarray or matrix, not {X.ndim}-d')

        if X.shape[0] != self.shape[1]:
            raise ValueError(f'dimension mismatch: {self.shape}, {X.shape}')

        try:
            Y = self._matmat(X)
        except Exception as e:
            # A failure with a sparse operand most likely means the user
            # multiplied a LinearOperator by a raw sparse matrix.
            if issparse(X) or is_pydata_spmatrix(X):
                raise TypeError(
                    "Unable to multiply a LinearOperator with a sparse matrix."
                    " Wrap the matrix in aslinearoperator first."
                ) from e
            raise

        if isinstance(Y, np.matrix):
            Y = asmatrix(Y)

        return Y

    def rmatmat(self, X):
        """Adjoint matrix-matrix multiplication.

        Performs the operation y = A^H * x where A is an MxN linear
        operator and x is a column vector or 1-d array, or 2-d array.
        The default implementation defers to the adjoint.

        Parameters
        ----------
        X : {matrix, ndarray}
            A matrix or 2D array.

        Returns
        -------
        Y : {matrix, ndarray}
            A matrix or 2D array depending on the type of the input.

        Notes
        -----
        This rmatmat wraps the user-specified rmatmat routine.

        """
        if not (issparse(X) or is_pydata_spmatrix(X)):
            X = np.asanyarray(X)

        if X.ndim != 2:
            raise ValueError('expected 2-d ndarray or matrix, not %d-d'
                             % X.ndim)

        if X.shape[0] != self.shape[0]:
            raise ValueError(f'dimension mismatch: {self.shape}, {X.shape}')

        try:
            Y = self._rmatmat(X)
        except Exception as e:
            # See matmat: a sparse operand should be wrapped first.
            if issparse(X) or is_pydata_spmatrix(X):
                raise TypeError(
                    "Unable to multiply a LinearOperator with a sparse matrix."
                    " Wrap the matrix in aslinearoperator() first."
                ) from e
            raise

        if isinstance(Y, np.matrix):
            Y = asmatrix(Y)
        return Y

    def _rmatmat(self, X):
        """Default implementation of _rmatmat defers to rmatvec or adjoint."""
        if type(self)._adjoint == LinearOperator._adjoint:
            # No custom adjoint: apply rmatvec column by column.
            return np.hstack([self.rmatvec(col.reshape(-1, 1)) for col in X.T])
        else:
            return self.H.matmat(X)

    def __call__(self, x):
        return self*x

    def __mul__(self, x):
        return self.dot(x)

    def __truediv__(self, other):
        if not np.isscalar(other):
            raise ValueError("Can only divide a linear operator by a scalar.")

        return _ScaledLinearOperator(self, 1.0/other)

    def dot(self, x):
        """Matrix-matrix or matrix-vector multiplication.

        Parameters
        ----------
        x : array_like
            1-d or 2-d array, representing a vector or matrix.

        Returns
        -------
        Ax : array
            1-d or 2-d array (depending on the shape of x) that represents
            the result of applying this linear operator on x.

        """
        if isinstance(x, LinearOperator):
            return _ProductLinearOperator(self, x)
        elif np.isscalar(x):
            return _ScaledLinearOperator(self, x)
        else:
            if not issparse(x) and not is_pydata_spmatrix(x):
                # Sparse matrices shouldn't be converted to numpy arrays.
                x = np.asarray(x)

            if x.ndim == 1 or x.ndim == 2 and x.shape[1] == 1:
                return self.matvec(x)
            elif x.ndim == 2:
                return self.matmat(x)
            else:
                raise ValueError('expected 1-d or 2-d array or matrix, got %r'
                                 % x)

    def __matmul__(self, other):
        # The @ operator deliberately rejects scalars, per PEP 465.
        if np.isscalar(other):
            raise ValueError("Scalar operands are not allowed, "
                             "use '*' instead")
        return self.__mul__(other)

    def __rmatmul__(self, other):
        # The @ operator deliberately rejects scalars, per PEP 465.
        if np.isscalar(other):
            raise ValueError("Scalar operands are not allowed, "
                             "use '*' instead")
        return self.__rmul__(other)

    def __rmul__(self, x):
        if np.isscalar(x):
            return _ScaledLinearOperator(self, x)
        else:
            return self._rdot(x)

    def _rdot(self, x):
        """Matrix-matrix or matrix-vector multiplication from the right.

        Parameters
        ----------
        x : array_like
            1-d or 2-d array, representing a vector or matrix.

        Returns
        -------
        xA : array
            1-d or 2-d array (depending on the shape of x) that represents
            the result of applying this linear operator on x from the right.

        Notes
        -----
        This is copied from dot to implement right multiplication.
        """
        if isinstance(x, LinearOperator):
            return _ProductLinearOperator(x, self)
        elif np.isscalar(x):
            return _ScaledLinearOperator(self, x)
        else:
            if not issparse(x) and not is_pydata_spmatrix(x):
                # Sparse matrices shouldn't be converted to numpy arrays.
                x = np.asarray(x)

            # We use transpose instead of rmatvec/rmatmat to avoid
            # unnecessary complex conjugation if possible.
            if x.ndim == 1 or x.ndim == 2 and x.shape[0] == 1:
                return self.T.matvec(x.T).T
            elif x.ndim == 2:
                return self.T.matmat(x.T).T
            else:
                raise ValueError('expected 1-d or 2-d array or matrix, got %r'
                                 % x)

    def __pow__(self, p):
        if np.isscalar(p):
            return _PowerLinearOperator(self, p)
        else:
            return NotImplemented

    def __add__(self, x):
        if isinstance(x, LinearOperator):
            return _SumLinearOperator(self, x)
        else:
            return NotImplemented

    def __neg__(self):
        return _ScaledLinearOperator(self, -1)

    def __sub__(self, x):
        return self.__add__(-x)

    def __repr__(self):
        M,N = self.shape
        if self.dtype is None:
            dt = 'unspecified dtype'
        else:
            dt = 'dtype=' + str(self.dtype)

        return '<%dx%d %s with %s>' % (M, N, self.__class__.__name__, dt)

    def adjoint(self):
        """Hermitian adjoint.

        Returns the Hermitian adjoint of self, aka the Hermitian
        conjugate or Hermitian transpose. For a complex matrix, the
        Hermitian adjoint is equal to the conjugate transpose.

        Can be abbreviated self.H instead of self.adjoint().

        Returns
        -------
        A_H : LinearOperator
            Hermitian adjoint of self.
        """
        return self._adjoint()

    H = property(adjoint)

    def transpose(self):
        """Transpose this linear operator.

        Returns a LinearOperator that represents the transpose of this one.
        Can be abbreviated self.T instead of self.transpose().
        """
        return self._transpose()

    T = property(transpose)

    def _adjoint(self):
        """Default implementation of _adjoint; defers to rmatvec."""
        return _AdjointLinearOperator(self)

    def _transpose(self):
        """ Default implementation of _transpose; defers to rmatvec + conj"""
        return _TransposedLinearOperator(self)
|
568 |
+
|
569 |
+
|
570 |
+
class _CustomLinearOperator(LinearOperator):
    """Linear operator defined in terms of user-specified operations."""

    def __init__(self, shape, matvec, rmatvec=None, matmat=None,
                 dtype=None, rmatmat=None):
        super().__init__(dtype, shape)

        self.args = ()

        # User-supplied callables; a None entry falls back to the generic
        # implementation inherited from LinearOperator.
        self.__matvec_impl = matvec
        self.__rmatvec_impl = rmatvec
        self.__rmatmat_impl = rmatmat
        self.__matmat_impl = matmat

        # Probe the matvec to infer the dtype when none was given.
        self._init_dtype()

    def _matmat(self, X):
        if self.__matmat_impl is not None:
            return self.__matmat_impl(X)
        else:
            return super()._matmat(X)

    def _matvec(self, x):
        return self.__matvec_impl(x)

    def _rmatvec(self, x):
        func = self.__rmatvec_impl
        if func is None:
            raise NotImplementedError("rmatvec is not defined")
        return self.__rmatvec_impl(x)

    def _rmatmat(self, X):
        if self.__rmatmat_impl is not None:
            return self.__rmatmat_impl(X)
        else:
            return super()._rmatmat(X)

    def _adjoint(self):
        # The adjoint simply swaps the forward and adjoint callables and
        # transposes the shape.
        return _CustomLinearOperator(shape=(self.shape[1], self.shape[0]),
                                     matvec=self.__rmatvec_impl,
                                     rmatvec=self.__matvec_impl,
                                     matmat=self.__rmatmat_impl,
                                     rmatmat=self.__matmat_impl,
                                     dtype=self.dtype)
|
614 |
+
|
615 |
+
|
616 |
+
class _AdjointLinearOperator(LinearOperator):
    """Hermitian adjoint of an arbitrary LinearOperator.

    Forward and adjoint applications of the wrapped operator are simply
    swapped, and the shape is transposed.
    """

    def __init__(self, A):
        m, n = A.shape
        super().__init__(dtype=A.dtype, shape=(n, m))
        self.A = A
        self.args = (A,)

    def _matvec(self, x):
        # (A^H) @ x is the wrapped operator's adjoint matvec.
        return self.A._rmatvec(x)

    def _rmatvec(self, x):
        # (A^H)^H == A, so the adjoint's adjoint is the original matvec.
        return self.A._matvec(x)

    def _matmat(self, x):
        return self.A._rmatmat(x)

    def _rmatmat(self, x):
        return self.A._matmat(x)
|
636 |
+
|
637 |
+
class _TransposedLinearOperator(LinearOperator):
    """Transpose (without conjugation) of an arbitrary LinearOperator.

    Implemented via the identity ``A.T @ x == conj(A^H @ conj(x))``, so the
    wrapped operator's adjoint routines can be reused.
    """

    def __init__(self, A):
        m, n = A.shape
        super().__init__(dtype=A.dtype, shape=(n, m))
        self.A = A
        self.args = (A,)

    def _matvec(self, x):
        # NB. np.conj works also on sparse matrices
        return np.conj(self.A._rmatvec(np.conj(x)))

    def _rmatvec(self, x):
        return np.conj(self.A._matvec(np.conj(x)))

    def _matmat(self, x):
        # NB. np.conj works also on sparse matrices
        return np.conj(self.A._rmatmat(np.conj(x)))

    def _rmatmat(self, x):
        return np.conj(self.A._matmat(np.conj(x)))
|
659 |
+
|
660 |
+
def _get_dtype(operators, dtypes=None):
|
661 |
+
if dtypes is None:
|
662 |
+
dtypes = []
|
663 |
+
for obj in operators:
|
664 |
+
if obj is not None and hasattr(obj, 'dtype'):
|
665 |
+
dtypes.append(obj.dtype)
|
666 |
+
return np.result_type(*dtypes)
|
667 |
+
|
668 |
+
|
669 |
+
class _SumLinearOperator(LinearOperator):
    """Lazy sum ``A + B`` of two linear operators of identical shape."""

    def __init__(self, A, B):
        if not (isinstance(A, LinearOperator)
                and isinstance(B, LinearOperator)):
            raise ValueError('both operands have to be a LinearOperator')
        if A.shape != B.shape:
            raise ValueError(f'cannot add {A} and {B}: shape mismatch')
        self.args = (A, B)
        super().__init__(_get_dtype([A, B]), A.shape)

    def _matvec(self, x):
        left, right = self.args
        return left.matvec(x) + right.matvec(x)

    def _rmatvec(self, x):
        left, right = self.args
        return left.rmatvec(x) + right.rmatvec(x)

    def _rmatmat(self, x):
        left, right = self.args
        return left.rmatmat(x) + right.rmatmat(x)

    def _matmat(self, x):
        left, right = self.args
        return left.matmat(x) + right.matmat(x)

    def _adjoint(self):
        # Adjoint distributes over the sum.
        A, B = self.args
        return A.H + B.H
|
694 |
+
|
695 |
+
|
696 |
+
class _ProductLinearOperator(LinearOperator):
    """Lazy composition ``A @ B`` of two conformable linear operators."""

    def __init__(self, A, B):
        if not (isinstance(A, LinearOperator)
                and isinstance(B, LinearOperator)):
            raise ValueError('both operands have to be a LinearOperator')
        if A.shape[1] != B.shape[0]:
            raise ValueError(f'cannot multiply {A} and {B}: shape mismatch')
        super().__init__(_get_dtype([A, B]),
                         (A.shape[0], B.shape[1]))
        self.args = (A, B)

    def _matvec(self, x):
        # Apply the right factor first, then the left.
        left, right = self.args
        return left.matvec(right.matvec(x))

    def _rmatvec(self, x):
        # Adjoint reverses the application order.
        left, right = self.args
        return right.rmatvec(left.rmatvec(x))

    def _rmatmat(self, x):
        left, right = self.args
        return right.rmatmat(left.rmatmat(x))

    def _matmat(self, x):
        left, right = self.args
        return left.matmat(right.matmat(x))

    def _adjoint(self):
        # (A B)^H == B^H A^H
        A, B = self.args
        return B.H * A.H
|
722 |
+
|
723 |
+
|
724 |
+
class _ScaledLinearOperator(LinearOperator):
    """Lazy scalar multiple ``alpha * A`` of a linear operator."""

    def __init__(self, A, alpha):
        if not isinstance(A, LinearOperator):
            raise ValueError('LinearOperator expected as A')
        if not np.isscalar(alpha):
            raise ValueError('scalar expected as alpha')
        if isinstance(A, _ScaledLinearOperator):
            # Collapse nested scalings into a single factor.  The product
            # is computed out-of-place so the inner operator's prefactor
            # is never mutated.
            inner, inner_alpha = A.args
            A = inner
            alpha = alpha * inner_alpha

        super().__init__(_get_dtype([A], [type(alpha)]), A.shape)
        self.args = (A, alpha)

    def _matvec(self, x):
        op, scale = self.args
        return scale * op.matvec(x)

    def _rmatvec(self, x):
        # The adjoint of scalar multiplication conjugates the scalar.
        op, scale = self.args
        return np.conj(scale) * op.rmatvec(x)

    def _rmatmat(self, x):
        op, scale = self.args
        return np.conj(scale) * op.rmatmat(x)

    def _matmat(self, x):
        op, scale = self.args
        return scale * op.matmat(x)

    def _adjoint(self):
        A, alpha = self.args
        return A.H * np.conj(alpha)
|
755 |
+
|
756 |
+
|
757 |
+
class _PowerLinearOperator(LinearOperator):
    """Integer power ``A ** p`` of a square LinearOperator.

    The power is evaluated by applying the underlying operator ``p`` times;
    nothing is precomputed.
    """

    def __init__(self, A, p):
        if not isinstance(A, LinearOperator):
            raise ValueError('LinearOperator expected as A')
        # Powers are only defined for square operators.
        if A.shape[0] != A.shape[1]:
            raise ValueError('square LinearOperator expected, got %r' % A)
        if not isintlike(p) or p < 0:
            raise ValueError('non-negative integer expected as p')

        super().__init__(_get_dtype([A]), A.shape)
        self.args = (A, p)

    def _power(self, fun, x):
        # Apply `fun` p times; the initial copy keeps the caller's array
        # untouched (and handles p == 0, where x is returned unchanged).
        res = np.array(x, copy=True)
        for i in range(self.args[1]):
            res = fun(res)
        return res

    def _matvec(self, x):
        return self._power(self.args[0].matvec, x)

    def _rmatvec(self, x):
        return self._power(self.args[0].rmatvec, x)

    def _rmatmat(self, x):
        return self._power(self.args[0].rmatmat, x)

    def _matmat(self, x):
        return self._power(self.args[0].matmat, x)

    def _adjoint(self):
        # (A ** p)^H == (A^H) ** p.
        A, p = self.args
        return A.H ** p
|
790 |
+
|
791 |
+
|
792 |
+
class MatrixLinearOperator(LinearOperator):
    """LinearOperator backed by a concrete matrix ``A`` (dense or sparse)."""

    def __init__(self, A):
        super().__init__(A.dtype, A.shape)
        self.A = A
        # Cached adjoint operator; built lazily on first _adjoint() call.
        self.__adj = None
        self.args = (A,)

    def _matmat(self, X):
        # Delegate directly to the wrapped matrix's dot product.
        return self.A.dot(X)

    def _adjoint(self):
        if self.__adj is None:
            self.__adj = _AdjointMatrixOperator(self)
        return self.__adj
|
806 |
+
|
807 |
+
class _AdjointMatrixOperator(MatrixLinearOperator):
    """Adjoint of a MatrixLinearOperator; stores the conjugate transpose.

    NOTE: deliberately does not call ``super().__init__`` — it sets the
    attributes MatrixLinearOperator relies on (``A``, ``args``, ``shape``)
    directly, and exposes ``dtype`` as a property delegating to the parent
    operator.
    """

    def __init__(self, adjoint):
        # Materialize the conjugate transpose of the wrapped matrix.
        self.A = adjoint.A.T.conj()
        self.__adjoint = adjoint
        self.args = (adjoint,)
        self.shape = adjoint.shape[1], adjoint.shape[0]

    @property
    def dtype(self):
        # Delegate so the dtype always matches the original operator's.
        return self.__adjoint.dtype

    def _adjoint(self):
        # The adjoint of the adjoint is the original operator.
        return self.__adjoint
|
820 |
+
|
821 |
+
|
822 |
+
class IdentityOperator(LinearOperator):
    """Identity LinearOperator: every product returns its argument as-is."""

    def __init__(self, shape, dtype=None):
        super().__init__(dtype, shape)

    def _matvec(self, x):
        return x

    def _rmatvec(self, x):
        return x

    def _rmatmat(self, x):
        return x

    def _matmat(self, x):
        return x

    def _adjoint(self):
        # The identity is self-adjoint.
        return self
|
840 |
+
|
841 |
+
|
842 |
+
def aslinearoperator(A):
    """Return A as a LinearOperator.

    'A' may be any of the following types:
     - ndarray
     - matrix
     - sparse matrix (e.g. csr_matrix, lil_matrix, etc.)
     - LinearOperator
     - An object with .shape and .matvec attributes

    See the LinearOperator documentation for additional information.

    Notes
    -----
    If 'A' has no .dtype attribute, the data type is determined by calling
    :func:`LinearOperator.matvec()` - set the .dtype attribute to prevent this
    call upon the linear operator creation.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse.linalg import aslinearoperator
    >>> M = np.array([[1,2,3],[4,5,6]], dtype=np.int32)
    >>> aslinearoperator(M)
    <2x3 MatrixLinearOperator with dtype=int32>
    """
    if isinstance(A, LinearOperator):
        # Already a LinearOperator -- return unchanged.
        return A

    elif isinstance(A, np.ndarray) or isinstance(A, np.matrix):
        if A.ndim > 2:
            raise ValueError('array must have ndim <= 2')
        # Promote 0-d/1-d input to 2-d and drop the matrix subclass.
        A = np.atleast_2d(np.asarray(A))
        return MatrixLinearOperator(A)

    elif issparse(A) or is_pydata_spmatrix(A):
        return MatrixLinearOperator(A)

    else:
        # Duck-typed fallback: anything exposing .shape and .matvec.
        if hasattr(A, 'shape') and hasattr(A, 'matvec'):
            rmatvec = None
            rmatmat = None
            dtype = None

            # Forward the optional pieces of the interface when available.
            if hasattr(A, 'rmatvec'):
                rmatvec = A.rmatvec
            if hasattr(A, 'rmatmat'):
                rmatmat = A.rmatmat
            if hasattr(A, 'dtype'):
                dtype = A.dtype
            return LinearOperator(A.shape, A.matvec, rmatvec=rmatvec,
                                  rmatmat=rmatmat, dtype=dtype)

        else:
            raise TypeError('type not understood')
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (647 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/_gcrotmk.cpython-310.pyc
ADDED
Binary file (11.4 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/iterative.cpython-310.pyc
ADDED
Binary file (27.6 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/lgmres.cpython-310.pyc
ADDED
Binary file (7.41 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/lsmr.cpython-310.pyc
ADDED
Binary file (11.6 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/lsqr.cpython-310.pyc
ADDED
Binary file (17 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/minres.cpython-310.pyc
ADDED
Binary file (8.08 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/tfqmr.cpython-310.pyc
ADDED
Binary file (5.74 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/utils.cpython-310.pyc
ADDED
Binary file (3.3 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/lgmres.py
ADDED
@@ -0,0 +1,242 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Copyright (C) 2009, Pauli Virtanen <[email protected]>
|
2 |
+
# Distributed under the same license as SciPy.
|
3 |
+
|
4 |
+
import numpy as np
|
5 |
+
from numpy.linalg import LinAlgError
|
6 |
+
from scipy.linalg import get_blas_funcs
|
7 |
+
from .iterative import _get_atol_rtol
|
8 |
+
from .utils import make_system
|
9 |
+
from scipy._lib.deprecation import _NoValue, _deprecate_positional_args
|
10 |
+
|
11 |
+
from ._gcrotmk import _fgmres
|
12 |
+
|
13 |
+
__all__ = ['lgmres']
|
14 |
+
|
15 |
+
|
16 |
+
@_deprecate_positional_args(version="1.14.0")
def lgmres(A, b, x0=None, *, tol=_NoValue, maxiter=1000, M=None, callback=None,
           inner_m=30, outer_k=3, outer_v=None, store_outer_Av=True,
           prepend_outer_v=False, atol=None, rtol=1e-5):
    """
    Solve a matrix equation using the LGMRES algorithm.

    The LGMRES algorithm [1]_ [2]_ is designed to avoid some problems
    in the convergence in restarted GMRES, and often converges in fewer
    iterations.

    Parameters
    ----------
    A : {sparse matrix, ndarray, LinearOperator}
        The real or complex N-by-N matrix of the linear system.
        Alternatively, ``A`` can be a linear operator which can
        produce ``Ax`` using, e.g.,
        ``scipy.sparse.linalg.LinearOperator``.
    b : ndarray
        Right hand side of the linear system. Has shape (N,) or (N,1).
    x0 : ndarray
        Starting guess for the solution.
    rtol, atol : float, optional
        Parameters for the convergence test. For convergence,
        ``norm(b - A @ x) <= max(rtol*norm(b), atol)`` should be satisfied.
        The default is ``rtol=1e-5``, the default for ``atol`` is ``rtol``.

        .. warning::

           The default value for ``atol`` will be changed to ``0.0`` in
           SciPy 1.14.0.
    maxiter : int, optional
        Maximum number of iterations.  Iteration will stop after maxiter
        steps even if the specified tolerance has not been achieved.
    M : {sparse matrix, ndarray, LinearOperator}, optional
        Preconditioner for A.  The preconditioner should approximate the
        inverse of A.  Effective preconditioning dramatically improves the
        rate of convergence, which implies that fewer iterations are needed
        to reach a given error tolerance.
    callback : function, optional
        User-supplied function to call after each iteration.  It is called
        as callback(xk), where xk is the current solution vector.
    inner_m : int, optional
        Number of inner GMRES iterations per each outer iteration.
    outer_k : int, optional
        Number of vectors to carry between inner GMRES iterations.
        According to [1]_, good values are in the range of 1...3.
        However, note that if you want to use the additional vectors to
        accelerate solving multiple similar problems, larger values may
        be beneficial.
    outer_v : list of tuples, optional
        List containing tuples ``(v, Av)`` of vectors and corresponding
        matrix-vector products, used to augment the Krylov subspace, and
        carried between inner GMRES iterations. The element ``Av`` can
        be `None` if the matrix-vector product should be re-evaluated.
        This parameter is modified in-place by `lgmres`, and can be used
        to pass "guess" vectors in and out of the algorithm when solving
        similar problems.
    store_outer_Av : bool, optional
        Whether LGMRES should store also A@v in addition to vectors `v`
        in the `outer_v` list. Default is True.
    prepend_outer_v : bool, optional
        Whether to put outer_v augmentation vectors before Krylov iterates.
        In standard LGMRES, prepend_outer_v=False.
    tol : float, optional, deprecated

        .. deprecated:: 1.12.0
           `lgmres` keyword argument ``tol`` is deprecated in favor of ``rtol``
           and will be removed in SciPy 1.14.0.

    Returns
    -------
    x : ndarray
        The converged solution.
    info : int
        Provides convergence information:

            - 0 : successful exit
            - >0 : convergence to tolerance not achieved, number of iterations
            - <0 : illegal input or breakdown

    Notes
    -----
    The LGMRES algorithm [1]_ [2]_ is designed to avoid the
    slowing of convergence in restarted GMRES, due to alternating
    residual vectors. Typically, it often outperforms GMRES(m) of
    comparable memory requirements by some measure, or at least is not
    much worse.

    Another advantage in this algorithm is that you can supply it with
    'guess' vectors in the `outer_v` argument that augment the Krylov
    subspace. If the solution lies close to the span of these vectors,
    the algorithm converges faster. This can be useful if several very
    similar matrices need to be inverted one after another, such as in
    Newton-Krylov iteration where the Jacobian matrix often changes
    little in the nonlinear steps.

    References
    ----------
    .. [1] A.H. Baker and E.R. Jessup and T. Manteuffel, "A Technique for
             Accelerating the Convergence of Restarted GMRES", SIAM J. Matrix
             Anal. Appl. 26, 962 (2005).
    .. [2] A.H. Baker, "On Improving the Performance of the Linear Solver
             restarted GMRES", PhD thesis, University of Colorado (2003).

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import lgmres
    >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
    >>> b = np.array([2, 4, -1], dtype=float)
    >>> x, exitCode = lgmres(A, b, atol=1e-5)
    >>> print(exitCode)            # 0 indicates successful convergence
    0
    >>> np.allclose(A.dot(x), b)
    True
    """
    # Normalize A, M, x0, b into operators/arrays of a common shape/dtype.
    A,M,x,b,postprocess = make_system(A,M,x0,b)

    if not np.isfinite(b).all():
        raise ValueError("RHS must contain only finite numbers")

    matvec = A.matvec
    psolve = M.matvec

    if outer_v is None:
        outer_v = []

    # BLAS routines are resolved lazily on the first iteration, once the
    # effective dtype (possibly complex) is known.
    axpy, dot, scal = None, None, None
    nrm2 = get_blas_funcs('nrm2', [b])

    b_norm = nrm2(b)

    # we call this to get the right atol/rtol and raise warnings as necessary
    atol, rtol = _get_atol_rtol('lgmres', b_norm, tol, atol, rtol)

    if b_norm == 0:
        # Zero RHS: the exact solution is the zero vector.
        x = b
        return (postprocess(x), 0)

    ptol_max_factor = 1.0

    for k_outer in range(maxiter):
        r_outer = matvec(x) - b

        # -- callback
        if callback is not None:
            callback(x)

        # -- determine input type routines
        if axpy is None:
            if np.iscomplexobj(r_outer) and not np.iscomplexobj(x):
                x = x.astype(r_outer.dtype)
            axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'],
                                                   (x, r_outer))

        # -- check stopping condition
        r_norm = nrm2(r_outer)
        if r_norm <= max(atol, rtol * b_norm):
            break

        # -- inner LGMRES iteration
        v0 = -psolve(r_outer)
        inner_res_0 = nrm2(v0)

        if inner_res_0 == 0:
            rnorm = nrm2(r_outer)
            raise RuntimeError("Preconditioner returned a zero vector; "
                               "|v| ~ %.1g, |M v| = 0" % rnorm)

        v0 = scal(1.0/inner_res_0, v0)

        # Target tolerance for the inner solve, relative to the outer residual.
        ptol = min(ptol_max_factor, max(atol, rtol*b_norm)/r_norm)

        try:
            Q, R, B, vs, zs, y, pres = _fgmres(matvec,
                                               v0,
                                               inner_m,
                                               lpsolve=psolve,
                                               atol=ptol,
                                               outer_v=outer_v,
                                               prepend_outer_v=prepend_outer_v)
            y *= inner_res_0
            if not np.isfinite(y).all():
                # Overflow etc. in computation. There's no way to
                # recover from this, so we have to bail out.
                raise LinAlgError()
        except LinAlgError:
            # Floating point over/underflow, non-finite result from
            # matmul etc. -- report failure.
            return postprocess(x), k_outer + 1

        # Inner loop tolerance control
        if pres > ptol:
            ptol_max_factor = min(1.0, 1.5 * ptol_max_factor)
        else:
            ptol_max_factor = max(1e-16, 0.25 * ptol_max_factor)

        # -- GMRES terminated: eval solution
        dx = zs[0]*y[0]
        for w, yc in zip(zs[1:], y[1:]):
            dx = axpy(w, dx, dx.shape[0], yc)  # dx += w*yc

        # -- Store LGMRES augmentation vectors
        nx = nrm2(dx)
        if nx > 0:
            if store_outer_Av:
                q = Q.dot(R.dot(y))
                ax = vs[0]*q[0]
                for v, qc in zip(vs[1:], q[1:]):
                    ax = axpy(v, ax, ax.shape[0], qc)
                outer_v.append((dx/nx, ax/nx))
            else:
                outer_v.append((dx/nx, None))

        # -- Retain only a finite number of augmentation vectors
        while len(outer_v) > outer_k:
            del outer_v[0]

        # -- Apply step
        x += dx
    else:
        # didn't converge ...
        return postprocess(x), maxiter

    return postprocess(x), 0
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__init__.py
ADDED
File without changes
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (206 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_gcrotmk.cpython-310.pyc
ADDED
Binary file (5.29 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_iterative.cpython-310.pyc
ADDED
Binary file (21.3 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_lgmres.cpython-310.pyc
ADDED
Binary file (6.68 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_lsmr.cpython-310.pyc
ADDED
Binary file (6.64 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_lsqr.cpython-310.pyc
ADDED
Binary file (3.04 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_minres.cpython-310.pyc
ADDED
Binary file (3.16 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_utils.cpython-310.pyc
ADDED
Binary file (604 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/test_gcrotmk.py
ADDED
@@ -0,0 +1,165 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python
|
2 |
+
"""Tests for the linalg._isolve.gcrotmk module
|
3 |
+
"""
|
4 |
+
|
5 |
+
from numpy.testing import (assert_, assert_allclose, assert_equal,
|
6 |
+
suppress_warnings)
|
7 |
+
|
8 |
+
import numpy as np
|
9 |
+
from numpy import zeros, array, allclose
|
10 |
+
from scipy.linalg import norm
|
11 |
+
from scipy.sparse import csr_matrix, eye, rand
|
12 |
+
|
13 |
+
from scipy.sparse.linalg._interface import LinearOperator
|
14 |
+
from scipy.sparse.linalg import splu
|
15 |
+
from scipy.sparse.linalg._isolve import gcrotmk, gmres
|
16 |
+
|
17 |
+
|
18 |
+
# Shared test fixture: a small nonsymmetric sparse system Am @ x = b,
# wrapped in a LinearOperator whose matvec counts its invocations.
Am = csr_matrix(array([[-2,1,0,0,0,9],
                       [1,-2,1,0,5,0],
                       [0,1,-2,1,0,0],
                       [0,0,1,-2,1,0],
                       [0,3,0,1,-2,1],
                       [1,0,0,0,1,-2]]))
b = array([1,2,3,4,5,6])
# Mutable cell so matvec() below can count calls across the module's tests.
count = [0]


def matvec(v):
    # Counts every matrix-vector product, then applies Am.
    count[0] += 1
    return Am@v


A = LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype)
|
34 |
+
|
35 |
+
|
36 |
+
def do_solve(**kw):
    """Solve the module's fixture system with gcrotmk and count matvecs.

    Resets the shared ``count`` cell, runs ``gcrotmk`` to a tight tolerance
    with the given extra keyword arguments, asserts the residual is small,
    and returns ``(solution, number_of_matvec_calls)``.
    """
    count[0] = 0
    with suppress_warnings() as sup:
        # Silence the pending atol-default deprecation warning.
        sup.filter(DeprecationWarning, ".*called without specifying.*")
        x0, flag = gcrotmk(A, b, x0=zeros(A.shape[0]), rtol=1e-14, **kw)
    count_0 = count[0]
    assert_(allclose(A@x0, b, rtol=1e-12, atol=1e-12), norm(A@x0-b))
    return x0, count_0
|
44 |
+
|
45 |
+
|
46 |
+
class TestGCROTMK:
    """Tests for scipy.sparse.linalg._isolve.gcrotmk."""

    def test_preconditioner(self):
        # Check that preconditioning works
        pc = splu(Am.tocsc())
        M = LinearOperator(matvec=pc.solve, shape=A.shape, dtype=A.dtype)

        x0, count_0 = do_solve()
        x1, count_1 = do_solve(M=M)

        # An exact (LU) preconditioner should converge in very few matvecs.
        assert_equal(count_1, 3)
        assert_(count_1 < count_0/2)
        assert_(allclose(x1, x0, rtol=1e-14))

    def test_arnoldi(self):
        np.random.seed(1)

        A = eye(2000) + rand(2000, 2000, density=5e-4)
        b = np.random.rand(2000)

        # The inner arnoldi should be equivalent to gmres
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            x0, flag0 = gcrotmk(A, b, x0=zeros(A.shape[0]), m=15, k=0, maxiter=1)
            x1, flag1 = gmres(A, b, x0=zeros(A.shape[0]), restart=15, maxiter=1)

        # Both are expected to hit maxiter (flag 1) without converging.
        assert_equal(flag0, 1)
        assert_equal(flag1, 1)
        assert np.linalg.norm(A.dot(x0) - b) > 1e-3

        assert_allclose(x0, x1)

    def test_cornercase(self):
        np.random.seed(1234)

        # Rounding error may prevent convergence with tol=0 --- ensure
        # that the return values in this case are correct, and no
        # exceptions are raised

        for n in [3, 5, 10, 100]:
            A = 2*eye(n)

            with suppress_warnings() as sup:
                sup.filter(DeprecationWarning, ".*called without specifying.*")
                b = np.ones(n)
                x, info = gcrotmk(A, b, maxiter=10)
                assert_equal(info, 0)
                assert_allclose(A.dot(x) - b, 0, atol=1e-14)

                x, info = gcrotmk(A, b, rtol=0, maxiter=10)
                if info == 0:
                    assert_allclose(A.dot(x) - b, 0, atol=1e-14)

                b = np.random.rand(n)
                x, info = gcrotmk(A, b, maxiter=10)
                assert_equal(info, 0)
                assert_allclose(A.dot(x) - b, 0, atol=1e-14)

                x, info = gcrotmk(A, b, rtol=0, maxiter=10)
                if info == 0:
                    assert_allclose(A.dot(x) - b, 0, atol=1e-14)

    def test_nans(self):
        # A NaN in the matrix must lead to non-convergence, not a crash.
        A = eye(3, format='lil')
        A[1,1] = np.nan
        b = np.ones(3)

        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            x, info = gcrotmk(A, b, rtol=0, maxiter=10)
            assert_equal(info, 1)

    def test_truncate(self):
        np.random.seed(1234)
        A = np.random.rand(30, 30) + np.eye(30)
        b = np.random.rand(30)

        # Both truncation strategies should converge on this dense system.
        for truncate in ['oldest', 'smallest']:
            with suppress_warnings() as sup:
                sup.filter(DeprecationWarning, ".*called without specifying.*")
                x, info = gcrotmk(A, b, m=10, k=10, truncate=truncate,
                                  rtol=1e-4, maxiter=200)
            assert_equal(info, 0)
            assert_allclose(A.dot(x) - b, 0, atol=1e-3)

    def test_CU(self):
        for discard_C in (True, False):
            # Check that C,U behave as expected
            CU = []
            x0, count_0 = do_solve(CU=CU, discard_C=discard_C)
            assert_(len(CU) > 0)
            assert_(len(CU) <= 6)

            if discard_C:
                for c, u in CU:
                    assert_(c is None)

            # should converge immediately
            x1, count_1 = do_solve(CU=CU, discard_C=discard_C)
            if discard_C:
                assert_equal(count_1, 2 + len(CU))
            else:
                assert_equal(count_1, 3)
            assert_(count_1 <= count_0/2)
            assert_allclose(x1, x0, atol=1e-14)

    def test_denormals(self):
        # Check that no warnings are emitted if the matrix contains
        # numbers for which 1/x has no float representation, and that
        # the solver behaves properly.
        A = np.array([[1, 2], [3, 4]], dtype=float)
        A *= 100 * np.nextafter(0, 1)

        b = np.array([1, 1])

        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            xp, info = gcrotmk(A, b)

        if info == 0:
            assert_allclose(A.dot(xp), b)
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/test_iterative.py
ADDED
@@ -0,0 +1,796 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
""" Test functions for the sparse.linalg._isolve module
|
2 |
+
"""
|
3 |
+
|
4 |
+
import itertools
|
5 |
+
import platform
|
6 |
+
import sys
|
7 |
+
import pytest
|
8 |
+
|
9 |
+
import numpy as np
|
10 |
+
from numpy.testing import assert_array_equal, assert_allclose
|
11 |
+
from numpy import zeros, arange, array, ones, eye, iscomplexobj
|
12 |
+
from numpy.linalg import norm
|
13 |
+
|
14 |
+
from scipy.sparse import spdiags, csr_matrix, kronsum
|
15 |
+
|
16 |
+
from scipy.sparse.linalg import LinearOperator, aslinearoperator
|
17 |
+
from scipy.sparse.linalg._isolve import (bicg, bicgstab, cg, cgs,
|
18 |
+
gcrotmk, gmres, lgmres,
|
19 |
+
minres, qmr, tfqmr)
|
20 |
+
|
21 |
+
# TODO check that method preserve shape and type
|
22 |
+
# TODO test both preconditioner methods
|
23 |
+
|
24 |
+
|
25 |
+
# list of all solvers under test
_SOLVERS = [bicg, bicgstab, cg, cgs, gcrotmk, gmres, lgmres,
            minres, qmr, tfqmr]

# Module-wide pytest marks.
pytestmark = [
    # remove this once atol defaults to 0.0 for all methods
    pytest.mark.filterwarnings("ignore:.*called without specifying.*"),
]
|
33 |
+
|
34 |
+
|
35 |
+
# create parametrized fixture for easy reuse in tests
|
36 |
+
@pytest.fixture(params=_SOLVERS, scope="session")
def solver(request):
    """
    Fixture for all solvers in scipy.sparse.linalg._isolve
    """
    return request.param
|
42 |
+
|
43 |
+
|
44 |
+
class Case:
    """A named test system (matrix + RHS) with per-solver expectations.

    Attributes
    ----------
    name : str
        Human-readable identifier for the case.
    A : matrix-like
        System matrix.
    b : ndarray
        Right-hand side; defaults to ``arange(A.shape[0])`` as floats.
    skip : list
        Solvers that should not be run on this case.
    nonconvergence : list
        Solvers expected not to converge on this case.
    """

    def __init__(self, name, A, b=None, skip=None, nonconvergence=None):
        self.name = name
        self.A = A
        if b is None:
            self.b = arange(A.shape[0], dtype=float)
        else:
            self.b = b
        if skip is None:
            self.skip = []
        else:
            self.skip = skip
        if nonconvergence is None:
            self.nonconvergence = []
        else:
            self.nonconvergence = nonconvergence
|
60 |
+
|
61 |
+
|
62 |
+
class SingleTest:
    """One (case, solver) pairing, with its expected convergence flag."""

    def __init__(self, A, b, solver, casename, convergence=True):
        self.A = A
        self.b = b
        self.solver = solver
        # Combined identifier, e.g. "poisson1d-cg", used in test IDs.
        self.name = casename + '-' + solver.__name__
        self.convergence = convergence

    def __repr__(self):
        return f"<{self.name}>"
72 |
+
|
73 |
+
|
74 |
+
class IterativeParams:
    """Builds the full catalogue of test systems (``Case`` objects).

    Each Case records which solvers to skip and which are expected not to
    converge.  NOTE: the repeated ``np.random.seed(1234)`` calls are
    deliberate — each random matrix is regenerated from the same seed so the
    cases are reproducible independently of construction order.
    """

    def __init__(self):
        # solver groups used to build skip lists
        sym_solvers = [minres, cg]       # require a symmetric matrix
        posdef_solvers = [cg]            # require positive definiteness
        real_solvers = [minres]          # do not support complex matrices

        # list of Cases
        self.cases = []

        # Symmetric and Positive Definite
        N = 40
        data = ones((3, N))
        data[0, :] = 2
        data[1, :] = -1
        data[2, :] = -1
        Poisson1D = spdiags(data, [0, -1, 1], N, N, format='csr')
        self.cases.append(Case("poisson1d", Poisson1D))
        # note: minres fails for single precision
        self.cases.append(Case("poisson1d-F", Poisson1D.astype('f'),
                               skip=[minres]))

        # Symmetric and Negative Definite
        self.cases.append(Case("neg-poisson1d", -Poisson1D,
                               skip=posdef_solvers))
        # note: minres fails for single precision
        self.cases.append(Case("neg-poisson1d-F", (-Poisson1D).astype('f'),
                               skip=posdef_solvers + [minres]))

        # 2-dimensional Poisson equations
        Poisson2D = kronsum(Poisson1D, Poisson1D)
        # note: minres fails for 2-d poisson problem,
        # it will be fixed in the future PR
        self.cases.append(Case("poisson2d", Poisson2D, skip=[minres]))
        # note: minres fails for single precision
        self.cases.append(Case("poisson2d-F", Poisson2D.astype('f'),
                               skip=[minres]))

        # Symmetric and Indefinite
        data = array([[6, -5, 2, 7, -1, 10, 4, -3, -8, 9]], dtype='d')
        RandDiag = spdiags(data, [0], 10, 10, format='csr')
        self.cases.append(Case("rand-diag", RandDiag, skip=posdef_solvers))
        self.cases.append(Case("rand-diag-F", RandDiag.astype('f'),
                               skip=posdef_solvers))

        # Random real-valued
        np.random.seed(1234)
        data = np.random.rand(4, 4)
        self.cases.append(Case("rand", data,
                               skip=posdef_solvers + sym_solvers))
        self.cases.append(Case("rand-F", data.astype('f'),
                               skip=posdef_solvers + sym_solvers))

        # Random symmetric real-valued
        np.random.seed(1234)
        data = np.random.rand(4, 4)
        data = data + data.T
        self.cases.append(Case("rand-sym", data, skip=posdef_solvers))
        self.cases.append(Case("rand-sym-F", data.astype('f'),
                               skip=posdef_solvers))

        # Random pos-def symmetric real (A @ A.T is symmetric PSD)
        np.random.seed(1234)
        data = np.random.rand(9, 9)
        data = np.dot(data.conj(), data.T)
        self.cases.append(Case("rand-sym-pd", data))
        # note: minres fails for single precision
        self.cases.append(Case("rand-sym-pd-F", data.astype('f'),
                               skip=[minres]))

        # Random complex-valued
        np.random.seed(1234)
        data = np.random.rand(4, 4) + 1j * np.random.rand(4, 4)
        skip_cmplx = posdef_solvers + sym_solvers + real_solvers
        self.cases.append(Case("rand-cmplx", data, skip=skip_cmplx))
        self.cases.append(Case("rand-cmplx-F", data.astype('F'),
                               skip=skip_cmplx))

        # Random hermitian complex-valued
        np.random.seed(1234)
        data = np.random.rand(4, 4) + 1j * np.random.rand(4, 4)
        data = data + data.T.conj()
        self.cases.append(Case("rand-cmplx-herm", data,
                               skip=posdef_solvers + real_solvers))
        self.cases.append(Case("rand-cmplx-herm-F", data.astype('F'),
                               skip=posdef_solvers + real_solvers))

        # Random pos-def hermitian complex-valued
        np.random.seed(1234)
        data = np.random.rand(9, 9) + 1j * np.random.rand(9, 9)
        data = np.dot(data.conj(), data.T)
        self.cases.append(Case("rand-cmplx-sym-pd", data, skip=real_solvers))
        self.cases.append(Case("rand-cmplx-sym-pd-F", data.astype('F'),
                               skip=real_solvers))

        # Non-symmetric and Positive Definite
        #
        # cgs, qmr, bicg and tfqmr fail to converge on this one
        # -- algorithmic limitation apparently
        data = ones((2, 10))
        data[0, :] = 2
        data[1, :] = -1
        A = spdiags(data, [0, -1], 10, 10, format='csr')
        self.cases.append(Case("nonsymposdef", A,
                               skip=sym_solvers + [cgs, qmr, bicg, tfqmr]))
        self.cases.append(Case("nonsymposdef-F", A.astype('F'),
                               skip=sym_solvers + [cgs, qmr, bicg, tfqmr]))

        # Symmetric, non-pd, hitting cgs/bicg/bicgstab/qmr/tfqmr breakdown
        A = np.array([[0, 0, 0, 0, 0, 1, -1, -0, -0, -0, -0],
                      [0, 0, 0, 0, 0, 2, -0, -1, -0, -0, -0],
                      [0, 0, 0, 0, 0, 2, -0, -0, -1, -0, -0],
                      [0, 0, 0, 0, 0, 2, -0, -0, -0, -1, -0],
                      [0, 0, 0, 0, 0, 1, -0, -0, -0, -0, -1],
                      [1, 2, 2, 2, 1, 0, -0, -0, -0, -0, -0],
                      [-1, 0, 0, 0, 0, 0, -1, -0, -0, -0, -0],
                      [0, -1, 0, 0, 0, 0, -0, -1, -0, -0, -0],
                      [0, 0, -1, 0, 0, 0, -0, -0, -1, -0, -0],
                      [0, 0, 0, -1, 0, 0, -0, -0, -0, -1, -0],
                      [0, 0, 0, 0, -1, 0, -0, -0, -0, -0, -1]], dtype=float)
        b = np.array([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0], dtype=float)
        # sanity check: the matrix above really is symmetric
        assert (A == A.T).all()
        self.cases.append(Case("sym-nonpd", A, b,
                               skip=posdef_solvers,
                               nonconvergence=[cgs, bicg, bicgstab, qmr, tfqmr]
                               )
                          )

    def generate_tests(self):
        """Expand every Case into SingleTest objects, one per applicable
        solver, honoring the ``skip`` and ``nonconvergence`` lists."""
        # generate test cases with skips applied
        tests = []
        for case in self.cases:
            for solver in _SOLVERS:
                if (solver in case.skip):
                    continue
                if solver in case.nonconvergence:
                    tests += [SingleTest(case.A, case.b, solver, case.name,
                                         convergence=False)]
                else:
                    tests += [SingleTest(case.A, case.b, solver, case.name)]
        return tests
|
214 |
+
|
215 |
+
|
216 |
+
# Materialize the (case, solver) cross-product once at import time.
cases = IterativeParams().generate_tests()


@pytest.fixture(params=cases, ids=[x.name for x in cases], scope="module")
def case(request):
    """
    Fixture for all cases in IterativeParams

    Each parameter is a ``SingleTest`` carrying the matrix, rhs, the solver
    to run, and whether convergence is expected.
    """
    return request.param
|
225 |
+
|
226 |
+
|
227 |
+
def test_maxiter(case):
    """With maxiter=1 the solver must stop after one iteration (info == 1)
    and the callback must fire exactly once."""
    if not case.convergence:
        pytest.skip("Solver - Breakdown case, see gh-8829")
    A = case.A
    # tolerance tight enough that a single iteration cannot converge
    rtol = 1e-12

    b = case.b
    x0 = 0 * b

    residuals = []

    def callback(x):
        # NOTE(review): ``case.A * x`` is elementwise for dense ``A`` when
        # ``x`` is a vector, and gmres's legacy callback passes a scalar
        # pr_norm here — so ``*`` (not ``@``) is what keeps this callable
        # for every solver.  Only the call count is asserted below.
        residuals.append(norm(b - case.A * x))

    x, info = case.solver(A, b, x0=x0, rtol=rtol, maxiter=1, callback=callback)

    assert len(residuals) == 1
    assert info == 1
|
245 |
+
|
246 |
+
|
247 |
+
def test_convergence(case):
    """Solvers converge to the requested relative tolerance (or report
    non-convergence for the known breakdown cases)."""
    A = case.A

    # double / complex-double get a tight tolerance; single precision
    # cannot reach 1e-8 reliably
    if A.dtype.char in "dD":
        rtol = 1e-8
    else:
        rtol = 1e-2

    b = case.b
    x0 = 0 * b

    x, info = case.solver(A, b, x0=x0, rtol=rtol)

    assert_array_equal(x0, 0 * b)  # ensure that x0 is not overwritten
    if case.convergence:
        assert info == 0
        assert norm(A @ x - b) <= norm(b) * rtol
    else:
        # expected breakdown: must signal failure but still return a
        # residual no worse than the zero vector's
        assert info != 0
        assert norm(A @ x - b) <= norm(b)
|
267 |
+
|
268 |
+
|
269 |
+
def test_precond_dummy(case):
    """An identity preconditioner (as LinearOperator and as psolve/rpsolve
    attributes) must not change convergence."""
    if not case.convergence:
        pytest.skip("Solver - Breakdown case, see gh-8829")

    rtol = 1e-8

    def identity(b, which=None):
        """trivial preconditioner"""
        return b

    A = case.A

    M, N = A.shape
    # Ensure the diagonal elements of A are non-zero before calculating
    # 1.0/A.diagonal()
    diagOfA = A.diagonal()
    if np.count_nonzero(diagOfA) == len(diagOfA):
        # NOTE(review): the result of this spdiags call is discarded — it
        # only exercises the 1/diagonal construction; the preconditioner
        # actually used below is the identity.  TODO confirm this is
        # intentional.
        spdiags([1.0 / diagOfA], [0], M, N)

    b = case.b
    x0 = 0 * b

    precond = LinearOperator(A.shape, identity, rmatvec=identity)

    # qmr takes split (left/right) preconditioners instead of M
    if case.solver is qmr:
        x, info = case.solver(A, b, M1=precond, M2=precond, x0=x0, rtol=rtol)
    else:
        x, info = case.solver(A, b, M=precond, x0=x0, rtol=rtol)
    assert info == 0
    assert norm(A @ x - b) <= norm(b) * rtol

    # second path: preconditioner supplied via psolve/rpsolve attributes
    A = aslinearoperator(A)
    A.psolve = identity
    A.rpsolve = identity

    x, info = case.solver(A, b, x0=x0, rtol=rtol)
    assert info == 0
    assert norm(A @ x - b) <= norm(b) * rtol
|
307 |
+
|
308 |
+
|
309 |
+
# Specific test for poisson1d and poisson2d cases
@pytest.mark.parametrize('case', [x for x in IterativeParams().cases
                                  if x.name in ('poisson1d', 'poisson2d')],
                         ids=['poisson1d', 'poisson2d'])
def test_precond_inverse(case):
    """With an exact-inverse preconditioner every solver must converge in a
    handful of matrix-vector products."""
    for solver in _SOLVERS:
        # qmr uses M1/M2 instead of M, so it is excluded here
        if solver in case.skip or solver is qmr:
            continue

        rtol = 1e-8

        def inverse(b, which=None):
            """inverse preconditioner"""
            A = case.A
            if not isinstance(A, np.ndarray):
                A = A.toarray()
            return np.linalg.solve(A, b)

        def rinverse(b, which=None):
            """inverse preconditioner"""
            A = case.A
            if not isinstance(A, np.ndarray):
                A = A.toarray()
            return np.linalg.solve(A.T, b)

        # mutable counter shared with the matvec closures below
        matvec_count = [0]

        def matvec(b):
            matvec_count[0] += 1
            return case.A @ b

        def rmatvec(b):
            matvec_count[0] += 1
            return case.A.T @ b

        b = case.b
        x0 = 0 * b

        A = LinearOperator(case.A.shape, matvec, rmatvec=rmatvec)
        precond = LinearOperator(case.A.shape, inverse, rmatvec=rinverse)

        # Solve with preconditioner
        matvec_count = [0]
        x, info = solver(A, b, M=precond, x0=x0, rtol=rtol)

        assert info == 0
        assert norm(case.A @ x - b) <= norm(b) * rtol

        # Solution should be nearly instant
        assert matvec_count[0] <= 3
|
359 |
+
|
360 |
+
|
361 |
+
def test_atol(solver):
    """Residuals must respect max(atol, rtol*||b||) across a grid of
    tolerances and (possibly badly scaled) preconditioners."""
    # TODO: minres / tfqmr. It didn't historically use absolute tolerances, so
    # fixing it is less urgent.
    if solver in (minres, tfqmr):
        pytest.skip("TODO: Add atol to minres/tfqmr")

    # Historically this is tested as below, all pass but for some reason
    # gcrotmk is over-sensitive to difference between random.seed/rng.random
    # Hence tol lower bound is changed from -10 to -9
    # np.random.seed(1234)
    # A = np.random.rand(10, 10)
    # A = A @ A.T + 10 * np.eye(10)
    # b = 1e3*np.random.rand(10)

    rng = np.random.default_rng(168441431005389)
    A = rng.uniform(size=[10, 10])
    # symmetric positive definite, well conditioned
    A = A @ A.T + 10*np.eye(10)
    b = 1e3 * rng.uniform(size=10)

    b_norm = np.linalg.norm(b)

    # tolerance grid: 0, 1e-9 .. 1e2 (log-spaced), inf
    tols = np.r_[0, np.logspace(-9, 2, 7), np.inf]

    # Check effect of badly scaled preconditioners
    M0 = rng.standard_normal(size=(10, 10))
    M0 = M0 @ M0.T
    Ms = [None, 1e-6 * M0, 1e6 * M0]

    for M, rtol, atol in itertools.product(Ms, tols, tols):
        # both tolerances zero is an invalid request
        if rtol == 0 and atol == 0:
            continue

        if solver is qmr:
            # qmr takes split preconditioners M1/M2
            if M is not None:
                M = aslinearoperator(M)
                M2 = aslinearoperator(np.eye(10))
            else:
                M2 = None
            x, info = solver(A, b, M1=M, M2=M2, rtol=rtol, atol=atol)
        else:
            x, info = solver(A, b, M=M, rtol=rtol, atol=atol)

        assert info == 0
        residual = A @ x - b
        err = np.linalg.norm(residual)
        atol2 = rtol * b_norm
        # Added 1.00025 fudge factor because of `err` exceeding `atol` just
        # very slightly on s390x (see gh-17839)
        assert err <= 1.00025 * max(atol, atol2)
|
410 |
+
|
411 |
+
|
412 |
+
def test_zero_rhs(solver):
    """For b == 0 the solution must be (numerically) zero for every
    tolerance and starting-guess combination."""
    rng = np.random.default_rng(1684414984100503)
    A = rng.random(size=[10, 10])
    # symmetric positive definite
    A = A @ A.T + 10 * np.eye(10)

    b = np.zeros(10)
    tols = np.r_[np.logspace(-10, 2, 7)]

    for tol in tols:
        # default x0 (zeros): exact answer immediately
        x, info = solver(A, b, rtol=tol)
        assert info == 0
        assert_allclose(x, 0., atol=1e-15)

        # nonzero x0 must still be driven to zero within tol
        x, info = solver(A, b, rtol=tol, x0=ones(10))
        assert info == 0
        assert_allclose(x, 0., atol=tol)

        if solver is not minres:
            # minres does not accept atol, hence the guard
            x, info = solver(A, b, rtol=tol, atol=0, x0=ones(10))
            if info == 0:
                assert_allclose(x, 0)

            x, info = solver(A, b, rtol=tol, atol=tol)
            assert info == 0
            assert_allclose(x, 0, atol=1e-300)

            x, info = solver(A, b, rtol=tol, atol=0)
            assert info == 0
            assert_allclose(x, 0, atol=1e-300)
|
441 |
+
|
442 |
+
|
443 |
+
@pytest.mark.xfail(reason="see gh-18697")
def test_maxiter_worsening(solver):
    """Error must not grow boundlessly as maxiter increases on a singular
    system whose rhs is numerically out of range."""
    if solver not in (gmres, lgmres, qmr):
        # these were skipped from the very beginning, see gh-9201; gh-14160
        pytest.skip("Solver breakdown case")
    # Check error does not grow (boundlessly) with increasing maxiter.
    # This can occur due to the solvers hitting close to breakdown,
    # which they should detect and halt as necessary.
    # cf. gh-9100
    if (solver is gmres and platform.machine() == 'aarch64'
            and sys.version_info[1] == 9):
        pytest.xfail(reason="gh-13019")
    # BUGFIX: the machine list previously read ['x86_64' 'x86', ...] —
    # implicit string concatenation produced 'x86_64x86', so genuine
    # x86_64 hosts wrongly took this xfail branch.
    if (solver is lgmres and
            platform.machine() not in ['x86_64', 'x86', 'aarch64', 'arm64']):
        # see gh-17839
        pytest.xfail(reason="fails on at least ppc64le, ppc64 and riscv64")

    # Singular matrix, rhs numerically not in range
    A = np.array([[-0.1112795288033378, 0, 0, 0.16127952880333685],
                  [0, -0.13627952880333782 + 6.283185307179586j, 0, 0],
                  [0, 0, -0.13627952880333782 - 6.283185307179586j, 0],
                  [0.1112795288033368, 0j, 0j, -0.16127952880333785]])
    v = np.ones(4)
    best_error = np.inf

    # Unable to match the Fortran code tolerance levels with this example
    # Original tolerance values

    # slack_tol = 7 if platform.machine() == 'aarch64' else 5
    slack_tol = 9

    for maxiter in range(1, 20):
        x, info = solver(A, v, maxiter=maxiter, rtol=1e-8, atol=0)

        if info == 0:
            assert norm(A @ x - v) <= 1e-8 * norm(v)

        error = np.linalg.norm(A @ x - v)
        best_error = min(best_error, error)

        # Check with slack
        assert error <= slack_tol * best_error
|
485 |
+
|
486 |
+
|
487 |
+
def test_x0_working(solver):
|
488 |
+
# Easy problem
|
489 |
+
rng = np.random.default_rng(1685363802304750)
|
490 |
+
n = 10
|
491 |
+
A = rng.random(size=[n, n])
|
492 |
+
A = A @ A.T
|
493 |
+
b = rng.random(n)
|
494 |
+
x0 = rng.random(n)
|
495 |
+
|
496 |
+
if solver is minres:
|
497 |
+
kw = dict(rtol=1e-6)
|
498 |
+
else:
|
499 |
+
kw = dict(atol=0, rtol=1e-6)
|
500 |
+
|
501 |
+
x, info = solver(A, b, **kw)
|
502 |
+
assert info == 0
|
503 |
+
assert norm(A @ x - b) <= 1e-6 * norm(b)
|
504 |
+
|
505 |
+
x, info = solver(A, b, x0=x0, **kw)
|
506 |
+
assert info == 0
|
507 |
+
assert norm(A @ x - b) <= 2e-6*norm(b)
|
508 |
+
|
509 |
+
|
510 |
+
def test_x0_equals_Mb(case):
    """The special string x0='Mb' (start from M@b) is accepted and the
    string itself is left untouched."""
    if case.solver is tfqmr:
        pytest.skip("Solver does not support x0='Mb'")
    A = case.A
    b = case.b
    x0 = 'Mb'
    rtol = 1e-8
    x, info = case.solver(A, b, x0=x0, rtol=rtol)

    assert_array_equal(x0, 'Mb')  # ensure that x0 is not overwritten
    assert info == 0
    assert norm(A @ x - b) <= rtol * norm(b)
|
522 |
+
|
523 |
+
|
524 |
+
@pytest.mark.parametrize('solver', _SOLVERS)
def test_x0_solves_problem_exactly(solver):
    """If x0 is already the exact solution the solver must return it
    unchanged with info == 0 (no spurious iteration)."""
    # See gh-19948
    mat = np.eye(2)
    rhs = np.array([-1., -1.])

    sol, info = solver(mat, rhs, x0=rhs)
    assert_allclose(sol, rhs)
    assert info == 0
|
533 |
+
|
534 |
+
|
535 |
+
# Specific tfqmr test
@pytest.mark.parametrize('case', IterativeParams().cases)
def test_show(case, capsys):
    """tfqmr's show=True must print the expected status line on stdout and
    nothing on stderr."""
    def cb(x):
        pass

    x, info = tfqmr(case.A, case.b, callback=cb, show=True)
    out, err = capsys.readouterr()

    if case.name == "sym-nonpd":
        # no logs for some reason
        exp = ""
    elif case.name in ("nonsymposdef", "nonsymposdef-F"):
        # Asymmetric and Positive Definite
        exp = "TFQMR: Linear solve not converged due to reach MAXIT iterations"
    else:  # all other cases
        exp = "TFQMR: Linear solve converged due to reach TOL iterations"

    assert out.startswith(exp)
    assert err == ""
|
555 |
+
|
556 |
+
|
557 |
+
def test_positional_deprecation(solver):
    """Passing x0/tol positionally must emit a DeprecationWarning."""
    # from test_x0_working
    rng = np.random.default_rng(1685363802304750)
    n = 10
    A = rng.random(size=[n, n])
    A = A @ A.T
    b = rng.random(n)
    x0 = rng.random(n)
    with pytest.deprecated_call(
        # due to the use of the _deprecate_positional_args decorator, it's not possible
        # to separate the two warnings (1 for positional use, 1 for `tol` deprecation).
        match="use keyword arguments.*|argument `tol` is deprecated.*"
    ):
        solver(A, b, x0, 1e-5)
|
571 |
+
|
572 |
+
|
573 |
+
class TestQMR:
    """Tests specific to the QMR solver."""

    @pytest.mark.filterwarnings('ignore::scipy.sparse.SparseEfficiencyWarning')
    def test_leftright_precond(self):
        """Check that QMR works with left and right preconditioners"""

        from scipy.sparse.linalg._dsolve import splu
        from scipy.sparse.linalg._interface import LinearOperator

        n = 100

        # tridiagonal test matrix and its L/U split used as M1/M2
        dat = ones(n)
        A = spdiags([-2 * dat, 4 * dat, -dat], [-1, 0, 1], n, n)
        b = arange(n, dtype='d')

        L = spdiags([-dat / 2, dat], [-1, 0], n, n)
        U = spdiags([4 * dat, -dat], [0, 1], n, n)
        L_solver = splu(L)
        U_solver = splu(U)

        def L_solve(b):
            return L_solver.solve(b)

        def U_solve(b):
            return U_solver.solve(b)

        def LT_solve(b):
            # transpose solves are needed for qmr's rmatvec path
            return L_solver.solve(b, 'T')

        def UT_solve(b):
            return U_solver.solve(b, 'T')

        M1 = LinearOperator((n, n), matvec=L_solve, rmatvec=LT_solve)
        M2 = LinearOperator((n, n), matvec=U_solve, rmatvec=UT_solve)

        rtol = 1e-8
        x, info = qmr(A, b, rtol=rtol, maxiter=15, M1=M1, M2=M2)

        assert info == 0
        assert norm(A @ x - b) <= rtol * norm(b)
|
612 |
+
|
613 |
+
|
614 |
+
class TestGMRES:
    """Tests specific to the GMRES solver."""

    def test_basic(self):
        """One restart cycle on a Vandermonde system reproduces the value
        pinned from an earlier SciPy release."""
        A = np.vander(np.arange(10) + 1)[:, ::-1]
        b = np.zeros(10)
        b[0] = 1

        x_gm, err = gmres(A, b, restart=5, maxiter=1)

        assert_allclose(x_gm[0], 0.359, rtol=1e-2)

    def test_callback(self):
        """Legacy callback receives the (preconditioned) residual norm;
        values pinned against SciPy 1.0.0."""

        def store_residual(r, rvec):
            # append r after the last nonzero entry of rvec
            rvec[rvec.nonzero()[0].max() + 1] = r

        # Define, A,b
        A = csr_matrix(array([[-2, 1, 0, 0, 0, 0],
                              [1, -2, 1, 0, 0, 0],
                              [0, 1, -2, 1, 0, 0],
                              [0, 0, 1, -2, 1, 0],
                              [0, 0, 0, 1, -2, 1],
                              [0, 0, 0, 0, 1, -2]]))
        b = ones((A.shape[0],))
        maxiter = 1
        rvec = zeros(maxiter + 1)
        rvec[0] = 1.0

        def callback(r):
            return store_residual(r, rvec)

        x, flag = gmres(A, b, x0=zeros(A.shape[0]), rtol=1e-16,
                        maxiter=maxiter, callback=callback)

        # Expected output from SciPy 1.0.0
        assert_allclose(rvec, array([1.0, 0.81649658092772603]), rtol=1e-10)

        # Test preconditioned callback
        M = 1e-3 * np.eye(A.shape[0])
        rvec = zeros(maxiter + 1)
        rvec[0] = 1.0
        x, flag = gmres(A, b, M=M, rtol=1e-16, maxiter=maxiter,
                        callback=callback)

        # Expected output from SciPy 1.0.0
        # (callback has preconditioned residual!)
        assert_allclose(rvec, array([1.0, 1e-3 * 0.81649658092772603]),
                        rtol=1e-10)

    def test_abi(self):
        """Real and complex gmres paths agree and return complex output
        for complex input."""
        # Check we don't segfault on gmres with complex argument
        A = eye(2)
        b = ones(2)
        r_x, r_info = gmres(A, b)
        r_x = r_x.astype(complex)
        x, info = gmres(A.astype(complex), b.astype(complex))

        assert iscomplexobj(x)
        assert_allclose(r_x, x)
        assert r_info == info

    def test_atol_legacy(self):
        """Behavior of rtol/atol combinations kept for backward
        compatibility."""

        A = eye(2)
        b = ones(2)
        x, info = gmres(A, b, rtol=1e-5)
        assert np.linalg.norm(A @ x - b) <= 1e-5 * np.linalg.norm(b)
        assert_allclose(x, b, atol=0, rtol=1e-8)

        # small rhs: relative tolerance alone is not reachable here
        rndm = np.random.RandomState(12345)
        A = rndm.rand(30, 30)
        b = 1e-6 * ones(30)
        x, info = gmres(A, b, rtol=1e-7, restart=20)
        assert np.linalg.norm(A @ x - b) > 1e-7

        # atol=0 forces pure relative termination even for a tiny rhs
        A = eye(2)
        b = 1e-10 * ones(2)
        x, info = gmres(A, b, rtol=1e-8, atol=0)
        assert np.linalg.norm(A @ x - b) <= 1e-8 * np.linalg.norm(b)

    def test_defective_precond_breakdown(self):
        """A singular preconditioner must not produce NaNs or a false
        success."""
        # Breakdown due to defective preconditioner
        M = np.eye(3)
        M[2, 2] = 0

        b = np.array([0, 1, 1])
        x = np.array([1, 0, 0])
        A = np.diag([2, 3, 4])

        x, info = gmres(A, b, x0=x, M=M, rtol=1e-15, atol=0)

        # Should not return nans, nor terminate with false success
        assert not np.isnan(x).any()
        if info == 0:
            assert np.linalg.norm(A @ x - b) <= 1e-15 * np.linalg.norm(b)

        # The solution should be OK outside null space of M
        assert_allclose(M @ (A @ x), M @ b)

    def test_defective_matrix_breakdown(self):
        """A singular matrix must not produce NaNs or a false success."""
        # Breakdown due to defective matrix
        A = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 0]])
        b = np.array([1, 0, 1])
        rtol = 1e-8
        x, info = gmres(A, b, rtol=rtol, atol=0)

        # Should not return nans, nor terminate with false success
        assert not np.isnan(x).any()
        if info == 0:
            assert np.linalg.norm(A @ x - b) <= rtol * np.linalg.norm(b)

        # The solution should be OK outside null space of A
        assert_allclose(A @ (A @ x), A @ b)

    def test_callback_type(self):
        """callback_type selects per-iteration vs per-restart-cycle
        callbacks and the legacy maxiter meaning."""
        # The legacy callback type changes meaning of 'maxiter'
        np.random.seed(1)
        A = np.random.rand(20, 20)
        b = np.random.rand(20)

        cb_count = [0]

        def pr_norm_cb(r):
            cb_count[0] += 1
            assert isinstance(r, float)

        def x_cb(x):
            cb_count[0] += 1
            assert isinstance(x, np.ndarray)

        # 2 iterations is not enough to solve the problem
        cb_count = [0]
        x, info = gmres(A, b, rtol=1e-6, atol=0, callback=pr_norm_cb,
                        maxiter=2, restart=50)
        assert info == 2
        assert cb_count[0] == 2

        # With `callback_type` specified, no warning should be raised
        cb_count = [0]
        x, info = gmres(A, b, rtol=1e-6, atol=0, callback=pr_norm_cb,
                        maxiter=2, restart=50, callback_type='legacy')
        assert info == 2
        assert cb_count[0] == 2

        # 2 restart cycles is enough to solve the problem
        cb_count = [0]
        x, info = gmres(A, b, rtol=1e-6, atol=0, callback=pr_norm_cb,
                        maxiter=2, restart=50, callback_type='pr_norm')
        assert info == 0
        assert cb_count[0] > 2

        # 2 restart cycles is enough to solve the problem
        cb_count = [0]
        x, info = gmres(A, b, rtol=1e-6, atol=0, callback=x_cb, maxiter=2,
                        restart=50, callback_type='x')
        assert info == 0
        assert cb_count[0] == 1

    def test_callback_x_monotonic(self):
        # Check that callback_type='x' gives monotonic norm decrease
        np.random.seed(1)
        A = np.random.rand(20, 20) + np.eye(20)
        b = np.random.rand(20)

        prev_r = [np.inf]
        count = [0]

        def x_cb(x):
            r = np.linalg.norm(A @ x - b)
            assert r <= prev_r[0]
            prev_r[0] = r
            count[0] += 1

        x, info = gmres(A, b, rtol=1e-6, atol=0, callback=x_cb, maxiter=20,
                        restart=10, callback_type='x')
        assert info == 20
        assert count[0] == 20

    def test_restrt_dep(self):
        """The misspelled legacy keyword 'restrt' must warn."""
        with pytest.warns(
                DeprecationWarning,
                match="'gmres' keyword argument 'restrt'"
        ):
            gmres(np.array([1]), np.array([1]), restrt=10)
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/test_lgmres.py
ADDED
@@ -0,0 +1,211 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Tests for the linalg._isolve.lgmres module
|
2 |
+
"""
|
3 |
+
|
4 |
+
from numpy.testing import (assert_, assert_allclose, assert_equal,
|
5 |
+
suppress_warnings)
|
6 |
+
|
7 |
+
import pytest
|
8 |
+
from platform import python_implementation
|
9 |
+
|
10 |
+
import numpy as np
|
11 |
+
from numpy import zeros, array, allclose
|
12 |
+
from scipy.linalg import norm
|
13 |
+
from scipy.sparse import csr_matrix, eye, rand
|
14 |
+
|
15 |
+
from scipy.sparse.linalg._interface import LinearOperator
|
16 |
+
from scipy.sparse.linalg import splu
|
17 |
+
from scipy.sparse.linalg._isolve import lgmres, gmres
|
18 |
+
|
19 |
+
|
20 |
+
Am = csr_matrix(array([[-2, 1, 0, 0, 0, 9],
|
21 |
+
[1, -2, 1, 0, 5, 0],
|
22 |
+
[0, 1, -2, 1, 0, 0],
|
23 |
+
[0, 0, 1, -2, 1, 0],
|
24 |
+
[0, 3, 0, 1, -2, 1],
|
25 |
+
[1, 0, 0, 0, 1, -2]]))
|
26 |
+
b = array([1, 2, 3, 4, 5, 6])
|
27 |
+
count = [0]
|
28 |
+
|
29 |
+
|
30 |
+
def matvec(v):
    """Matrix-vector product with the module-level matrix ``Am``, counting
    calls in the shared ``count`` list so tests can assert iteration cost."""
    count[0] += 1
    return Am@v
|
33 |
+
|
34 |
+
|
35 |
+
A = LinearOperator(matvec=matvec, shape=Am.shape, dtype=Am.dtype)
|
36 |
+
|
37 |
+
|
38 |
+
def do_solve(**kw):
    """Run lgmres on the module-level system (A, b), returning the solution
    and the number of matvecs used; extra keyword args go to lgmres."""
    count[0] = 0
    with suppress_warnings() as sup:
        sup.filter(DeprecationWarning, ".*called without specifying.*")
        x0, flag = lgmres(A, b, x0=zeros(A.shape[0]),
                          inner_m=6, rtol=1e-14, **kw)
    # snapshot the matvec count before any further operator use
    count_0 = count[0]
    assert_(allclose(A@x0, b, rtol=1e-12, atol=1e-12), norm(A@x0-b))
    return x0, count_0
|
47 |
+
|
48 |
+
|
49 |
+
class TestLGMRES:
|
50 |
+
def test_preconditioner(self):
|
51 |
+
# Check that preconditioning works
|
52 |
+
pc = splu(Am.tocsc())
|
53 |
+
M = LinearOperator(matvec=pc.solve, shape=A.shape, dtype=A.dtype)
|
54 |
+
|
55 |
+
x0, count_0 = do_solve()
|
56 |
+
x1, count_1 = do_solve(M=M)
|
57 |
+
|
58 |
+
assert_(count_1 == 3)
|
59 |
+
assert_(count_1 < count_0/2)
|
60 |
+
assert_(allclose(x1, x0, rtol=1e-14))
|
61 |
+
|
62 |
+
def test_outer_v(self):
|
63 |
+
# Check that the augmentation vectors behave as expected
|
64 |
+
|
65 |
+
outer_v = []
|
66 |
+
x0, count_0 = do_solve(outer_k=6, outer_v=outer_v)
|
67 |
+
assert_(len(outer_v) > 0)
|
68 |
+
assert_(len(outer_v) <= 6)
|
69 |
+
|
70 |
+
x1, count_1 = do_solve(outer_k=6, outer_v=outer_v,
|
71 |
+
prepend_outer_v=True)
|
72 |
+
assert_(count_1 == 2, count_1)
|
73 |
+
assert_(count_1 < count_0/2)
|
74 |
+
assert_(allclose(x1, x0, rtol=1e-14))
|
75 |
+
|
76 |
+
# ---
|
77 |
+
|
78 |
+
outer_v = []
|
79 |
+
x0, count_0 = do_solve(outer_k=6, outer_v=outer_v,
|
80 |
+
store_outer_Av=False)
|
81 |
+
assert_(array([v[1] is None for v in outer_v]).all())
|
82 |
+
assert_(len(outer_v) > 0)
|
83 |
+
assert_(len(outer_v) <= 6)
|
84 |
+
|
85 |
+
x1, count_1 = do_solve(outer_k=6, outer_v=outer_v,
|
86 |
+
prepend_outer_v=True)
|
87 |
+
assert_(count_1 == 3, count_1)
|
88 |
+
assert_(count_1 < count_0/2)
|
89 |
+
assert_(allclose(x1, x0, rtol=1e-14))
|
90 |
+
|
91 |
+
@pytest.mark.skipif(python_implementation() == 'PyPy',
|
92 |
+
reason="Fails on PyPy CI runs. See #9507")
|
93 |
+
def test_arnoldi(self):
|
94 |
+
np.random.seed(1234)
|
95 |
+
|
96 |
+
A = eye(2000) + rand(2000, 2000, density=5e-4)
|
97 |
+
b = np.random.rand(2000)
|
98 |
+
|
99 |
+
# The inner arnoldi should be equivalent to gmres
|
100 |
+
with suppress_warnings() as sup:
|
101 |
+
sup.filter(DeprecationWarning, ".*called without specifying.*")
|
102 |
+
x0, flag0 = lgmres(A, b, x0=zeros(A.shape[0]),
|
103 |
+
inner_m=15, maxiter=1)
|
104 |
+
x1, flag1 = gmres(A, b, x0=zeros(A.shape[0]),
|
105 |
+
restart=15, maxiter=1)
|
106 |
+
|
107 |
+
assert_equal(flag0, 1)
|
108 |
+
assert_equal(flag1, 1)
|
109 |
+
norm = np.linalg.norm(A.dot(x0) - b)
|
110 |
+
assert_(norm > 1e-4)
|
111 |
+
assert_allclose(x0, x1)
|
112 |
+
|
113 |
+
def test_cornercase(self):
|
114 |
+
np.random.seed(1234)
|
115 |
+
|
116 |
+
# Rounding error may prevent convergence with tol=0 --- ensure
|
117 |
+
# that the return values in this case are correct, and no
|
118 |
+
# exceptions are raised
|
119 |
+
|
120 |
+
for n in [3, 5, 10, 100]:
|
121 |
+
A = 2*eye(n)
|
122 |
+
|
123 |
+
with suppress_warnings() as sup:
|
124 |
+
sup.filter(DeprecationWarning, ".*called without specifying.*")
|
125 |
+
|
126 |
+
b = np.ones(n)
|
127 |
+
x, info = lgmres(A, b, maxiter=10)
|
128 |
+
assert_equal(info, 0)
|
129 |
+
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
|
130 |
+
|
131 |
+
x, info = lgmres(A, b, rtol=0, maxiter=10)
|
132 |
+
if info == 0:
|
133 |
+
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
|
134 |
+
|
135 |
+
b = np.random.rand(n)
|
136 |
+
x, info = lgmres(A, b, maxiter=10)
|
137 |
+
assert_equal(info, 0)
|
138 |
+
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
|
139 |
+
|
140 |
+
x, info = lgmres(A, b, rtol=0, maxiter=10)
|
141 |
+
if info == 0:
|
142 |
+
assert_allclose(A.dot(x) - b, 0, atol=1e-14)
|
143 |
+
|
144 |
+
def test_nans(self):
|
145 |
+
A = eye(3, format='lil')
|
146 |
+
A[1, 1] = np.nan
|
147 |
+
b = np.ones(3)
|
148 |
+
|
149 |
+
with suppress_warnings() as sup:
|
150 |
+
sup.filter(DeprecationWarning, ".*called without specifying.*")
|
151 |
+
x, info = lgmres(A, b, rtol=0, maxiter=10)
|
152 |
+
assert_equal(info, 1)
|
153 |
+
|
154 |
+
    def test_breakdown_with_outer_v(self):
        """Exact solution supplied via outer_v must be found in one cycle."""
        A = np.array([[1, 2], [3, 4]], dtype=float)
        b = np.array([1, 2])

        x = np.linalg.solve(A, b)
        v0 = np.array([1, 0])

        # The inner iteration should converge to the correct solution,
        # since it's in the outer vector list
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            xp, info = lgmres(A, b, outer_v=[(v0, None), (x, None)], maxiter=1)

        assert_allclose(xp, x, atol=1e-12)
|
168 |
+
|
169 |
+
    def test_breakdown_underdetermined(self):
        # Should find LSQ solution in the Krylov span in one inner
        # iteration, despite solver breakdown from nilpotent A.
        A = np.array([[0, 1, 1, 1],
                      [0, 0, 1, 1],
                      [0, 0, 0, 1],
                      [0, 0, 0, 0]], dtype=float)

        # Right-hand sides chosen so the Krylov space truncates at
        # different depths (A is nilpotent: A**4 == 0).
        bs = [
            np.array([1, 1, 1, 1]),
            np.array([1, 1, 1, 0]),
            np.array([1, 1, 0, 0]),
            np.array([1, 0, 0, 0]),
        ]

        for b in bs:
            with suppress_warnings() as sup:
                sup.filter(DeprecationWarning, ".*called without specifying.*")
                xp, info = lgmres(A, b, maxiter=1)
            resp = np.linalg.norm(A.dot(xp) - b)

            # Reference: least-squares residual over the Krylov basis
            # K = [b, A b, A^2 b, A^3 b]; lgmres should match it.
            K = np.c_[b, A.dot(b), A.dot(A.dot(b)), A.dot(A.dot(A.dot(b)))]
            y, _, _, _ = np.linalg.lstsq(A.dot(K), b, rcond=-1)
            x = K.dot(y)
            res = np.linalg.norm(A.dot(x) - b)

            assert_allclose(resp, res, err_msg=repr(b))
|
196 |
+
|
197 |
+
    def test_denormals(self):
        # Check that no warnings are emitted if the matrix contains
        # numbers for which 1/x has no float representation, and that
        # the solver behaves properly.
        A = np.array([[1, 2], [3, 4]], dtype=float)
        # Scale entries down into the subnormal range.
        A *= 100 * np.nextafter(0, 1)

        b = np.array([1, 1])

        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning, ".*called without specifying.*")
            xp, info = lgmres(A, b)

        # Convergence is not guaranteed at this scale; only check the
        # solution when success is reported.
        if info == 0:
            assert_allclose(A.dot(xp), b)
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/test_lsmr.py
ADDED
@@ -0,0 +1,185 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Copyright (C) 2010 David Fong and Michael Saunders
|
3 |
+
Distributed under the same license as SciPy
|
4 |
+
|
5 |
+
Testing Code for LSMR.
|
6 |
+
|
7 |
+
03 Jun 2010: First version release with lsmr.py
|
8 |
+
|
9 |
+
David Chin-lung Fong [email protected]
|
10 |
+
Institute for Computational and Mathematical Engineering
|
11 |
+
Stanford University
|
12 |
+
|
13 |
+
Michael Saunders [email protected]
|
14 |
+
Systems Optimization Laboratory
|
15 |
+
Dept of MS&E, Stanford University.
|
16 |
+
|
17 |
+
"""
|
18 |
+
|
19 |
+
from numpy import array, arange, eye, zeros, ones, transpose, hstack
|
20 |
+
from numpy.linalg import norm
|
21 |
+
from numpy.testing import assert_allclose
|
22 |
+
import pytest
|
23 |
+
from scipy.sparse import coo_matrix
|
24 |
+
from scipy.sparse.linalg._interface import aslinearoperator
|
25 |
+
from scipy.sparse.linalg import lsmr
|
26 |
+
from .test_lsqr import G, b
|
27 |
+
|
28 |
+
|
29 |
+
class TestLSMR:
    """Correctness tests for lsmr on small consistent systems.

    Covers identity/bidiagonal matrices, scalar and column-vector b,
    complex dtypes, and warm-starting via x0.
    """
    def setup_method(self):
        # Problem dimensions used by the identity/dense cases below.
        self.n = 10
        self.m = 10

    def assertCompatibleSystem(self, A, xtrue):
        # Helper: build b = A @ xtrue and check lsmr recovers xtrue.
        Afun = aslinearoperator(A)
        b = Afun.matvec(xtrue)
        x = lsmr(A, b)[0]
        assert norm(x - xtrue) == pytest.approx(0, abs=1e-5)

    def testIdentityACase1(self):
        # Zero right-hand side -> zero solution.
        A = eye(self.n)
        xtrue = zeros((self.n, 1))
        self.assertCompatibleSystem(A, xtrue)

    def testIdentityACase2(self):
        A = eye(self.n)
        xtrue = ones((self.n,1))
        self.assertCompatibleSystem(A, xtrue)

    def testIdentityACase3(self):
        A = eye(self.n)
        xtrue = transpose(arange(self.n,0,-1))
        self.assertCompatibleSystem(A, xtrue)

    def testBidiagonalA(self):
        # Overdetermined (20 x n) but consistent system.
        A = lowerBidiagonalMatrix(20,self.n)
        xtrue = transpose(arange(self.n,0,-1))
        self.assertCompatibleSystem(A,xtrue)

    def testScalarB(self):
        # A scalar b must be accepted and treated as a length-1 rhs.
        A = array([[1.0, 2.0]])
        b = 3.0
        x = lsmr(A, b)[0]
        assert norm(A.dot(x) - b) == pytest.approx(0)

    def testComplexX(self):
        A = eye(self.n)
        xtrue = transpose(arange(self.n, 0, -1) * (1 + 1j))
        self.assertCompatibleSystem(A, xtrue)

    def testComplexX0(self):
        # Complex initial guess with a real system must work.
        A = 4 * eye(self.n) + ones((self.n, self.n))
        xtrue = transpose(arange(self.n, 0, -1))
        b = aslinearoperator(A).matvec(xtrue)
        x0 = zeros(self.n, dtype=complex)
        x = lsmr(A, b, x0=x0)[0]
        assert norm(x - xtrue) == pytest.approx(0, abs=1e-5)

    def testComplexA(self):
        A = 4 * eye(self.n) + 1j * ones((self.n, self.n))
        xtrue = transpose(arange(self.n, 0, -1).astype(complex))
        self.assertCompatibleSystem(A, xtrue)

    def testComplexB(self):
        A = 4 * eye(self.n) + ones((self.n, self.n))
        xtrue = transpose(arange(self.n, 0, -1) * (1 + 1j))
        b = aslinearoperator(A).matvec(xtrue)
        x = lsmr(A, b)[0]
        assert norm(x - xtrue) == pytest.approx(0, abs=1e-5)

    def testColumnB(self):
        # b of shape (n, 1) must be accepted like a flat vector.
        A = eye(self.n)
        b = ones((self.n, 1))
        x = lsmr(A, b)[0]
        assert norm(A.dot(x) - b.ravel()) == pytest.approx(0)

    def testInitialization(self):
        # Test that the default setting is not modified
        x_ref, _, itn_ref, normr_ref, *_ = lsmr(G, b)
        assert_allclose(norm(b - G@x_ref), normr_ref, atol=1e-6)

        # Test passing zeros yields similar result
        x0 = zeros(b.shape)
        x = lsmr(G, b, x0=x0)[0]
        assert_allclose(x, x_ref)

        # Test warm-start with single iteration
        x0 = lsmr(G, b, maxiter=1)[0]

        x, _, itn, normr, *_ = lsmr(G, b, x0=x0)
        assert_allclose(norm(b - G@x), normr, atol=1e-6)

        # NOTE(gh-12139): This doesn't always converge to the same value as
        # ref because error estimates will be slightly different when calculated
        # from zeros vs x0 as a result only compare norm and itn (not x).

        # x generally converges 1 iteration faster because it started at x0.
        # itn == itn_ref means that lsmr(x0) took an extra iteration see above.
        # -1 is technically possible but is rare (1 in 100000) so it's more
        # likely to be an error elsewhere.
        assert itn - itn_ref in (0, 1)

        # If an extra iteration is performed normr may be 0, while normr_ref
        # may be much larger.
        assert normr < normr_ref * (1 + 1e-6)
|
126 |
+
|
127 |
+
|
128 |
+
class TestLSMRReturns:
    """Validate lsmr's diagnostic return values against quantities
    recomputed directly from the returned solution."""
    def setup_method(self):
        self.n = 10
        self.A = lowerBidiagonalMatrix(20, self.n)
        self.xtrue = transpose(arange(self.n, 0, -1))
        self.Afun = aslinearoperator(self.A)
        self.b = self.Afun.matvec(self.xtrue)
        self.x0 = ones(self.n)
        # Pristine copy of x0, used to detect in-place modification.
        self.x00 = self.x0.copy()
        self.returnValues = lsmr(self.A, self.b)
        self.returnValuesX0 = lsmr(self.A, self.b, x0=self.x0)

    def test_unchanged_x0(self):
        # lsmr must not modify the caller's x0 array in place.
        x, istop, itn, normr, normar, normA, condA, normx = self.returnValuesX0
        assert_allclose(self.x00, self.x0)

    def testNormr(self):
        # normr should equal ||b - A x|| for the returned x.
        x, istop, itn, normr, normar, normA, condA, normx = self.returnValues
        assert norm(self.b - self.Afun.matvec(x)) == pytest.approx(normr)

    def testNormar(self):
        # normar should equal ||A^H (b - A x)||.
        x, istop, itn, normr, normar, normA, condA, normx = self.returnValues
        assert (norm(self.Afun.rmatvec(self.b - self.Afun.matvec(x)))
                == pytest.approx(normar))

    def testNormx(self):
        # normx should equal ||x||.
        x, istop, itn, normr, normar, normA, condA, normx = self.returnValues
        assert norm(x) == pytest.approx(normx)
|
156 |
+
|
157 |
+
|
158 |
+
def lowerBidiagonalMatrix(m, n):
    """Return the leading m-by-n submatrix of the LSMR test matrix

        A = [ 1
              1 2
                2 3
                  3 4
                    ...
                      n ]

    suitably padded by zeros, as a COO sparse matrix.

    04 Jun 2010: First version for distribution with lsmr.py
    """
    # Number of main-diagonal and subdiagonal entries, respectively.
    # (For m <= n there are m diagonal and m-1 subdiagonal entries;
    # for m > n there are n of each — both cases collapse to min().)
    ndiag = min(m, n)
    nsub = min(m - 1, n)
    rows = hstack((arange(ndiag, dtype=int),
                   arange(1, nsub + 1, dtype=int)))
    cols = hstack((arange(ndiag, dtype=int),
                   arange(nsub, dtype=int)))
    vals = hstack((arange(1, ndiag + 1, dtype=float),
                   arange(1, nsub + 1, dtype=float)))
    return coo_matrix((vals, (rows, cols)), shape=(m, n))
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/test_lsqr.py
ADDED
@@ -0,0 +1,120 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
from numpy.testing import assert_allclose, assert_array_equal, assert_equal
|
3 |
+
import pytest
|
4 |
+
import scipy.sparse
|
5 |
+
import scipy.sparse.linalg
|
6 |
+
from scipy.sparse.linalg import lsqr
|
7 |
+
|
8 |
+
# Set up a test problem
n = 35
G = np.eye(n)
normal = np.random.normal
norm = np.linalg.norm

# Perturb the identity with random symmetric-ish noise.
# NOTE(review): gg is 1-D, so gg.T == gg and `gg * gg.T` is an
# elementwise square rather than an outer product; likewise the last
# line adds a 1-D product broadcast over rows. Presumably intentional
# randomization of the fixture — confirm before "fixing".
for jj in range(5):
    gg = normal(size=n)
    hh = gg * gg.T
    G += (hh + hh.T) * 0.5
    G += normal(size=n) * normal(size=n)

b = normal(size=n)

# tolerance for atol/btol keywords of lsqr()
tol = 2e-10
# tolerances for testing the results of the lsqr() call with assert_allclose
# These tolerances are a bit fragile - see discussion in gh-15301.
atol_test = 4e-10
rtol_test = 2e-8
show = False
maxit = None
|
30 |
+
|
31 |
+
|
32 |
+
def test_lsqr_basic():
    # lsqr must not mutate the right-hand side.
    b_copy = b.copy()
    xo, *_ = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit)
    assert_array_equal(b_copy, b)

    # Square well-conditioned system: lsqr should match the direct solve.
    svx = np.linalg.solve(G, b)
    assert_allclose(xo, svx, atol=atol_test, rtol=rtol_test)

    # Now the same but with damp > 0.
    # This is equivalent to solving the extended system:
    # ( G      ) @ x = ( b )
    # ( damp*I )       ( 0 )
    damp = 1.5
    xo, *_ = lsqr(
        G, b, damp=damp, show=show, atol=tol, btol=tol, iter_lim=maxit)

    Gext = np.r_[G, damp * np.eye(G.shape[1])]
    bext = np.r_[b, np.zeros(G.shape[1])]
    svx, *_ = np.linalg.lstsq(Gext, bext, rcond=None)
    assert_allclose(xo, svx, atol=atol_test, rtol=rtol_test)
|
52 |
+
|
53 |
+
|
54 |
+
def test_gh_2466():
    # Regression test for gh-2466: lsqr used to fail on a 1 x 2
    # sparse system.  Success criterion is simply "does not raise".
    rows, cols = np.array([0, 0]), np.array([0, 1])
    entries = np.array([1, -1])
    matrix = scipy.sparse.coo_matrix((entries, (rows, cols)), shape=(1, 2))
    rhs = np.asarray([4])
    lsqr(matrix, rhs)
|
61 |
+
|
62 |
+
|
63 |
+
def test_well_conditioned_problems():
    # Test that sparse the lsqr solver returns the right solution
    # on various problems with different random seeds.
    # This is a non-regression test for a potential ZeroDivisionError
    # raised when computing the `test2` & `test3` convergence conditions.
    n = 10
    A_sparse = scipy.sparse.eye(n, n)
    A_dense = A_sparse.toarray()

    # errstate(invalid='raise') turns any NaN-producing float op into an
    # exception, so a division-by-zero regression fails loudly.
    with np.errstate(invalid='raise'):
        for seed in range(30):
            rng = np.random.RandomState(seed + 10)
            beta = rng.rand(n)
            beta[beta == 0] = 0.00001  # ensure that all the betas are not null
            b = A_sparse @ beta[:, np.newaxis]
            output = lsqr(A_sparse, b, show=show)

            # Check that the termination condition corresponds to an approximate
            # solution to Ax = b
            assert_equal(output[1], 1)
            solution = output[0]

            # Check that we recover the ground truth solution
            assert_allclose(solution, beta)

            # Sanity check: compare to the dense array solver
            reference_solution = np.linalg.solve(A_dense, b).ravel()
            assert_allclose(solution, reference_solution)
|
91 |
+
|
92 |
+
|
93 |
+
def test_b_shapes():
    """lsqr must accept a scalar b and a column-vector b."""
    # Test b being a scalar.
    A = np.array([[1.0, 2.0]])
    b = 3.0
    x = lsqr(A, b)[0]
    assert norm(A.dot(x) - b) == pytest.approx(0)

    # Test b being a column vector.
    A = np.eye(10)
    b = np.ones((10, 1))
    x = lsqr(A, b)[0]
    assert norm(A.dot(x) - b.ravel()) == pytest.approx(0)
|
105 |
+
|
106 |
+
|
107 |
+
def test_initialization():
    # Test the default setting is the same as zeros
    b_copy = b.copy()
    x_ref = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit)
    x0 = np.zeros(x_ref[0].shape)
    x = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit, x0=x0)
    assert_array_equal(b_copy, b)  # b must not be mutated
    assert_allclose(x_ref[0], x[0])

    # Test warm-start with single iteration
    x0 = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=1)[0]
    x = lsqr(G, b, show=show, atol=tol, btol=tol, iter_lim=maxit, x0=x0)
    # Warm-started solve must agree with the cold start on this problem.
    assert_allclose(x_ref[0], x[0])
    assert_array_equal(b_copy, b)
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/test_minres.py
ADDED
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
from numpy.linalg import norm
|
3 |
+
from numpy.testing import assert_equal, assert_allclose, assert_
|
4 |
+
from scipy.sparse.linalg._isolve import minres
|
5 |
+
|
6 |
+
from pytest import raises as assert_raises
|
7 |
+
|
8 |
+
|
9 |
+
def get_sample_problem():
    """Return a deterministic random symmetric 10x10 system (A, b).

    The RNG is reseeded on every call, so repeated calls yield the
    same problem.
    """
    np.random.seed(1234)
    raw = np.random.rand(10, 10)
    symmetric = raw + raw.T  # symmetrize, as required by minres
    rhs = np.random.rand(10)
    return symmetric, rhs
|
17 |
+
|
18 |
+
|
19 |
+
def test_singular():
    # Consistent singular system: zero out row 0 of A and b[0].
    # minres should still report success with a small residual.
    A, b = get_sample_problem()
    A[0, ] = 0   # `A[0, ]` is equivalent to `A[0, :]` — zeroes the first row
    b[0] = 0
    xp, info = minres(A, b)
    assert_equal(info, 0)
    assert norm(A @ xp - b) <= 1e-5 * norm(b)
|
26 |
+
|
27 |
+
|
28 |
+
def test_x0_is_used_by():
    """Check that a supplied x0 actually changes the iteration."""
    A, b = get_sample_problem()
    # Random x0 to feed minres
    np.random.seed(12345)
    x0 = np.random.rand(10)
    trace = []

    def trace_iterates(xk):
        # Record every iterate produced by the solver.
        trace.append(xk)
    minres(A, b, x0=x0, callback=trace_iterates)
    trace_with_x0 = trace

    trace = []
    minres(A, b, callback=trace_iterates)
    # If x0 were ignored, the first iterates of both runs would coincide.
    assert_(not np.array_equal(trace_with_x0[0], trace[0]))
|
43 |
+
|
44 |
+
|
45 |
+
def test_shift():
    # minres(A, b, shift=s) must match an explicit solve of (A - s*I) x = b.
    matrix, rhs = get_sample_problem()
    s = 0.5
    explicitly_shifted = matrix - s * np.eye(10)
    x1, info1 = minres(matrix, rhs, shift=s)
    x2, info2 = minres(explicitly_shifted, rhs)
    assert_equal(info1, 0)
    assert_allclose(x1, x2, rtol=1e-5)
|
53 |
+
|
54 |
+
|
55 |
+
def test_asymmetric_fail():
    """Asymmetric matrix should raise `ValueError` when check=True"""
    A, b = get_sample_problem()
    # Deliberately break symmetry at one off-diagonal pair.
    A[1, 2] = 1
    A[2, 1] = 2
    with assert_raises(ValueError):
        xp, info = minres(A, b, check=True)
|
62 |
+
|
63 |
+
|
64 |
+
def test_minres_non_default_x0():
    # A random (non-zero) starting guess must still reach the
    # requested relative tolerance.
    np.random.seed(1234)
    rtol = 1e-6
    a = np.random.randn(5, 5)
    a = np.dot(a, a.T)  # SPD, hence symmetric as minres requires
    b = np.random.randn(5)
    c = np.random.randn(5)
    x = minres(a, b, x0=c, rtol=rtol)[0]
    assert norm(a @ x - b) <= rtol * norm(b)
|
73 |
+
|
74 |
+
|
75 |
+
def test_minres_precond_non_default_x0():
    # Same as test_minres_non_default_x0 but with a random SPD
    # preconditioner M supplied as well.
    np.random.seed(12345)
    rtol = 1e-6
    a = np.random.randn(5, 5)
    a = np.dot(a, a.T)  # SPD system matrix
    b = np.random.randn(5)
    c = np.random.randn(5)
    m = np.random.randn(5, 5)
    m = np.dot(m, m.T)  # SPD preconditioner
    x = minres(a, b, M=m, x0=c, rtol=rtol)[0]
    assert norm(a @ x - b) <= rtol * norm(b)
|
86 |
+
|
87 |
+
|
88 |
+
def test_minres_precond_exact_x0():
    # x0 equals the exact solution (A = I, b = ones): the solver must
    # accept it and keep the residual within tolerance, even with a
    # random SPD preconditioner.
    np.random.seed(1234)
    rtol = 1e-6
    a = np.eye(10)
    b = np.ones(10)
    c = np.ones(10)
    m = np.random.randn(10, 10)
    m = np.dot(m, m.T)  # SPD preconditioner
    x = minres(a, b, M=m, x0=c, rtol=rtol)[0]
    assert norm(a @ x - b) <= rtol * norm(b)
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/test_utils.py
ADDED
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
from pytest import raises as assert_raises
|
3 |
+
|
4 |
+
import scipy.sparse.linalg._isolve.utils as utils
|
5 |
+
|
6 |
+
|
7 |
+
def test_make_system_bad_shape():
    # A non-square A (5x3) must be rejected by make_system with ValueError.
    assert_raises(ValueError,
                  utils.make_system, np.zeros((5,3)), None, np.zeros(4), np.zeros(4))
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tfqmr.py
ADDED
@@ -0,0 +1,191 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
from .iterative import _get_atol_rtol
|
3 |
+
from .utils import make_system
|
4 |
+
from scipy._lib.deprecation import _NoValue, _deprecate_positional_args
|
5 |
+
|
6 |
+
|
7 |
+
__all__ = ['tfqmr']
|
8 |
+
|
9 |
+
|
10 |
+
@_deprecate_positional_args(version="1.14.0")
def tfqmr(A, b, x0=None, *, tol=_NoValue, maxiter=None, M=None,
          callback=None, atol=None, rtol=1e-5, show=False):
    """
    Use Transpose-Free Quasi-Minimal Residual iteration to solve ``Ax = b``.

    Parameters
    ----------
    A : {sparse matrix, ndarray, LinearOperator}
        The real or complex N-by-N matrix of the linear system.
        Alternatively, `A` can be a linear operator which can
        produce ``Ax`` using, e.g.,
        `scipy.sparse.linalg.LinearOperator`.
    b : {ndarray}
        Right hand side of the linear system. Has shape (N,) or (N,1).
    x0 : {ndarray}
        Starting guess for the solution.
    rtol, atol : float, optional
        Parameters for the convergence test. For convergence,
        ``norm(b - A @ x) <= max(rtol*norm(b), atol)`` should be satisfied.
        The default is ``rtol=1e-5``, the default for ``atol`` is ``rtol``.

        .. warning::

           The default value for ``atol`` will be changed to ``0.0`` in
           SciPy 1.14.0.
    maxiter : int, optional
        Maximum number of iterations. Iteration will stop after maxiter
        steps even if the specified tolerance has not been achieved.
        Default is ``min(10000, ndofs * 10)``, where ``ndofs = A.shape[0]``.
    M : {sparse matrix, ndarray, LinearOperator}
        Inverse of the preconditioner of A. M should approximate the
        inverse of A and be easy to solve for (see Notes). Effective
        preconditioning dramatically improves the rate of convergence,
        which implies that fewer iterations are needed to reach a given
        error tolerance. By default, no preconditioner is used.
    callback : function, optional
        User-supplied function to call after each iteration. It is called
        as `callback(xk)`, where `xk` is the current solution vector.
    show : bool, optional
        Specify ``show = True`` to show the convergence, ``show = False`` is
        to close the output of the convergence.
        Default is `False`.
    tol : float, optional, deprecated

        .. deprecated:: 1.12.0
           `tfqmr` keyword argument ``tol`` is deprecated in favor of ``rtol``
           and will be removed in SciPy 1.14.0.

    Returns
    -------
    x : ndarray
        The converged solution.
    info : int
        Provides convergence information:

            - 0  : successful exit
            - >0 : convergence to tolerance not achieved, number of iterations
            - <0 : illegal input or breakdown

    Notes
    -----
    The Transpose-Free QMR algorithm is derived from the CGS algorithm.
    However, unlike CGS, the convergence curves for the TFQMR method is
    smoothed by computing a quasi minimization of the residual norm. The
    implementation supports left preconditioner, and the "residual norm"
    to compute in convergence criterion is actually an upper bound on the
    actual residual norm ``||b - Axk||``.

    References
    ----------
    .. [1] R. W. Freund, A Transpose-Free Quasi-Minimal Residual Algorithm for
           Non-Hermitian Linear Systems, SIAM J. Sci. Comput., 14(2), 470-482,
           1993.
    .. [2] Y. Saad, Iterative Methods for Sparse Linear Systems, 2nd edition,
           SIAM, Philadelphia, 2003.
    .. [3] C. T. Kelley, Iterative Methods for Linear and Nonlinear Equations,
           number 16 in Frontiers in Applied Mathematics, SIAM, Philadelphia,
           1995.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import tfqmr
    >>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
    >>> b = np.array([2, 4, -1], dtype=float)
    >>> x, exitCode = tfqmr(A, b)
    >>> print(exitCode)  # 0 indicates successful convergence
    0
    >>> np.allclose(A.dot(x), b)
    True
    """

    # Check data type: upcast int64 operands to float so the arithmetic
    # below is floating-point.
    dtype = A.dtype
    if np.issubdtype(dtype, np.int64):
        dtype = float
        A = A.astype(dtype)
    if np.issubdtype(b.dtype, np.int64):
        b = b.astype(dtype)

    A, M, x, b, postprocess = make_system(A, M, x0, b)

    # Check if the R.H.S is a zero vector
    if np.linalg.norm(b) == 0.:
        x = b.copy()
        return (postprocess(x), 0)

    ndofs = A.shape[0]
    if maxiter is None:
        maxiter = min(10000, ndofs * 10)

    # Initial residual: b itself when starting from the zero guess.
    if x0 is None:
        r = b.copy()
    else:
        r = b - A.matvec(x)
    u = r
    w = r.copy()
    # Take rstar as b - Ax0, that is rstar := r = b - Ax0 mathematically
    rstar = r
    v = M.matvec(A.matvec(r))
    uhat = v
    d = theta = eta = 0.
    # at this point we know rstar == r, so rho is always real
    rho = np.inner(rstar.conjugate(), r).real
    rhoLast = rho
    r0norm = np.sqrt(rho)
    tau = r0norm
    if r0norm == 0:
        return (postprocess(x), 0)

    # we call this to get the right atol and raise warnings as necessary
    atol, _ = _get_atol_rtol('tfqmr', r0norm, tol, atol, rtol)

    # Main loop: each pair of iterations (even, odd) corresponds to one
    # CGS-like step of [1]; alpha/uNext computed on even iterations are
    # reused on the following odd one.
    for iter in range(maxiter):
        even = iter % 2 == 0
        if (even):
            vtrstar = np.inner(rstar.conjugate(), v)
            # Check breakdown
            if vtrstar == 0.:
                return (postprocess(x), -1)
            alpha = rho / vtrstar
            uNext = u - alpha * v  # [1]-(5.6)
        w -= alpha * uhat  # [1]-(5.8)
        d = u + (theta**2 / alpha) * eta * d  # [1]-(5.5)
        # [1]-(5.2)
        theta = np.linalg.norm(w) / tau
        c = np.sqrt(1. / (1 + theta**2))
        tau *= theta * c
        # Calculate step and direction [1]-(5.4)
        eta = (c**2) * alpha
        z = M.matvec(d)
        x += eta * z

        if callback is not None:
            callback(x)

        # Convergence criterion: tau*sqrt(iter+1) is an upper bound on
        # the true residual norm (see Notes).
        if tau * np.sqrt(iter+1) < atol:
            if (show):
                print("TFQMR: Linear solve converged due to reach TOL "
                      f"iterations {iter+1}")
            return (postprocess(x), 0)

        if (not even):
            # [1]-(5.7)
            rho = np.inner(rstar.conjugate(), w)
            beta = rho / rhoLast
            u = w + beta * u
            v = beta * uhat + (beta**2) * v
            uhat = M.matvec(A.matvec(u))
            v += uhat
        else:
            uhat = M.matvec(A.matvec(uNext))
            u = uNext
            rhoLast = rho

    if (show):
        print("TFQMR: Linear solve not converged due to reach MAXIT "
              f"iterations {iter+1}")
    return (postprocess(x), maxiter)
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/utils.py
ADDED
@@ -0,0 +1,127 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
__docformat__ = "restructuredtext en"
|
2 |
+
|
3 |
+
__all__ = []
|
4 |
+
|
5 |
+
|
6 |
+
from numpy import asanyarray, asarray, array, zeros
|
7 |
+
|
8 |
+
from scipy.sparse.linalg._interface import aslinearoperator, LinearOperator, \
|
9 |
+
IdentityOperator
|
10 |
+
|
11 |
+
# Dtype-coercion table for pairs of LAPACK-style type characters
# ('f' single, 'd' double, 'F' complex single, 'D' complex double):
# the value is the smallest type that can represent both operands.
_coerce_rules = {('f','f'):'f', ('f','d'):'d', ('f','F'):'F',
                 ('f','D'):'D', ('d','f'):'d', ('d','d'):'d',
                 ('d','F'):'D', ('d','D'):'D', ('F','f'):'F',
                 ('F','d'):'D', ('F','F'):'F', ('F','D'):'D',
                 ('D','f'):'D', ('D','d'):'D', ('D','F'):'D',
                 ('D','D'):'D'}
|
17 |
+
|
18 |
+
|
19 |
+
def coerce(x, y):
    """Coerce two dtype characters to a common type via ``_coerce_rules``.

    Any character outside 'fdFD' is treated as double precision ('d').
    """
    x = x if x in 'fdFD' else 'd'
    y = y if y in 'fdFD' else 'd'
    return _coerce_rules[x, y]
|
25 |
+
|
26 |
+
|
27 |
+
def id(x):
    # Identity map, used as the default (no-op) preconditioner solve.
    # NOTE: intentionally shadows the ``id`` builtin within this module.
    return x
|
29 |
+
|
30 |
+
|
31 |
+
def make_system(A, M, x0, b):
    """Make a linear system Ax=b

    Parameters
    ----------
    A : LinearOperator
        sparse or dense matrix (or any valid input to aslinearoperator)
    M : {LinearOperator, Nones}
        preconditioner
        sparse or dense matrix (or any valid input to aslinearoperator)
    x0 : {array_like, str, None}
        initial guess to iterative method.
        ``x0 = 'Mb'`` means using the nonzero initial guess ``M @ b``.
        Default is `None`, which means using the zero initial guess.
    b : array_like
        right hand side

    Returns
    -------
    (A, M, x, b, postprocess)
        A : LinearOperator
            matrix of the linear system
        M : LinearOperator
            preconditioner
        x : rank 1 ndarray
            initial guess
        b : rank 1 ndarray
            right hand side
        postprocess : function
            converts the solution vector to the appropriate
            type and dimensions (e.g. (N,1) matrix)

    Raises
    ------
    ValueError
        If A is not square, the shapes of A/b/x0 are incompatible, the
        preconditioner shape does not match A, or x0 is an unrecognized
        string.
    """
    # Keep the original object: attribute-based preconditioners
    # (``psolve``/``rpsolve``) live on it, not on the LinearOperator wrapper.
    A_ = A
    A = aslinearoperator(A)

    if A.shape[0] != A.shape[1]:
        raise ValueError(f'expected square matrix, but got shape={(A.shape,)}')

    N = A.shape[0]

    b = asanyarray(b)

    if not (b.shape == (N,1) or b.shape == (N,)):
        raise ValueError(f'shapes of A {A.shape} and b {b.shape} are '
                         'incompatible')

    if b.dtype.char not in 'fdFD':
        b = b.astype('d')  # upcast non-FP types to double

    def postprocess(x):
        # Solution is returned as-is; hook kept for interface stability.
        return x

    # Determine the working dtype from A (or a probe matvec) and b.
    if hasattr(A,'dtype'):
        xtype = A.dtype.char
    else:
        xtype = A.matvec(b).dtype.char
    xtype = coerce(xtype, b.dtype.char)

    b = asarray(b,dtype=xtype)  # make b the same type as x
    b = b.ravel()

    # process preconditioner
    if M is None:
        # Fall back to ``psolve``/``rpsolve`` attributes on the original
        # object, if present; otherwise use an identity preconditioner.
        if hasattr(A_,'psolve'):
            psolve = A_.psolve
        else:
            psolve = id
        if hasattr(A_,'rpsolve'):
            rpsolve = A_.rpsolve
        else:
            rpsolve = id
        if psolve is id and rpsolve is id:
            M = IdentityOperator(shape=A.shape, dtype=A.dtype)
        else:
            M = LinearOperator(A.shape, matvec=psolve, rmatvec=rpsolve,
                               dtype=A.dtype)
    else:
        M = aslinearoperator(M)
        if A.shape != M.shape:
            raise ValueError('matrix and preconditioner have different shapes')

    # set initial guess
    if x0 is None:
        x = zeros(N, dtype=xtype)
    elif isinstance(x0, str):
        if x0 == 'Mb':  # use nonzero initial guess ``M @ b``
            bCopy = b.copy()
            x = M.matvec(bCopy)
        else:
            # Previously an unknown string fell through and crashed later
            # with UnboundLocalError; fail fast with a clear message.
            raise ValueError(f"unknown initial guess string {x0!r}; "
                             "only 'Mb' is supported")
    else:
        x = array(x0, dtype=xtype)
        if not (x.shape == (N, 1) or x.shape == (N,)):
            raise ValueError(f'shapes of A {A.shape} and '
                             f'x0 {x.shape} are incompatible')
        x = x.ravel()

    return A, M, x, b, postprocess
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_matfuncs.py
ADDED
@@ -0,0 +1,940 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Sparse matrix functions
|
3 |
+
"""
|
4 |
+
|
5 |
+
#
|
6 |
+
# Authors: Travis Oliphant, March 2002
|
7 |
+
# Anthony Scopatz, August 2012 (Sparse Updates)
|
8 |
+
# Jake Vanderplas, August 2012 (Sparse Updates)
|
9 |
+
#
|
10 |
+
|
11 |
+
__all__ = ['expm', 'inv', 'matrix_power']
|
12 |
+
|
13 |
+
import numpy as np
|
14 |
+
from scipy.linalg._basic import solve, solve_triangular
|
15 |
+
|
16 |
+
from scipy.sparse._base import issparse
|
17 |
+
from scipy.sparse.linalg import spsolve
|
18 |
+
from scipy.sparse._sputils import is_pydata_spmatrix, isintlike
|
19 |
+
|
20 |
+
import scipy.sparse
|
21 |
+
import scipy.sparse.linalg
|
22 |
+
from scipy.sparse.linalg._interface import LinearOperator
|
23 |
+
from scipy.sparse._construct import eye
|
24 |
+
|
25 |
+
from ._expm_multiply import _ident_like, _exact_1_norm as _onenorm
|
26 |
+
|
27 |
+
|
28 |
+
UPPER_TRIANGULAR = 'upper_triangular'
|
29 |
+
|
30 |
+
|
31 |
+
def inv(A):
    """
    Compute the inverse of a sparse matrix.

    Parameters
    ----------
    A : (M, M) sparse matrix
        square matrix to be inverted

    Returns
    -------
    Ainv : (M, M) sparse matrix
        inverse of `A`

    Notes
    -----
    This computes the sparse inverse of `A`.  If the inverse of `A` is
    expected to be non-sparse, it will likely be faster to convert `A`
    to dense and use `scipy.linalg.inv`.

    Examples
    --------
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import inv
    >>> A = csc_matrix([[1., 0.], [1., 2.]])
    >>> Ainv = inv(A)
    >>> A.dot(Ainv).toarray()
    array([[ 1.,  0.],
           [ 0.,  1.]])

    .. versionadded:: 0.12.0

    """
    # Only sparse inputs are supported; dense callers should use
    # scipy.linalg.inv instead.
    if not (scipy.sparse.issparse(A) or is_pydata_spmatrix(A)):
        raise TypeError('Input must be a sparse matrix')

    # The inverse is the solution X of "A @ X = I"; the sparse direct
    # solver keeps the computation accurate.
    identity = _ident_like(A)
    return spsolve(A, identity)
|
78 |
+
|
79 |
+
|
80 |
+
def _onenorm_matrix_power_nnm(A, p):
|
81 |
+
"""
|
82 |
+
Compute the 1-norm of a non-negative integer power of a non-negative matrix.
|
83 |
+
|
84 |
+
Parameters
|
85 |
+
----------
|
86 |
+
A : a square ndarray or matrix or sparse matrix
|
87 |
+
Input matrix with non-negative entries.
|
88 |
+
p : non-negative integer
|
89 |
+
The power to which the matrix is to be raised.
|
90 |
+
|
91 |
+
Returns
|
92 |
+
-------
|
93 |
+
out : float
|
94 |
+
The 1-norm of the matrix power p of A.
|
95 |
+
|
96 |
+
"""
|
97 |
+
# Check input
|
98 |
+
if int(p) != p or p < 0:
|
99 |
+
raise ValueError('expected non-negative integer p')
|
100 |
+
p = int(p)
|
101 |
+
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
|
102 |
+
raise ValueError('expected A to be like a square matrix')
|
103 |
+
|
104 |
+
# Explicitly make a column vector so that this works when A is a
|
105 |
+
# numpy matrix (in addition to ndarray and sparse matrix).
|
106 |
+
v = np.ones((A.shape[0], 1), dtype=float)
|
107 |
+
M = A.T
|
108 |
+
for i in range(p):
|
109 |
+
v = M.dot(v)
|
110 |
+
return np.max(v)
|
111 |
+
|
112 |
+
|
113 |
+
def _is_upper_triangular(A):
|
114 |
+
# This function could possibly be of wider interest.
|
115 |
+
if issparse(A):
|
116 |
+
lower_part = scipy.sparse.tril(A, -1)
|
117 |
+
# Check structural upper triangularity,
|
118 |
+
# then coincidental upper triangularity if needed.
|
119 |
+
return lower_part.nnz == 0 or lower_part.count_nonzero() == 0
|
120 |
+
elif is_pydata_spmatrix(A):
|
121 |
+
import sparse
|
122 |
+
lower_part = sparse.tril(A, -1)
|
123 |
+
return lower_part.nnz == 0
|
124 |
+
else:
|
125 |
+
return not np.tril(A, -1).any()
|
126 |
+
|
127 |
+
|
128 |
+
def _smart_matrix_product(A, B, alpha=None, structure=None):
    """
    A matrix product that knows about sparse and structured matrices.

    Parameters
    ----------
    A : 2d ndarray
        First matrix.
    B : 2d ndarray
        Second matrix.
    alpha : float, optional
        The matrix product will be scaled by this constant.
    structure : str, optional
        A string describing the structure of both matrices `A` and `B`.
        Only `upper_triangular` is currently supported.

    Returns
    -------
    M : 2d ndarray
        Matrix product of A and B.

    """
    if len(A.shape) != 2:
        raise ValueError('expected A to be a rectangular matrix')
    if len(B.shape) != 2:
        raise ValueError('expected B to be a rectangular matrix')

    # For dense upper-triangular operands, delegate to the specialized
    # BLAS trmm routine instead of a generic product.
    trmm = None
    if structure == UPPER_TRIANGULAR:
        both_dense = not (issparse(A) or issparse(B)
                          or is_pydata_spmatrix(A) or is_pydata_spmatrix(B))
        if both_dense:
            trmm, = scipy.linalg.get_blas_funcs(('trmm',), (A, B))

    if trmm is not None:
        return trmm(1. if alpha is None else alpha, A, B)
    product = A.dot(B)
    return product if alpha is None else alpha * product
|
169 |
+
|
170 |
+
|
171 |
+
class MatrixPowerOperator(LinearOperator):
    """A linear operator lazily representing the p-th power of a matrix.

    The power ``A**p`` is never formed explicitly; each application of
    the operator multiplies by `A` repeatedly, `p` times.
    """

    def __init__(self, A, p, structure=None):
        # A must be square and p a non-negative exponent.
        if A.ndim != 2 or A.shape[0] != A.shape[1]:
            raise ValueError('expected A to be like a square matrix')
        if p < 0:
            raise ValueError('expected p to be a non-negative integer')
        self._A = A
        self._p = p
        self._structure = structure
        self.dtype = A.dtype
        self.ndim = A.ndim
        self.shape = A.shape

    def _matvec(self, x):
        # Apply A a total of p times.
        for _ in range(self._p):
            x = self._A.dot(x)
        return x

    def _rmatvec(self, x):
        # Equivalent to applying (A.T)**p.
        transposed = self._A.T
        x = x.ravel()
        for _ in range(self._p):
            x = transposed.dot(x)
        return x

    def _matmat(self, X):
        # Matrix-matrix application uses the structure-aware product.
        for _ in range(self._p):
            X = _smart_matrix_product(self._A, X, structure=self._structure)
        return X

    @property
    def T(self):
        # NOTE: the structure hint is intentionally not propagated; the
        # transpose of an upper-triangular matrix is lower triangular.
        return MatrixPowerOperator(self._A.T, self._p)
|
205 |
+
|
206 |
+
|
207 |
+
class ProductOperator(LinearOperator):
    """
    A linear operator representing a chained matrix product.

    For now, this is limited to products of multiple square matrices.
    """

    def __init__(self, *args, **kwargs):
        self._structure = kwargs.get('structure', None)
        # Every factor must be a square matrix.
        for operand in args:
            if len(operand.shape) != 2 or operand.shape[0] != operand.shape[1]:
                raise ValueError(
                        'For now, the ProductOperator implementation is '
                        'limited to the product of multiple square matrices.')
        if args:
            n = args[0].shape[0]
            # All factors must additionally share the same order.
            for operand in args:
                for dim in operand.shape:
                    if dim != n:
                        raise ValueError(
                                'The square matrices of the ProductOperator '
                                'must all have the same shape.')
            self.shape = (n, n)
            self.ndim = len(self.shape)
        self.dtype = np.result_type(*[factor.dtype for factor in args])
        self._operator_sequence = args

    def _matvec(self, x):
        # Apply the factors right-to-left, as in an ordinary product.
        for factor in reversed(self._operator_sequence):
            x = factor.dot(x)
        return x

    def _rmatvec(self, x):
        # The adjoint applies the transposed factors left-to-right.
        x = x.ravel()
        for factor in self._operator_sequence:
            x = factor.T.dot(x)
        return x

    def _matmat(self, X):
        for factor in reversed(self._operator_sequence):
            X = _smart_matrix_product(factor, X, structure=self._structure)
        return X

    @property
    def T(self):
        # (A B C).T == C.T B.T A.T
        transposed_args = [factor.T
                           for factor in reversed(self._operator_sequence)]
        return ProductOperator(*transposed_args)
|
252 |
+
|
253 |
+
|
254 |
+
def _onenormest_matrix_power(A, p,
|
255 |
+
t=2, itmax=5, compute_v=False, compute_w=False, structure=None):
|
256 |
+
"""
|
257 |
+
Efficiently estimate the 1-norm of A^p.
|
258 |
+
|
259 |
+
Parameters
|
260 |
+
----------
|
261 |
+
A : ndarray
|
262 |
+
Matrix whose 1-norm of a power is to be computed.
|
263 |
+
p : int
|
264 |
+
Non-negative integer power.
|
265 |
+
t : int, optional
|
266 |
+
A positive parameter controlling the tradeoff between
|
267 |
+
accuracy versus time and memory usage.
|
268 |
+
Larger values take longer and use more memory
|
269 |
+
but give more accurate output.
|
270 |
+
itmax : int, optional
|
271 |
+
Use at most this many iterations.
|
272 |
+
compute_v : bool, optional
|
273 |
+
Request a norm-maximizing linear operator input vector if True.
|
274 |
+
compute_w : bool, optional
|
275 |
+
Request a norm-maximizing linear operator output vector if True.
|
276 |
+
|
277 |
+
Returns
|
278 |
+
-------
|
279 |
+
est : float
|
280 |
+
An underestimate of the 1-norm of the sparse matrix.
|
281 |
+
v : ndarray, optional
|
282 |
+
The vector such that ||Av||_1 == est*||v||_1.
|
283 |
+
It can be thought of as an input to the linear operator
|
284 |
+
that gives an output with particularly large norm.
|
285 |
+
w : ndarray, optional
|
286 |
+
The vector Av which has relatively large 1-norm.
|
287 |
+
It can be thought of as an output of the linear operator
|
288 |
+
that is relatively large in norm compared to the input.
|
289 |
+
|
290 |
+
"""
|
291 |
+
return scipy.sparse.linalg.onenormest(
|
292 |
+
MatrixPowerOperator(A, p, structure=structure))
|
293 |
+
|
294 |
+
|
295 |
+
def _onenormest_product(operator_seq,
|
296 |
+
t=2, itmax=5, compute_v=False, compute_w=False, structure=None):
|
297 |
+
"""
|
298 |
+
Efficiently estimate the 1-norm of the matrix product of the args.
|
299 |
+
|
300 |
+
Parameters
|
301 |
+
----------
|
302 |
+
operator_seq : linear operator sequence
|
303 |
+
Matrices whose 1-norm of product is to be computed.
|
304 |
+
t : int, optional
|
305 |
+
A positive parameter controlling the tradeoff between
|
306 |
+
accuracy versus time and memory usage.
|
307 |
+
Larger values take longer and use more memory
|
308 |
+
but give more accurate output.
|
309 |
+
itmax : int, optional
|
310 |
+
Use at most this many iterations.
|
311 |
+
compute_v : bool, optional
|
312 |
+
Request a norm-maximizing linear operator input vector if True.
|
313 |
+
compute_w : bool, optional
|
314 |
+
Request a norm-maximizing linear operator output vector if True.
|
315 |
+
structure : str, optional
|
316 |
+
A string describing the structure of all operators.
|
317 |
+
Only `upper_triangular` is currently supported.
|
318 |
+
|
319 |
+
Returns
|
320 |
+
-------
|
321 |
+
est : float
|
322 |
+
An underestimate of the 1-norm of the sparse matrix.
|
323 |
+
v : ndarray, optional
|
324 |
+
The vector such that ||Av||_1 == est*||v||_1.
|
325 |
+
It can be thought of as an input to the linear operator
|
326 |
+
that gives an output with particularly large norm.
|
327 |
+
w : ndarray, optional
|
328 |
+
The vector Av which has relatively large 1-norm.
|
329 |
+
It can be thought of as an output of the linear operator
|
330 |
+
that is relatively large in norm compared to the input.
|
331 |
+
|
332 |
+
"""
|
333 |
+
return scipy.sparse.linalg.onenormest(
|
334 |
+
ProductOperator(*operator_seq, structure=structure))
|
335 |
+
|
336 |
+
|
337 |
+
class _ExpmPadeHelper:
    """
    Help lazily evaluate a matrix exponential.

    The idea is to not do more work than we need for high expm precision,
    so we lazily compute matrix powers and store or precompute
    other properties of the matrix.

    """

    def __init__(self, A, structure=None, use_exact_onenorm=False):
        """
        Initialize the object.

        Parameters
        ----------
        A : a dense or sparse square numpy matrix or ndarray
            The matrix to be exponentiated.
        structure : str, optional
            A string describing the structure of matrix `A`.
            Only `upper_triangular` is currently supported.
        use_exact_onenorm : bool, optional
            If True then only the exact one-norm of matrix powers and products
            will be used. Otherwise, the one-norm of powers and products
            may initially be estimated.
        """
        self.A = A
        # Lazily computed even powers of A (see the A2..A10 properties).
        self._A2 = None
        self._A4 = None
        self._A6 = None
        self._A8 = None
        self._A10 = None
        # Exact values d_k = ||A^k||_1 ** (1/k), filled by the *_tight
        # properties.
        self._d4_exact = None
        self._d6_exact = None
        self._d8_exact = None
        self._d10_exact = None
        # Estimated values of the same quantities, filled by the *_loose
        # properties when estimation is allowed.
        self._d4_approx = None
        self._d6_approx = None
        self._d8_approx = None
        self._d10_approx = None
        self.ident = _ident_like(A)
        self.structure = structure
        self.use_exact_onenorm = use_exact_onenorm

    @property
    def A2(self):
        # A**2, computed once and cached.
        if self._A2 is None:
            self._A2 = _smart_matrix_product(
                    self.A, self.A, structure=self.structure)
        return self._A2

    @property
    def A4(self):
        # A**4 = (A**2)**2, computed once and cached.
        if self._A4 is None:
            self._A4 = _smart_matrix_product(
                    self.A2, self.A2, structure=self.structure)
        return self._A4

    @property
    def A6(self):
        # A**6 = A**4 @ A**2, computed once and cached.
        if self._A6 is None:
            self._A6 = _smart_matrix_product(
                    self.A4, self.A2, structure=self.structure)
        return self._A6

    @property
    def A8(self):
        # A**8 = A**6 @ A**2, computed once and cached.
        if self._A8 is None:
            self._A8 = _smart_matrix_product(
                    self.A6, self.A2, structure=self.structure)
        return self._A8

    @property
    def A10(self):
        # A**10 = A**4 @ A**6, computed once and cached.
        if self._A10 is None:
            self._A10 = _smart_matrix_product(
                    self.A4, self.A6, structure=self.structure)
        return self._A10

    @property
    def d4_tight(self):
        # Exact ||A^4||_1 ** (1/4), cached.
        if self._d4_exact is None:
            self._d4_exact = _onenorm(self.A4)**(1/4.)
        return self._d4_exact

    @property
    def d6_tight(self):
        # Exact ||A^6||_1 ** (1/6), cached.
        if self._d6_exact is None:
            self._d6_exact = _onenorm(self.A6)**(1/6.)
        return self._d6_exact

    @property
    def d8_tight(self):
        # Exact ||A^8||_1 ** (1/8), cached.
        if self._d8_exact is None:
            self._d8_exact = _onenorm(self.A8)**(1/8.)
        return self._d8_exact

    @property
    def d10_tight(self):
        # Exact ||A^10||_1 ** (1/10), cached.
        if self._d10_exact is None:
            self._d10_exact = _onenorm(self.A10)**(1/10.)
        return self._d10_exact

    @property
    def d4_loose(self):
        # Possibly-estimated ||A^4||_1 ** (1/4); an already-computed exact
        # value is preferred over producing a fresh estimate.
        if self.use_exact_onenorm:
            return self.d4_tight
        if self._d4_exact is not None:
            return self._d4_exact
        else:
            if self._d4_approx is None:
                self._d4_approx = _onenormest_matrix_power(self.A2, 2,
                        structure=self.structure)**(1/4.)
            return self._d4_approx

    @property
    def d6_loose(self):
        # Possibly-estimated ||A^6||_1 ** (1/6); prefers an exact value.
        if self.use_exact_onenorm:
            return self.d6_tight
        if self._d6_exact is not None:
            return self._d6_exact
        else:
            if self._d6_approx is None:
                self._d6_approx = _onenormest_matrix_power(self.A2, 3,
                        structure=self.structure)**(1/6.)
            return self._d6_approx

    @property
    def d8_loose(self):
        # Possibly-estimated ||A^8||_1 ** (1/8); prefers an exact value.
        if self.use_exact_onenorm:
            return self.d8_tight
        if self._d8_exact is not None:
            return self._d8_exact
        else:
            if self._d8_approx is None:
                self._d8_approx = _onenormest_matrix_power(self.A4, 2,
                        structure=self.structure)**(1/8.)
            return self._d8_approx

    @property
    def d10_loose(self):
        # Possibly-estimated ||A^10||_1 ** (1/10), estimated through the
        # product A^4 @ A^6; prefers an exact value.
        if self.use_exact_onenorm:
            return self.d10_tight
        if self._d10_exact is not None:
            return self._d10_exact
        else:
            if self._d10_approx is None:
                self._d10_approx = _onenormest_product((self.A4, self.A6),
                        structure=self.structure)**(1/10.)
            return self._d10_approx

    def pade3(self):
        # Numerator U and denominator V of the order-3 Pade approximant.
        b = (120., 60., 12., 1.)
        U = _smart_matrix_product(self.A,
                b[3]*self.A2 + b[1]*self.ident,
                structure=self.structure)
        V = b[2]*self.A2 + b[0]*self.ident
        return U, V

    def pade5(self):
        # Numerator U and denominator V of the order-5 Pade approximant.
        b = (30240., 15120., 3360., 420., 30., 1.)
        U = _smart_matrix_product(self.A,
                b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident,
                structure=self.structure)
        V = b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident
        return U, V

    def pade7(self):
        # Numerator U and denominator V of the order-7 Pade approximant.
        b = (17297280., 8648640., 1995840., 277200., 25200., 1512., 56., 1.)
        U = _smart_matrix_product(self.A,
                b[7]*self.A6 + b[5]*self.A4 + b[3]*self.A2 + b[1]*self.ident,
                structure=self.structure)
        V = b[6]*self.A6 + b[4]*self.A4 + b[2]*self.A2 + b[0]*self.ident
        return U, V

    def pade9(self):
        # Numerator U and denominator V of the order-9 Pade approximant.
        b = (17643225600., 8821612800., 2075673600., 302702400., 30270240.,
                2162160., 110880., 3960., 90., 1.)
        U = _smart_matrix_product(self.A,
                (b[9]*self.A8 + b[7]*self.A6 + b[5]*self.A4 +
                    b[3]*self.A2 + b[1]*self.ident),
                structure=self.structure)
        V = (b[8]*self.A8 + b[6]*self.A6 + b[4]*self.A4 +
                b[2]*self.A2 + b[0]*self.ident)
        return U, V

    def pade13_scaled(self, s):
        # Order-13 Pade approximant of the scaled matrix B = 2**-s * A.
        # The even powers of B are obtained by rescaling the cached even
        # powers of A rather than recomputing matrix products.
        b = (64764752532480000., 32382376266240000., 7771770303897600.,
                1187353796428800., 129060195264000., 10559470521600.,
                670442572800., 33522128640., 1323241920., 40840800., 960960.,
                16380., 182., 1.)
        B = self.A * 2**-s
        B2 = self.A2 * 2**(-2*s)
        B4 = self.A4 * 2**(-4*s)
        B6 = self.A6 * 2**(-6*s)
        U2 = _smart_matrix_product(B6,
                b[13]*B6 + b[11]*B4 + b[9]*B2,
                structure=self.structure)
        U = _smart_matrix_product(B,
                (U2 + b[7]*B6 + b[5]*B4 +
                    b[3]*B2 + b[1]*self.ident),
                structure=self.structure)
        V2 = _smart_matrix_product(B6,
                b[12]*B6 + b[10]*B4 + b[8]*B2,
                structure=self.structure)
        V = V2 + b[6]*B6 + b[4]*B4 + b[2]*B2 + b[0]*self.ident
        return U, V
|
544 |
+
|
545 |
+
|
546 |
+
def expm(A):
    """
    Compute the matrix exponential using Pade approximation.

    Parameters
    ----------
    A : (M,M) array_like or sparse matrix
        2D Array or Matrix (sparse or dense) to be exponentiated

    Returns
    -------
    expA : (M,M) ndarray
        Matrix exponential of `A`

    Notes
    -----
    This is algorithm (6.1) which is a simplification of algorithm (5.1).

    .. versionadded:: 0.12.0

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2009)
           "A New Scaling and Squaring Algorithm for the Matrix Exponential."
           SIAM Journal on Matrix Analysis and Applications.
           31 (3). pp. 970-989. ISSN 1095-7162

    Examples
    --------
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import expm
    >>> A = csc_matrix([[1, 0, 0], [0, 2, 0], [0, 0, 3]])
    >>> A.toarray()
    array([[1, 0, 0],
           [0, 2, 0],
           [0, 0, 3]], dtype=int64)
    >>> Aexp = expm(A)
    >>> Aexp
    <3x3 sparse matrix of type '<class 'numpy.float64'>'
        with 3 stored elements in Compressed Sparse Column format>
    >>> Aexp.toarray()
    array([[  2.71828183,   0.        ,   0.        ],
           [  0.        ,   7.3890561 ,   0.        ],
           [  0.        ,   0.        ,  20.08553692]])
    """
    # 'auto' lets _expm pick exact vs. estimated one-norms based on the
    # order of the matrix.
    return _expm(A, use_exact_onenorm='auto')
|
592 |
+
|
593 |
+
|
594 |
+
def _expm(A, use_exact_onenorm):
    # Core of expm, separated to allow testing exact and approximate
    # algorithms.

    # Avoid indiscriminate asarray() to allow sparse or other strange arrays.
    if isinstance(A, (list, tuple, np.matrix)):
        A = np.asarray(A)
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected a square matrix')

    # gracefully handle size-0 input,
    # carefully handling sparse scenario
    if A.shape == (0, 0):
        out = np.zeros([0, 0], dtype=A.dtype)
        if issparse(A) or is_pydata_spmatrix(A):
            return A.__class__(out)
        return out

    # Trivial case: the exponential of a 1x1 matrix is elementwise exp.
    if A.shape == (1, 1):
        out = [[np.exp(A[0, 0])]]

        # Avoid indiscriminate casting to ndarray to
        # allow for sparse or other strange arrays
        if issparse(A) or is_pydata_spmatrix(A):
            return A.__class__(out)

        return np.array(out)

    # Ensure input is of float type, to avoid integer overflows etc.
    if ((isinstance(A, np.ndarray) or issparse(A) or is_pydata_spmatrix(A))
            and not np.issubdtype(A.dtype, np.inexact)):
        A = A.astype(float)

    # Detect upper triangularity so that triangular-aware multiplication
    # and the Schur-Parlett diagonal recomputation can be used later.
    structure = UPPER_TRIANGULAR if _is_upper_triangular(A) else None

    if use_exact_onenorm == "auto":
        # Hardcode a matrix order threshold for exact vs. estimated one-norms.
        use_exact_onenorm = A.shape[0] < 200

    # Track functions of A to help compute the matrix exponential.
    h = _ExpmPadeHelper(
            A, structure=structure, use_exact_onenorm=use_exact_onenorm)

    # Try Pade order 3.
    # The numeric thresholds below are the theta_m constants of the
    # scaling-and-squaring algorithm (see the reference in expm's docstring).
    eta_1 = max(h.d4_loose, h.d6_loose)
    if eta_1 < 1.495585217958292e-002 and _ell(h.A, 3) == 0:
        U, V = h.pade3()
        return _solve_P_Q(U, V, structure=structure)

    # Try Pade order 5.
    eta_2 = max(h.d4_tight, h.d6_loose)
    if eta_2 < 2.539398330063230e-001 and _ell(h.A, 5) == 0:
        U, V = h.pade5()
        return _solve_P_Q(U, V, structure=structure)

    # Try Pade orders 7 and 9.
    eta_3 = max(h.d6_tight, h.d8_loose)
    if eta_3 < 9.504178996162932e-001 and _ell(h.A, 7) == 0:
        U, V = h.pade7()
        return _solve_P_Q(U, V, structure=structure)
    if eta_3 < 2.097847961257068e+000 and _ell(h.A, 9) == 0:
        U, V = h.pade9()
        return _solve_P_Q(U, V, structure=structure)

    # Use Pade order 13.
    eta_4 = max(h.d8_loose, h.d10_loose)
    eta_5 = min(eta_3, eta_4)
    theta_13 = 4.25

    # Choose smallest s>=0 such that 2**(-s) eta_5 <= theta_13
    if eta_5 == 0:
        # Nilpotent special case
        s = 0
    else:
        s = max(int(np.ceil(np.log2(eta_5 / theta_13))), 0)
    # _ell may bump s further to guard against rounding in the bound.
    s = s + _ell(2**-s * h.A, 13)
    U, V = h.pade13_scaled(s)
    X = _solve_P_Q(U, V, structure=structure)
    if structure == UPPER_TRIANGULAR:
        # Invoke Code Fragment 2.1.
        X = _fragment_2_1(X, h.A, s)
    else:
        # X = r_13(A)^(2^s) by repeated squaring.
        for i in range(s):
            X = X.dot(X)
    return X
|
682 |
+
|
683 |
+
|
684 |
+
def _solve_P_Q(U, V, structure=None):
|
685 |
+
"""
|
686 |
+
A helper function for expm_2009.
|
687 |
+
|
688 |
+
Parameters
|
689 |
+
----------
|
690 |
+
U : ndarray
|
691 |
+
Pade numerator.
|
692 |
+
V : ndarray
|
693 |
+
Pade denominator.
|
694 |
+
structure : str, optional
|
695 |
+
A string describing the structure of both matrices `U` and `V`.
|
696 |
+
Only `upper_triangular` is currently supported.
|
697 |
+
|
698 |
+
Notes
|
699 |
+
-----
|
700 |
+
The `structure` argument is inspired by similar args
|
701 |
+
for theano and cvxopt functions.
|
702 |
+
|
703 |
+
"""
|
704 |
+
P = U + V
|
705 |
+
Q = -U + V
|
706 |
+
if issparse(U) or is_pydata_spmatrix(U):
|
707 |
+
return spsolve(Q, P)
|
708 |
+
elif structure is None:
|
709 |
+
return solve(Q, P)
|
710 |
+
elif structure == UPPER_TRIANGULAR:
|
711 |
+
return solve_triangular(Q, P)
|
712 |
+
else:
|
713 |
+
raise ValueError('unsupported matrix structure: ' + str(structure))
|
714 |
+
|
715 |
+
|
716 |
+
def _exp_sinch(a, x):
|
717 |
+
"""
|
718 |
+
Stably evaluate exp(a)*sinh(x)/x
|
719 |
+
|
720 |
+
Notes
|
721 |
+
-----
|
722 |
+
The strategy of falling back to a sixth order Taylor expansion
|
723 |
+
was suggested by the Spallation Neutron Source docs
|
724 |
+
which was found on the internet by google search.
|
725 |
+
http://www.ornl.gov/~t6p/resources/xal/javadoc/gov/sns/tools/math/ElementaryFunction.html
|
726 |
+
The details of the cutoff point and the Horner-like evaluation
|
727 |
+
was picked without reference to anything in particular.
|
728 |
+
|
729 |
+
Note that sinch is not currently implemented in scipy.special,
|
730 |
+
whereas the "engineer's" definition of sinc is implemented.
|
731 |
+
The implementation of sinc involves a scaling factor of pi
|
732 |
+
that distinguishes it from the "mathematician's" version of sinc.
|
733 |
+
|
734 |
+
"""
|
735 |
+
|
736 |
+
# If x is small then use sixth order Taylor expansion.
|
737 |
+
# How small is small? I am using the point where the relative error
|
738 |
+
# of the approximation is less than 1e-14.
|
739 |
+
# If x is large then directly evaluate sinh(x) / x.
|
740 |
+
if abs(x) < 0.0135:
|
741 |
+
x2 = x*x
|
742 |
+
return np.exp(a) * (1 + (x2/6.)*(1 + (x2/20.)*(1 + (x2/42.))))
|
743 |
+
else:
|
744 |
+
return (np.exp(a + x) - np.exp(a - x)) / (2*x)
|
745 |
+
|
746 |
+
|
747 |
+
def _eq_10_42(lam_1, lam_2, t_12):
|
748 |
+
"""
|
749 |
+
Equation (10.42) of Functions of Matrices: Theory and Computation.
|
750 |
+
|
751 |
+
Notes
|
752 |
+
-----
|
753 |
+
This is a helper function for _fragment_2_1 of expm_2009.
|
754 |
+
Equation (10.42) is on page 251 in the section on Schur algorithms.
|
755 |
+
In particular, section 10.4.3 explains the Schur-Parlett algorithm.
|
756 |
+
expm([[lam_1, t_12], [0, lam_1])
|
757 |
+
=
|
758 |
+
[[exp(lam_1), t_12*exp((lam_1 + lam_2)/2)*sinch((lam_1 - lam_2)/2)],
|
759 |
+
[0, exp(lam_2)]
|
760 |
+
"""
|
761 |
+
|
762 |
+
# The plain formula t_12 * (exp(lam_2) - exp(lam_2)) / (lam_2 - lam_1)
|
763 |
+
# apparently suffers from cancellation, according to Higham's textbook.
|
764 |
+
# A nice implementation of sinch, defined as sinh(x)/x,
|
765 |
+
# will apparently work around the cancellation.
|
766 |
+
a = 0.5 * (lam_1 + lam_2)
|
767 |
+
b = 0.5 * (lam_1 - lam_2)
|
768 |
+
return t_12 * _exp_sinch(a, b)
|
769 |
+
|
770 |
+
|
771 |
+
def _fragment_2_1(X, T, s):
    """
    A helper function for expm_2009.

    Parameters
    ----------
    X : matrix
        The Pade approximant r_m(2^-s T); squared in place below.
        Assumed square and, like `T`, upper triangular -- TODO confirm
        against the caller in _expm.
    T : matrix
        The original (unscaled) matrix being exponentiated.
    s : int
        The scaling exponent used by the scaling-and-squaring step.

    Notes
    -----
    The argument X is modified in-place, but this modification is not the same
    as the returned value of the function.
    This function also takes pains to do things in ways that are compatible
    with sparse matrices, for example by avoiding fancy indexing
    and by using methods of the matrices whenever possible instead of
    using functions of the numpy or scipy libraries themselves.

    """
    # Form X = r_m(2^-s T)
    # Replace diag(X) by exp(2^-s diag(T)).
    n = X.shape[0]
    diag_T = np.ravel(T.diagonal().copy())

    # Replace diag(X) by exp(2^-s diag(T)).
    scale = 2 ** -s
    exp_diag = np.exp(scale * diag_T)
    for k in range(n):
        X[k, k] = exp_diag[k]

    # Squaring phase: after each squaring, recompute the diagonal and the
    # first superdiagonal directly from T for better accuracy.
    for i in range(s-1, -1, -1):
        X = X.dot(X)

        # Replace diag(X) by exp(2^-i diag(T)).
        scale = 2 ** -i
        exp_diag = np.exp(scale * diag_T)
        for k in range(n):
            X[k, k] = exp_diag[k]

        # Replace (first) superdiagonal of X by explicit formula
        # for superdiagonal of exp(2^-i T) from Eq (10.42) of
        # the author's 2008 textbook
        # Functions of Matrices: Theory and Computation.
        for k in range(n-1):
            lam_1 = scale * diag_T[k]
            lam_2 = scale * diag_T[k+1]
            t_12 = scale * T[k, k+1]
            value = _eq_10_42(lam_1, lam_2, t_12)
            X[k, k+1] = value

    # Return the updated X matrix.
    return X
|
818 |
+
|
819 |
+
|
820 |
+
def _ell(A, m):
    """
    A helper function for expm_2009.

    Parameters
    ----------
    A : linear operator
        A linear operator whose norm of power we care about.
    m : int
        The power of the linear operator; must be one of {3, 5, 7, 9, 13}
        (a KeyError is raised otherwise).

    Returns
    -------
    value : int
        A non-negative value related to a bound.

    """
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected A to be like a square matrix')

    # Reciprocals of the |c_i| series coefficients explained in (2.2) and
    # (2.6) of the 2005 expm paper.
    abs_c_recip = {
        3: 100800.,
        5: 10059033600.,
        7: 4487938430976000.,
        9: 5914384781877411840000.,
        13: 113250775606021113483283660800000000.,
    }[m]

    # The "unit roundoff" of IEEE double precision arithmetic,
    # explained after Eq. (1.2) of the 2009 expm paper.
    u = 2**-53

    # One-norm of |A|^(2m+1); a zero norm short-circuits the bound.
    A_abs_onenorm = _onenorm_matrix_power_nnm(abs(A), 2*m + 1)
    if not A_abs_onenorm:
        return 0

    alpha = A_abs_onenorm / (_onenorm(A) * abs_c_recip)
    return max(int(np.ceil(np.log2(alpha / u) / (2 * m))), 0)
|
865 |
+
|
866 |
+
def matrix_power(A, power):
    """
    Raise a square matrix to the integer power, `power`.

    For non-negative integers, ``A**power`` is computed using repeated
    matrix multiplications. Negative integers are not supported.

    Parameters
    ----------
    A : (M, M) square sparse array or matrix
        sparse array that will be raised to power `power`
    power : int
        Exponent used to raise sparse array `A`

    Returns
    -------
    A**power : (M, M) sparse array or matrix
        The output matrix will be the same shape as A, and will preserve
        the class of A, but the format of the output may be changed.

    Notes
    -----
    This uses a recursive (square-and-multiply) implementation of the
    matrix power. For a reasonably large `power` this may be less
    efficient than computing the product directly via A @ A @ ... @ A,
    depending on the number of nonzero entries in the matrix.

    .. versionadded:: 1.12.0

    Examples
    --------
    >>> from scipy import sparse
    >>> A = sparse.csc_array([[0,1,0],[1,0,1],[0,1,0]])
    >>> sparse.linalg.matrix_power(A, 2).todense()
    array([[1, 0, 1],
           [0, 2, 0],
           [1, 0, 1]])
    >>> sparse.linalg.matrix_power(A, 4).todense()
    array([[2, 0, 2],
           [0, 4, 0],
           [2, 0, 2]])

    """
    M, N = A.shape
    if M != N:
        raise TypeError('sparse matrix is not square')

    if not isintlike(power):
        raise ValueError("exponent must be an integer")
    power = int(power)
    if power < 0:
        raise ValueError('exponent must be >= 0')

    # Base cases of the recursion.
    if power == 0:
        return eye(M, dtype=A.dtype)
    if power == 1:
        return A.copy()

    # Square-and-multiply: A**power == (A**(power//2))**2, times one extra
    # factor of A when the exponent is odd.
    tmp = matrix_power(A, power // 2)
    if power % 2:
        return A @ tmp @ tmp
    return tmp @ tmp
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_norm.py
ADDED
@@ -0,0 +1,193 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Sparse matrix norms.
|
2 |
+
|
3 |
+
"""
|
4 |
+
import numpy as np
|
5 |
+
from scipy.sparse import issparse
|
6 |
+
from scipy.sparse.linalg import svds
|
7 |
+
import scipy.sparse as sp
|
8 |
+
|
9 |
+
from numpy import sqrt, abs
|
10 |
+
|
11 |
+
__all__ = ['norm']
|
12 |
+
|
13 |
+
|
14 |
+
def _sparse_frobenius_norm(x):
|
15 |
+
data = sp._sputils._todata(x)
|
16 |
+
return np.linalg.norm(data)
|
17 |
+
|
18 |
+
|
19 |
+
def norm(x, ord=None, axis=None):
    """
    Norm of a sparse matrix

    This function is able to return one of seven different matrix norms,
    depending on the value of the ``ord`` parameter.

    Parameters
    ----------
    x : a sparse matrix
        Input sparse matrix.
    ord : {non-zero int, inf, -inf, 'fro'}, optional
        Order of the norm (see table under ``Notes``). inf means numpy's
        `inf` object.
    axis : {int, 2-tuple of ints, None}, optional
        If `axis` is an integer, it specifies the axis of `x` along which to
        compute the vector norms. If `axis` is a 2-tuple, it specifies the
        axes that hold 2-D matrices, and the matrix norms of these matrices
        are computed. If `axis` is None then either a vector norm (when `x`
        is 1-D) or a matrix norm (when `x` is 2-D) is returned.

    Returns
    -------
    n : float or ndarray

    Raises
    ------
    TypeError
        If `x` is not sparse, or `axis` is not None/int/tuple of ints.
    ValueError
        For an out-of-range or duplicate axis, or an unsupported `ord`.

    Notes
    -----
    Some of the ord are not implemented because some associated functions like,
    _multi_svd_norm, are not yet available for sparse matrix.

    This docstring is modified based on numpy.linalg.norm.
    https://github.com/numpy/numpy/blob/main/numpy/linalg/linalg.py

    The following norms can be calculated:

    =====  ============================
    ord    norm for sparse matrices
    =====  ============================
    None   Frobenius norm
    'fro'  Frobenius norm
    inf    max(sum(abs(x), axis=1))
    -inf   min(sum(abs(x), axis=1))
    0      abs(x).sum(axis=axis)
    1      max(sum(abs(x), axis=0))
    -1     min(sum(abs(x), axis=0))
    2      Spectral norm (the largest singular value)
    -2     Not implemented
    other  Not implemented
    =====  ============================

    The Frobenius norm is given by [1]_:

        :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`

    References
    ----------
    .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
           Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15

    Examples
    --------
    >>> from scipy.sparse import *
    >>> import numpy as np
    >>> from scipy.sparse.linalg import norm
    >>> a = np.arange(9) - 4
    >>> a
    array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
    >>> b = a.reshape((3, 3))
    >>> b
    array([[-4, -3, -2],
           [-1, 0, 1],
           [ 2, 3, 4]])

    >>> b = csr_matrix(b)
    >>> norm(b)
    7.745966692414834
    >>> norm(b, 'fro')
    7.745966692414834
    >>> norm(b, np.inf)
    9
    >>> norm(b, -np.inf)
    2
    >>> norm(b, 1)
    7
    >>> norm(b, -1)
    6

    The matrix 2-norm or the spectral norm is the largest singular
    value, computed approximately and with limitations.

    >>> b = diags([-1, 1], [0, 1], shape=(9, 10))
    >>> norm(b, 2)
    1.9753...
    """
    if not issparse(x):
        raise TypeError("input is not sparse. use numpy.linalg.norm")

    # Check the default case first and handle it immediately.
    if axis is None and ord in (None, 'fro', 'f'):
        return _sparse_frobenius_norm(x)

    # Some norms require functions that are not implemented for all types.
    x = x.tocsr()

    # Normalize `axis` to a tuple: None means the full matrix norm over
    # both axes; a single int is wrapped into a 1-tuple (vector norm).
    if axis is None:
        axis = (0, 1)
    elif not isinstance(axis, tuple):
        msg = "'axis' must be None, an integer or a tuple of integers"
        try:
            int_axis = int(axis)
        except TypeError as e:
            raise TypeError(msg) from e
        if axis != int_axis:
            # e.g. a non-integral float: reject rather than truncate.
            raise TypeError(msg)
        axis = (int_axis,)

    nd = 2
    if len(axis) == 2:
        # Matrix norms over both axes.
        row_axis, col_axis = axis
        if not (-nd <= row_axis < nd and -nd <= col_axis < nd):
            message = f'Invalid axis {axis!r} for an array with shape {x.shape!r}'
            raise ValueError(message)
        if row_axis % nd == col_axis % nd:
            raise ValueError('Duplicate axes given.')
        if ord == 2:
            # Only solver="lobpcg" supports all numpy dtypes
            _, s, _ = svds(x, k=1, solver="lobpcg")
            return s[0]
        elif ord == -2:
            raise NotImplementedError
            #return _multi_svd_norm(x, row_axis, col_axis, amin)
        elif ord == 1:
            # max column sum; the reduction yields a 1x1 sparse/matrix
            # result, hence the [0,0] scalar extraction.
            return abs(x).sum(axis=row_axis).max(axis=col_axis)[0,0]
        elif ord == np.inf:
            # max row sum
            return abs(x).sum(axis=col_axis).max(axis=row_axis)[0,0]
        elif ord == -1:
            # min column sum
            return abs(x).sum(axis=row_axis).min(axis=col_axis)[0,0]
        elif ord == -np.inf:
            # min row sum
            return abs(x).sum(axis=col_axis).min(axis=row_axis)[0,0]
        elif ord in (None, 'f', 'fro'):
            # The axis order does not matter for this norm.
            return _sparse_frobenius_norm(x)
        else:
            raise ValueError("Invalid norm order for matrices.")
    elif len(axis) == 1:
        # Vector norms along a single axis.
        a, = axis
        if not (-nd <= a < nd):
            message = f'Invalid axis {axis!r} for an array with shape {x.shape!r}'
            raise ValueError(message)
        if ord == np.inf:
            M = abs(x).max(axis=a)
        elif ord == -np.inf:
            M = abs(x).min(axis=a)
        elif ord == 0:
            # Zero norm
            M = (x != 0).sum(axis=a)
        elif ord == 1:
            # special case for speedup
            M = abs(x).sum(axis=a)
        elif ord in (2, None):
            M = sqrt(abs(x).power(2).sum(axis=a))
        else:
            try:
                # Reject non-numeric orders (e.g. strings) up front.
                ord + 1
            except TypeError as e:
                raise ValueError('Invalid norm order for vectors.') from e
            M = np.power(abs(x).power(ord).sum(axis=a), 1 / ord)
        # The reduction may return a sparse result, a np.matrix, or an
        # ndarray depending on the input type; flatten each to a 1-D array.
        if hasattr(M, 'toarray'):
            return M.toarray().ravel()
        elif hasattr(M, 'A'):
            return M.A.ravel()
        else:
            return M.ravel()
    else:
        raise ValueError("Improper number of dimensions to norm.")
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_onenormest.py
ADDED
@@ -0,0 +1,467 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Sparse block 1-norm estimator.
|
2 |
+
"""
|
3 |
+
|
4 |
+
import numpy as np
|
5 |
+
from scipy.sparse.linalg import aslinearoperator
|
6 |
+
|
7 |
+
|
8 |
+
__all__ = ['onenormest']
|
9 |
+
|
10 |
+
|
11 |
+
def onenormest(A, t=2, itmax=5, compute_v=False, compute_w=False):
    """
    Compute a lower bound of the 1-norm of a sparse matrix.

    Parameters
    ----------
    A : ndarray or other linear operator
        A linear operator that can be transposed and that can
        produce matrix products.
    t : int, optional
        A positive parameter controlling the tradeoff between
        accuracy versus time and memory usage.
        Larger values take longer and use more memory
        but give more accurate output.
    itmax : int, optional
        Use at most this many iterations.
    compute_v : bool, optional
        Request a norm-maximizing linear operator input vector if True.
    compute_w : bool, optional
        Request a norm-maximizing linear operator output vector if True.

    Returns
    -------
    est : float
        An underestimate of the 1-norm of the sparse matrix.
    v : ndarray, optional
        The vector such that ||Av||_1 == est*||v||_1.
        It can be thought of as an input to the linear operator
        that gives an output with particularly large norm.
    w : ndarray, optional
        The vector Av which has relatively large 1-norm.
        It can be thought of as an output of the linear operator
        that is relatively large in norm compared to the input.

    Notes
    -----
    This is algorithm 2.4 of [1].

    In [2] it is described as follows.
    "This algorithm typically requires the evaluation of
    about 4t matrix-vector products and almost invariably
    produces a norm estimate (which is, in fact, a lower
    bound on the norm) correct to within a factor 3."

    .. versionadded:: 0.13.0

    References
    ----------
    .. [1] Nicholas J. Higham and Francoise Tisseur (2000),
           "A Block Algorithm for Matrix 1-Norm Estimation,
           with an Application to 1-Norm Pseudospectra."
           SIAM J. Matrix Anal. Appl. Vol. 21, No. 4, pp. 1185-1201.

    .. [2] Awad H. Al-Mohy and Nicholas J. Higham (2009),
           "A new scaling and squaring algorithm for the matrix exponential."
           SIAM J. Matrix Anal. Appl. Vol. 31, No. 3, pp. 970-989.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import onenormest
    >>> A = csc_matrix([[1., 0., 0.], [5., 8., 2.], [0., -1., 0.]], dtype=float)
    >>> onenormest(A)
    9.0
    >>> np.linalg.norm(A.toarray(), ord=1)
    9.0
    """
    # Validate the operator.
    op = aslinearoperator(A)
    if op.shape[0] != op.shape[1]:
        raise ValueError('expected the operator to act like a square matrix')

    n = op.shape[1]
    if t >= n:
        # The operator is small relative to t, so it is cheaper to compute
        # the exact 1-norm from the dense column sums.
        A_explicit = np.asarray(op.matmat(np.identity(n)))
        if A_explicit.shape != (n, n):
            raise Exception('internal error: ',
                    'unexpected shape ' + str(A_explicit.shape))
        col_abs_sums = abs(A_explicit).sum(axis=0)
        if col_abs_sums.shape != (n, ):
            raise Exception('internal error: ',
                    'unexpected shape ' + str(col_abs_sums.shape))
        argmax_j = np.argmax(col_abs_sums)
        est = col_abs_sums[argmax_j]
        v = elementary_vector(n, argmax_j)
        w = A_explicit[:, argmax_j]
    else:
        # Estimate the norm with the block algorithm.
        est, v, w, _, _ = _onenormest_core(op, op.H, t, itmax)

    # Report the norm estimate along with any requested certificates.
    if compute_v or compute_w:
        result = (est,)
        if compute_v:
            result += (v,)
        if compute_w:
            result += (w,)
        return result
    return est
|
119 |
+
|
120 |
+
|
121 |
+
def _blocked_elementwise(func):
|
122 |
+
"""
|
123 |
+
Decorator for an elementwise function, to apply it blockwise along
|
124 |
+
first dimension, to avoid excessive memory usage in temporaries.
|
125 |
+
"""
|
126 |
+
block_size = 2**20
|
127 |
+
|
128 |
+
def wrapper(x):
|
129 |
+
if x.shape[0] < block_size:
|
130 |
+
return func(x)
|
131 |
+
else:
|
132 |
+
y0 = func(x[:block_size])
|
133 |
+
y = np.zeros((x.shape[0],) + y0.shape[1:], dtype=y0.dtype)
|
134 |
+
y[:block_size] = y0
|
135 |
+
del y0
|
136 |
+
for j in range(block_size, x.shape[0], block_size):
|
137 |
+
y[j:j+block_size] = func(x[j:j+block_size])
|
138 |
+
return y
|
139 |
+
return wrapper
|
140 |
+
|
141 |
+
|
142 |
+
@_blocked_elementwise
def sign_round_up(X):
    """
    Entrywise sign with sign(0) defined as 1.

    This should do the right thing for both real and complex matrices.

    From Higham and Tisseur:
    "Everything in this section remains valid for complex matrices
    provided that sign(A) is redefined as the matrix (aij / |aij|)
    (and sign(0) = 1) transposes are replaced by conjugate transposes."

    """
    out = X.copy()
    # Map zeros to 1 first so the division below never divides by zero.
    out[out == 0] = 1
    out /= np.abs(out)
    return out
|
157 |
+
|
158 |
+
|
159 |
+
@_blocked_elementwise
def _max_abs_axis1(X):
    """Row-wise maximum absolute value, computed blockwise via the decorator."""
    return np.abs(X).max(axis=1)
|
162 |
+
|
163 |
+
|
164 |
+
def _sum_abs_axis0(X):
|
165 |
+
block_size = 2**20
|
166 |
+
r = None
|
167 |
+
for j in range(0, X.shape[0], block_size):
|
168 |
+
y = np.sum(np.abs(X[j:j+block_size]), axis=0)
|
169 |
+
if r is None:
|
170 |
+
r = y
|
171 |
+
else:
|
172 |
+
r += y
|
173 |
+
return r
|
174 |
+
|
175 |
+
|
176 |
+
def elementary_vector(n, i):
    """Return the length-n float vector with a 1 at index i and 0 elsewhere."""
    e = np.zeros(n, dtype=float)
    e[i] = 1
    return e
|
180 |
+
|
181 |
+
|
182 |
+
def vectors_are_parallel(v, w):
    """
    Test whether two sign vectors coincide.

    Columns are considered parallel when they are equal or negative.
    Entries are required to be in {-1, 1}, which guarantees that the
    magnitudes of the vectors are identical; under that assumption the
    dot product equals the length exactly when the vectors are equal.
    """
    if v.ndim != 1 or v.shape != w.shape:
        raise ValueError('expected conformant vectors with entries in {-1,1}')
    return np.dot(v, w) == v.shape[0]
|
190 |
+
|
191 |
+
|
192 |
+
def every_col_of_X_is_parallel_to_a_col_of_Y(X, Y):
    """True iff each column of X is parallel to some column of Y."""
    return all(
        any(vectors_are_parallel(col, w) for w in Y.T)
        for col in X.T
    )
|
197 |
+
|
198 |
+
|
199 |
+
def column_needs_resampling(i, X, Y=None):
    """
    Decide whether column i of X must be re-randomized.

    Column i of X needs resampling if either it is parallel to a previous
    column of X, or it is parallel to a column of Y.
    """
    v = X[:, i]
    if any(vectors_are_parallel(v, X[:, j]) for j in range(i)):
        return True
    return Y is not None and any(vectors_are_parallel(v, w) for w in Y.T)
|
211 |
+
|
212 |
+
|
213 |
+
def resample_column(i, X):
    """Overwrite column i of X in-place with random entries from {-1, +1}."""
    X[:, i] = np.random.randint(0, 2, size=X.shape[0]) * 2 - 1
|
215 |
+
|
216 |
+
|
217 |
+
def less_than_or_close(a, b):
    """Tolerant comparison: True when a < b or a is close to b (np.allclose)."""
    if np.allclose(a, b):
        return True
    return a < b
|
219 |
+
|
220 |
+
|
221 |
+
def _algorithm_2_2(A, AT, t):
    """
    This is Algorithm 2.2.

    Parameters
    ----------
    A : ndarray or other linear operator
        A linear operator that can produce matrix products.
    AT : ndarray or other linear operator
        The transpose of A.
    t : int, optional
        A positive parameter controlling the tradeoff between
        accuracy versus time and memory usage.

    Returns
    -------
    g : sequence
        A non-negative decreasing vector
        such that g[j] is a lower bound for the 1-norm
        of the column of A of jth largest 1-norm.
        The first entry of this vector is therefore a lower bound
        on the 1-norm of the linear operator A.
        This sequence has length t.
    ind : sequence
        The ith entry of ind is the index of the column A whose 1-norm
        is given by g[i].
        This sequence of indices has length t, and its entries are
        chosen from range(n), possibly with repetition,
        where n is the order of the operator A.

    Notes
    -----
    This algorithm is mainly for testing.
    It uses the 'ind' array in a way that is similar to
    its usage in algorithm 2.4. This algorithm 2.2 may be easier to test,
    so it gives a chance of uncovering bugs related to indexing
    which could have propagated less noticeably to algorithm 2.4.

    """
    A_linear_operator = aslinearoperator(A)
    AT_linear_operator = aslinearoperator(AT)
    n = A_linear_operator.shape[0]

    # Initialize the X block with columns of unit 1-norm: the first column
    # is uniform, the rest are random +/-1 vectors scaled by 1/n.
    X = np.ones((n, t))
    if t > 1:
        X[:, 1:] = np.random.randint(0, 2, size=(n, t-1))*2 - 1
    X /= float(n)

    # Iteratively improve the lower bounds.
    # Track extra things, to assert invariants for debugging.
    g_prev = None
    h_prev = None
    k = 1
    ind = range(t)
    while True:
        Y = np.asarray(A_linear_operator.matmat(X))
        # g holds the column 1-norms of Y; remember the best column before
        # sorting g into decreasing order.
        g = _sum_abs_axis0(Y)
        best_j = np.argmax(g)
        g.sort()
        g = g[::-1]
        S = sign_round_up(Y)
        Z = np.asarray(AT_linear_operator.matmat(S))
        h = _max_abs_axis1(Z)

        # If this algorithm runs for fewer than two iterations,
        # then its return values do not have the properties indicated
        # in the description of the algorithm.
        # In particular, the entries of g are not 1-norms of any
        # column of A until the second iteration.
        # Therefore we will require the algorithm to run for at least
        # two iterations, even though this requirement is not stated
        # in the description of the algorithm.
        if k >= 2:
            if less_than_or_close(max(h), np.dot(Z[:, best_j], X[:, best_j])):
                break
        # Pick the t most promising column indices and replace X with the
        # corresponding elementary vectors for the next iteration.
        ind = np.argsort(h)[::-1][:t]
        h = h[ind]
        for j in range(t):
            X[:, j] = elementary_vector(n, ind[j])

        # Check invariant (2.2).
        if k >= 2:
            if not less_than_or_close(g_prev[0], h_prev[0]):
                raise Exception('invariant (2.2) is violated')
            if not less_than_or_close(h_prev[0], g[0]):
                raise Exception('invariant (2.2) is violated')

        # Check invariant (2.3).
        if k >= 3:
            for j in range(t):
                if not less_than_or_close(g[j], g_prev[j]):
                    raise Exception('invariant (2.3) is violated')

        # Update for the next iteration.
        g_prev = g
        h_prev = h
        k += 1

    # Return the lower bounds and the corresponding column indices.
    return g, ind
|
322 |
+
|
323 |
+
|
324 |
+
def _onenormest_core(A, AT, t, itmax):
    """
    Compute a lower bound of the 1-norm of a sparse matrix.

    Parameters
    ----------
    A : ndarray or other linear operator
        A linear operator that can produce matrix products.
    AT : ndarray or other linear operator
        The transpose of A.
    t : int, optional
        A positive parameter controlling the tradeoff between
        accuracy versus time and memory usage.
    itmax : int, optional
        Use at most this many iterations.

    Returns
    -------
    est : float
        An underestimate of the 1-norm of the sparse matrix.
    v : ndarray, optional
        The vector such that ||Av||_1 == est*||v||_1.
        It can be thought of as an input to the linear operator
        that gives an output with particularly large norm.
    w : ndarray, optional
        The vector Av which has relatively large 1-norm.
        It can be thought of as an output of the linear operator
        that is relatively large in norm compared to the input.
    nmults : int, optional
        The number of matrix products that were computed.
    nresamples : int, optional
        The number of times a parallel column was observed,
        necessitating a re-randomization of the column.

    Notes
    -----
    This is algorithm 2.4.

    """
    # This function is a more or less direct translation
    # of Algorithm 2.4 from the Higham and Tisseur (2000) paper.
    A_linear_operator = aslinearoperator(A)
    AT_linear_operator = aslinearoperator(AT)
    if itmax < 2:
        raise ValueError('at least two iterations are required')
    if t < 1:
        raise ValueError('at least one column is required')
    n = A.shape[0]
    if t >= n:
        raise ValueError('t should be smaller than the order of A')
    # Track the number of big*small matrix multiplications
    # and the number of resamplings.
    nmults = 0
    nresamples = 0
    # "We now explain our choice of starting matrix. We take the first
    # column of X to be the vector of 1s [...] This has the advantage that
    # for a matrix with nonnegative elements the algorithm converges
    # with an exact estimate on the second iteration, and such matrices
    # arise in applications [...]"
    X = np.ones((n, t), dtype=float)
    # "The remaining columns are chosen as rand{-1,1},
    # with a check for and correction of parallel columns,
    # exactly as for S in the body of the algorithm."
    if t > 1:
        for i in range(1, t):
            # These are technically initial samples, not resamples,
            # so the resampling count is not incremented.
            resample_column(i, X)
        for i in range(t):
            while column_needs_resampling(i, X):
                resample_column(i, X)
                nresamples += 1
    # "Choose starting matrix X with columns of unit 1-norm."
    X /= float(n)
    # "indices of used unit vectors e_j"
    ind_hist = np.zeros(0, dtype=np.intp)
    est_old = 0
    S = np.zeros((n, t), dtype=float)
    k = 1
    # ind is filled at the end of the first (k == 1) pass, before any
    # k >= 2 read of it below.
    ind = None
    while True:
        Y = np.asarray(A_linear_operator.matmat(X))
        nmults += 1
        # Column 1-norms of Y = A X; their max is the current estimate.
        mags = _sum_abs_axis0(Y)
        est = np.max(mags)
        best_j = np.argmax(mags)
        # Record the best column whenever the estimate improves.  The
        # "or k == 2" clause forces a record on the second iteration so
        # that w (and ind_best) are defined even if the estimate did not
        # improve, which the break at (1) relies on.
        if est > est_old or k == 2:
            if k >= 2:
                ind_best = ind[best_j]
            w = Y[:, best_j]
        # (1) Convergence test: stop when the estimate stops increasing,
        # keeping the previous (larger) value.
        if k >= 2 and est <= est_old:
            est = est_old
            break
        est_old = est
        S_old = S
        if k > itmax:
            break
        S = sign_round_up(Y)
        del Y
        # (2) If every column of S is parallel to a column of S_old,
        # another iteration cannot produce new information.
        if every_col_of_X_is_parallel_to_a_col_of_Y(S, S_old):
            break
        if t > 1:
            # "Ensure that no column of S is parallel to another column of S
            # or to a column of S_old by replacing columns of S by rand{-1,1}."
            for i in range(t):
                while column_needs_resampling(i, S, S_old):
                    resample_column(i, S)
                    nresamples += 1
        del S_old
        # (3)
        Z = np.asarray(AT_linear_operator.matmat(S))
        nmults += 1
        h = _max_abs_axis1(Z)
        del Z
        # (4) Stop if the best index from the previous iteration is
        # already (one of) the maximizer(s) of h.
        if k >= 2 and max(h) == h[ind_best]:
            break
        # "Sort h so that h_first >= ... >= h_last
        # and re-order ind correspondingly."
        #
        # Later on, we will need at most t+len(ind_hist) largest
        # entries, so drop the rest
        ind = np.argsort(h)[::-1][:t+len(ind_hist)].copy()
        del h
        if t > 1:
            # (5)
            # Break if the most promising t vectors have been visited already.
            if np.isin(ind[:t], ind_hist).all():
                break
            # Put the most promising unvisited vectors at the front of the list
            # and put the visited vectors at the end of the list.
            # Preserve the order of the indices induced by the ordering of h.
            seen = np.isin(ind, ind_hist)
            ind = np.concatenate((ind[~seen], ind[seen]))
        # Next X holds the t unit vectors selected above.
        for j in range(t):
            X[:, j] = elementary_vector(n, ind[j])

        # Remember which unit vectors have been tried so they are not
        # prioritized again (checks (4)/(5) above depend on this).
        new_ind = ind[:t][~np.isin(ind[:t], ind_hist)]
        ind_hist = np.concatenate((ind_hist, new_ind))
        k += 1
    # v is the unit vector achieving the estimate: ||A v||_1 == est.
    v = elementary_vector(n, ind_best)
    return est, v, w, nmults, nresamples
|
llmeval-env/lib/python3.10/site-packages/scipy/sparse/linalg/_propack/_cpropack.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (158 kB). View file
|
|