Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- .gitattributes +1 -0
- env-llmeval/lib/python3.10/site-packages/scipy/misc/common.py +20 -0
- env-llmeval/lib/python3.10/site-packages/scipy/misc/ecg.dat +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/_sparsetools.cpython-310-x86_64-linux-gnu.so +3 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/linsolve.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/__init__.py +22 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/__pycache__/_svds.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/__pycache__/_svds_doc.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/_svds.py +545 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/_svds_doc.py +400 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/COPYING +45 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/__init__.py +20 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/__pycache__/arpack.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/_arpack.cpython-310-x86_64-linux-gnu.so +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/arpack.py +1702 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/tests/__init__.py +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/tests/__pycache__/test_arpack.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/tests/test_arpack.py +718 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/__init__.py +16 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/__pycache__/lobpcg.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/lobpcg.py +1112 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/tests/__init__.py +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/tests/__pycache__/test_lobpcg.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/tests/test_lobpcg.py +645 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/tests/__init__.py +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/tests/__pycache__/test_svds.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/tests/test_svds.py +862 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/_gcrotmk.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/lgmres.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/lsmr.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/lsqr.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/minres.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/tfqmr.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/_gcrotmk.py +514 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/lsmr.py +486 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__init__.py +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_gcrotmk.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_iterative.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_lgmres.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_lsmr.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_lsqr.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_minres.cpython-310.pyc +0 -0
.gitattributes
CHANGED
@@ -157,3 +157,4 @@ env-llmeval/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcupti.so.12 fi
 env-llmeval/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_host.so filter=lfs diff=lfs merge=lfs -text
 env-llmeval/lib/python3.10/site-packages/tokenizers/tokenizers.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 llmeval-env/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so filter=lfs diff=lfs merge=lfs -text
+env-llmeval/lib/python3.10/site-packages/scipy/sparse/_sparsetools.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
env-llmeval/lib/python3.10/site-packages/scipy/misc/common.py
ADDED
@@ -0,0 +1,20 @@
+# This file is not meant for public use and will be removed in SciPy v2.0.0.
+# Use the `scipy.datasets` namespace for importing the dataset functions
+# included below.
+
+from scipy._lib.deprecation import _sub_module_deprecation
+
+__all__ = [  # noqa: F822
+    'central_diff_weights', 'derivative', 'ascent', 'face',
+    'electrocardiogram', 'array', 'load'
+]
+
+
+def __dir__():
+    return __all__
+
+
+def __getattr__(name):
+    return _sub_module_deprecation(sub_package="misc", module="common",
+                                   private_modules=["_common"], all=__all__,
+                                   attribute=name)
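The shim added above relies on the module-level `__getattr__` hook (PEP 562) to forward attribute access for the old `scipy.misc` names while emitting a deprecation warning. A minimal self-contained sketch of that pattern, not part of the diff; the forwarded module (`math`) and the exposed names are arbitrary placeholders chosen only so the example runs anywhere:

# PEP 562 deprecation-shim sketch (illustrative; target module and names are placeholders)
import math
import warnings

__all__ = ['sqrt', 'pi']  # names still reachable through this legacy namespace


def __dir__():
    return __all__


def __getattr__(name):
    # Called only when normal attribute lookup on this module fails.
    if name not in __all__:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    warnings.warn(f"accessing {name!r} through this module is deprecated",
                  DeprecationWarning, stacklevel=2)
    return getattr(math, name)  # forward to the real implementation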
env-llmeval/lib/python3.10/site-packages/scipy/misc/ecg.dat
ADDED
Binary file (119 kB).
env-llmeval/lib/python3.10/site-packages/scipy/sparse/_sparsetools.cpython-310-x86_64-linux-gnu.so
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eefa5f8b2a452e465518c29630626ee17c5a5eff022e165520382cff2b1966a8
+size 4466608
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_dsolve/__pycache__/linsolve.cpython-310.pyc
ADDED
Binary file (21.4 kB).
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/__init__.py
ADDED
@@ -0,0 +1,22 @@
+"""
+Sparse Eigenvalue Solvers
+-------------------------
+
+The submodules of sparse.linalg._eigen:
+    1. lobpcg: Locally Optimal Block Preconditioned Conjugate Gradient Method
+
+"""
+from .arpack import *
+from .lobpcg import *
+from ._svds import svds
+
+from . import arpack
+
+__all__ = [
+    'ArpackError', 'ArpackNoConvergence',
+    'eigs', 'eigsh', 'lobpcg', 'svds'
+]
+
+from scipy._lib._testutils import PytestTester
+test = PytestTester(__name__)
+del PytestTester
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (650 Bytes).
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/__pycache__/_svds.cpython-310.pyc
ADDED
Binary file (17.3 kB).
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/__pycache__/_svds_doc.cpython-310.pyc
ADDED
Binary file (15.7 kB).
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/_svds.py
ADDED
@@ -0,0 +1,545 @@
+import numpy as np
+
+from .arpack import _arpack  # type: ignore[attr-defined]
+from . import eigsh
+
+from scipy._lib._util import check_random_state
+from scipy.sparse.linalg._interface import LinearOperator, aslinearoperator
+from scipy.sparse.linalg._eigen.lobpcg import lobpcg  # type: ignore[no-redef]
+from scipy.sparse.linalg._svdp import _svdp
+from scipy.linalg import svd
+
+arpack_int = _arpack.timing.nbx.dtype
+__all__ = ['svds']
+
+
+def _herm(x):
+    return x.T.conj()
+
+
+def _iv(A, k, ncv, tol, which, v0, maxiter,
+        return_singular, solver, random_state):
+
+    # input validation/standardization for `solver`
+    # out of order because it's needed for other parameters
+    solver = str(solver).lower()
+    solvers = {"arpack", "lobpcg", "propack"}
+    if solver not in solvers:
+        raise ValueError(f"solver must be one of {solvers}.")
+
+    # input validation/standardization for `A`
+    A = aslinearoperator(A)  # this takes care of some input validation
+    if not (np.issubdtype(A.dtype, np.complexfloating)
+            or np.issubdtype(A.dtype, np.floating)):
+        message = "`A` must be of floating or complex floating data type."
+        raise ValueError(message)
+    if np.prod(A.shape) == 0:
+        message = "`A` must not be empty."
+        raise ValueError(message)
+
+    # input validation/standardization for `k`
+    kmax = min(A.shape) if solver == 'propack' else min(A.shape) - 1
+    if int(k) != k or not (0 < k <= kmax):
+        message = "`k` must be an integer satisfying `0 < k < min(A.shape)`."
+        raise ValueError(message)
+    k = int(k)
+
+    # input validation/standardization for `ncv`
+    if solver == "arpack" and ncv is not None:
+        if int(ncv) != ncv or not (k < ncv < min(A.shape)):
+            message = ("`ncv` must be an integer satisfying "
+                       "`k < ncv < min(A.shape)`.")
+            raise ValueError(message)
+        ncv = int(ncv)
+
+    # input validation/standardization for `tol`
+    if tol < 0 or not np.isfinite(tol):
+        message = "`tol` must be a non-negative floating point value."
+        raise ValueError(message)
+    tol = float(tol)
+
+    # input validation/standardization for `which`
+    which = str(which).upper()
+    whichs = {'LM', 'SM'}
+    if which not in whichs:
+        raise ValueError(f"`which` must be in {whichs}.")
+
+    # input validation/standardization for `v0`
+    if v0 is not None:
+        v0 = np.atleast_1d(v0)
+        if not (np.issubdtype(v0.dtype, np.complexfloating)
+                or np.issubdtype(v0.dtype, np.floating)):
+            message = ("`v0` must be of floating or complex floating "
+                       "data type.")
+            raise ValueError(message)
+
+        shape = (A.shape[0],) if solver == 'propack' else (min(A.shape),)
+        if v0.shape != shape:
+            message = f"`v0` must have shape {shape}."
+            raise ValueError(message)
+
+    # input validation/standardization for `maxiter`
+    if maxiter is not None and (int(maxiter) != maxiter or maxiter <= 0):
+        message = "`maxiter` must be a positive integer."
+        raise ValueError(message)
+    maxiter = int(maxiter) if maxiter is not None else maxiter
+
+    # input validation/standardization for `return_singular_vectors`
+    # not going to be flexible with this; too complicated for little gain
+    rs_options = {True, False, "vh", "u"}
+    if return_singular not in rs_options:
+        raise ValueError(f"`return_singular_vectors` must be in {rs_options}.")
+
+    random_state = check_random_state(random_state)
+
+    return (A, k, ncv, tol, which, v0, maxiter,
+            return_singular, solver, random_state)
+
+
+def svds(A, k=6, ncv=None, tol=0, which='LM', v0=None,
+         maxiter=None, return_singular_vectors=True,
+         solver='arpack', random_state=None, options=None):
+    """
+    Partial singular value decomposition of a sparse matrix.
+
+    Compute the largest or smallest `k` singular values and corresponding
+    singular vectors of a sparse matrix `A`. The order in which the singular
+    values are returned is not guaranteed.
+
+    In the descriptions below, let ``M, N = A.shape``.
+
+    Parameters
+    ----------
+    A : ndarray, sparse matrix, or LinearOperator
+        Matrix to decompose of a floating point numeric dtype.
+    k : int, default: 6
+        Number of singular values and singular vectors to compute.
+        Must satisfy ``1 <= k <= kmax``, where ``kmax=min(M, N)`` for
+        ``solver='propack'`` and ``kmax=min(M, N) - 1`` otherwise.
+    ncv : int, optional
+        When ``solver='arpack'``, this is the number of Lanczos vectors
+        generated. See :ref:`'arpack' <sparse.linalg.svds-arpack>` for details.
+        When ``solver='lobpcg'`` or ``solver='propack'``, this parameter is
+        ignored.
+    tol : float, optional
+        Tolerance for singular values. Zero (default) means machine precision.
+    which : {'LM', 'SM'}
+        Which `k` singular values to find: either the largest magnitude ('LM')
+        or smallest magnitude ('SM') singular values.
+    v0 : ndarray, optional
+        The starting vector for iteration; see method-specific
+        documentation (:ref:`'arpack' <sparse.linalg.svds-arpack>`,
+        :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`), or
+        :ref:`'propack' <sparse.linalg.svds-propack>` for details.
+    maxiter : int, optional
+        Maximum number of iterations; see method-specific
+        documentation (:ref:`'arpack' <sparse.linalg.svds-arpack>`,
+        :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`), or
+        :ref:`'propack' <sparse.linalg.svds-propack>` for details.
+    return_singular_vectors : {True, False, "u", "vh"}
+        Singular values are always computed and returned; this parameter
+        controls the computation and return of singular vectors.
+
+        - ``True``: return singular vectors.
+        - ``False``: do not return singular vectors.
+        - ``"u"``: if ``M <= N``, compute only the left singular vectors and
+          return ``None`` for the right singular vectors. Otherwise, compute
+          all singular vectors.
+        - ``"vh"``: if ``M > N``, compute only the right singular vectors and
+          return ``None`` for the left singular vectors. Otherwise, compute
+          all singular vectors.
+
+        If ``solver='propack'``, the option is respected regardless of the
+        matrix shape.
+
+    solver : {'arpack', 'propack', 'lobpcg'}, optional
+        The solver used.
+        :ref:`'arpack' <sparse.linalg.svds-arpack>`,
+        :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`, and
+        :ref:`'propack' <sparse.linalg.svds-propack>` are supported.
+        Default: `'arpack'`.
+    random_state : {None, int, `numpy.random.Generator`,
+                    `numpy.random.RandomState`}, optional
+
+        Pseudorandom number generator state used to generate resamples.
+
+        If `random_state` is ``None`` (or `np.random`), the
+        `numpy.random.RandomState` singleton is used.
+        If `random_state` is an int, a new ``RandomState`` instance is used,
+        seeded with `random_state`.
+        If `random_state` is already a ``Generator`` or ``RandomState``
+        instance then that instance is used.
+    options : dict, optional
+        A dictionary of solver-specific options. No solver-specific options
+        are currently supported; this parameter is reserved for future use.
+
+    Returns
+    -------
+    u : ndarray, shape=(M, k)
+        Unitary matrix having left singular vectors as columns.
+    s : ndarray, shape=(k,)
+        The singular values.
+    vh : ndarray, shape=(k, N)
+        Unitary matrix having right singular vectors as rows.
+
+    Notes
+    -----
+    This is a naive implementation using ARPACK or LOBPCG as an eigensolver
+    on the matrix ``A.conj().T @ A`` or ``A @ A.conj().T``, depending on
+    which one is smaller size, followed by the Rayleigh-Ritz method
+    as postprocessing; see
+    Using the normal matrix, in Rayleigh-Ritz method, (2022, Nov. 19),
+    Wikipedia, https://w.wiki/4zms.
+
+    Alternatively, the PROPACK solver can be called.
+
+    Choices of the input matrix `A` numeric dtype may be limited.
+    Only ``solver="lobpcg"`` supports all floating point dtypes
+    real: 'np.float32', 'np.float64', 'np.longdouble' and
+    complex: 'np.complex64', 'np.complex128', 'np.clongdouble'.
+    The ``solver="arpack"`` supports only
+    'np.float32', 'np.float64', and 'np.complex128'.
+
+    Examples
+    --------
+    Construct a matrix `A` from singular values and vectors.
+
+    >>> import numpy as np
+    >>> from scipy import sparse, linalg, stats
+    >>> from scipy.sparse.linalg import svds, aslinearoperator, LinearOperator
+
+    Construct a dense matrix `A` from singular values and vectors.
+
+    >>> rng = np.random.default_rng(258265244568965474821194062361901728911)
+    >>> orthogonal = stats.ortho_group.rvs(10, random_state=rng)
+    >>> s = [1e-3, 1, 2, 3, 4]  # non-zero singular values
+    >>> u = orthogonal[:, :5]         # left singular vectors
+    >>> vT = orthogonal[:, 5:].T      # right singular vectors
+    >>> A = u @ np.diag(s) @ vT
+
+    With only four singular values/vectors, the SVD approximates the original
+    matrix.
+
+    >>> u4, s4, vT4 = svds(A, k=4)
+    >>> A4 = u4 @ np.diag(s4) @ vT4
+    >>> np.allclose(A4, A, atol=1e-3)
+    True
+
+    With all five non-zero singular values/vectors, we can reproduce
+    the original matrix more accurately.
+
+    >>> u5, s5, vT5 = svds(A, k=5)
+    >>> A5 = u5 @ np.diag(s5) @ vT5
+    >>> np.allclose(A5, A)
+    True
+
+    The singular values match the expected singular values.
+
+    >>> np.allclose(s5, s)
+    True
+
+    Since the singular values are not close to each other in this example,
+    every singular vector matches as expected up to a difference in sign.
+
+    >>> (np.allclose(np.abs(u5), np.abs(u)) and
+    ...  np.allclose(np.abs(vT5), np.abs(vT)))
+    True
+
+    The singular vectors are also orthogonal.
+
+    >>> (np.allclose(u5.T @ u5, np.eye(5)) and
+    ...  np.allclose(vT5 @ vT5.T, np.eye(5)))
+    True
+
+    If there are (nearly) multiple singular values, the corresponding
+    individual singular vectors may be unstable, but the whole invariant
+    subspace containing all such singular vectors is computed accurately
+    as can be measured by angles between subspaces via 'subspace_angles'.
+
+    >>> rng = np.random.default_rng(178686584221410808734965903901790843963)
+    >>> s = [1, 1 + 1e-6]  # non-zero singular values
+    >>> u, _ = np.linalg.qr(rng.standard_normal((99, 2)))
+    >>> v, _ = np.linalg.qr(rng.standard_normal((99, 2)))
+    >>> vT = v.T
+    >>> A = u @ np.diag(s) @ vT
+    >>> A = A.astype(np.float32)
+    >>> u2, s2, vT2 = svds(A, k=2, random_state=rng)
+    >>> np.allclose(s2, s)
+    True
+
+    The angles between the individual exact and computed singular vectors
+    may not be so small. To check use:
+
+    >>> (linalg.subspace_angles(u2[:, :1], u[:, :1]) +
+    ...  linalg.subspace_angles(u2[:, 1:], u[:, 1:]))
+    array([0.06562513])  # may vary
+    >>> (linalg.subspace_angles(vT2[:1, :].T, vT[:1, :].T) +
+    ...  linalg.subspace_angles(vT2[1:, :].T, vT[1:, :].T))
+    array([0.06562507])  # may vary
+
+    As opposed to the angles between the 2-dimensional invariant subspaces
+    that these vectors span, which are small for rights singular vectors
+
+    >>> linalg.subspace_angles(u2, u).sum() < 1e-6
+    True
+
+    as well as for left singular vectors.
+
+    >>> linalg.subspace_angles(vT2.T, vT.T).sum() < 1e-6
+    True
+
+    The next example follows that of 'sklearn.decomposition.TruncatedSVD'.
+
+    >>> rng = np.random.RandomState(0)
+    >>> X_dense = rng.random(size=(100, 100))
+    >>> X_dense[:, 2 * np.arange(50)] = 0
+    >>> X = sparse.csr_matrix(X_dense)
+    >>> _, singular_values, _ = svds(X, k=5, random_state=rng)
+    >>> print(singular_values)
+    [ 4.3293...  4.4491...  4.5420...  4.5987... 35.2410...]
+
+    The function can be called without the transpose of the input matrix
+    ever explicitly constructed.
+
+    >>> rng = np.random.default_rng(102524723947864966825913730119128190974)
+    >>> G = sparse.rand(8, 9, density=0.5, random_state=rng)
+    >>> Glo = aslinearoperator(G)
+    >>> _, singular_values_svds, _ = svds(Glo, k=5, random_state=rng)
+    >>> _, singular_values_svd, _ = linalg.svd(G.toarray())
+    >>> np.allclose(singular_values_svds, singular_values_svd[-4::-1])
+    True
+
+    The most memory efficient scenario is where neither
+    the original matrix, nor its transpose, is explicitly constructed.
+    Our example computes the smallest singular values and vectors
+    of 'LinearOperator' constructed from the numpy function 'np.diff' used
+    column-wise to be consistent with 'LinearOperator' operating on columns.
+
+    >>> diff0 = lambda a: np.diff(a, axis=0)
+
+    Let us create the matrix from 'diff0' to be used for validation only.
+
+    >>> n = 5  # The dimension of the space.
+    >>> M_from_diff0 = diff0(np.eye(n))
+    >>> print(M_from_diff0.astype(int))
+    [[-1  1  0  0  0]
+     [ 0 -1  1  0  0]
+     [ 0  0 -1  1  0]
+     [ 0  0  0 -1  1]]
+
+    The matrix 'M_from_diff0' is bi-diagonal and could be alternatively
+    created directly by
+
+    >>> M = - np.eye(n - 1, n, dtype=int)
+    >>> np.fill_diagonal(M[:,1:], 1)
+    >>> np.allclose(M, M_from_diff0)
+    True
+
+    Its transpose
+
+    >>> print(M.T)
+    [[-1  0  0  0]
+     [ 1 -1  0  0]
+     [ 0  1 -1  0]
+     [ 0  0  1 -1]
+     [ 0  0  0  1]]
+
+    can be viewed as the incidence matrix; see
+    Incidence matrix, (2022, Nov. 19), Wikipedia, https://w.wiki/5YXU,
+    of a linear graph with 5 vertices and 4 edges. The 5x5 normal matrix
+    ``M.T @ M`` thus is
+
+    >>> print(M.T @ M)
+    [[ 1 -1  0  0  0]
+     [-1  2 -1  0  0]
+     [ 0 -1  2 -1  0]
+     [ 0  0 -1  2 -1]
+     [ 0  0  0 -1  1]]
+
+    the graph Laplacian, while the actually used in 'svds' smaller size
+    4x4 normal matrix ``M @ M.T``
+
+    >>> print(M @ M.T)
+    [[ 2 -1  0  0]
+     [-1  2 -1  0]
+     [ 0 -1  2 -1]
+     [ 0  0 -1  2]]
+
+    is the so-called edge-based Laplacian; see
+    Symmetric Laplacian via the incidence matrix, in Laplacian matrix,
+    (2022, Nov. 19), Wikipedia, https://w.wiki/5YXW.
+
+    The 'LinearOperator' setup needs the options 'rmatvec' and 'rmatmat'
+    of multiplication by the matrix transpose ``M.T``, but we want to be
+    matrix-free to save memory, so knowing how ``M.T`` looks like, we
+    manually construct the following function to be
+    used in ``rmatmat=diff0t``.
+
+    >>> def diff0t(a):
+    ...     if a.ndim == 1:
+    ...         a = a[:,np.newaxis]  # Turn 1D into 2D array
+    ...     d = np.zeros((a.shape[0] + 1, a.shape[1]), dtype=a.dtype)
+    ...     d[0, :] = - a[0, :]
+    ...     d[1:-1, :] = a[0:-1, :] - a[1:, :]
+    ...     d[-1, :] = a[-1, :]
+    ...     return d
+
+    We check that our function 'diff0t' for the matrix transpose is valid.
+
+    >>> np.allclose(M.T, diff0t(np.eye(n-1)))
+    True
+
+    Now we setup our matrix-free 'LinearOperator' called 'diff0_func_aslo'
+    and for validation the matrix-based 'diff0_matrix_aslo'.
+
+    >>> def diff0_func_aslo_def(n):
+    ...     return LinearOperator(matvec=diff0,
+    ...                           matmat=diff0,
+    ...                           rmatvec=diff0t,
+    ...                           rmatmat=diff0t,
+    ...                           shape=(n - 1, n))
+    >>> diff0_func_aslo = diff0_func_aslo_def(n)
+    >>> diff0_matrix_aslo = aslinearoperator(M_from_diff0)
+
+    And validate both the matrix and its transpose in 'LinearOperator'.
+
+    >>> np.allclose(diff0_func_aslo(np.eye(n)),
+    ...             diff0_matrix_aslo(np.eye(n)))
+    True
+    >>> np.allclose(diff0_func_aslo.T(np.eye(n-1)),
+    ...             diff0_matrix_aslo.T(np.eye(n-1)))
+    True
+
+    Having the 'LinearOperator' setup validated, we run the solver.
+
+    >>> n = 100
+    >>> diff0_func_aslo = diff0_func_aslo_def(n)
+    >>> u, s, vT = svds(diff0_func_aslo, k=3, which='SM')
+
+    The singular values squared and the singular vectors are known
+    explicitly; see
+    Pure Dirichlet boundary conditions, in
+    Eigenvalues and eigenvectors of the second derivative,
+    (2022, Nov. 19), Wikipedia, https://w.wiki/5YX6,
+    since 'diff' corresponds to first
+    derivative, and its smaller size n-1 x n-1 normal matrix
+    ``M @ M.T`` represent the discrete second derivative with the Dirichlet
+    boundary conditions. We use these analytic expressions for validation.
+
+    >>> se = 2. * np.sin(np.pi * np.arange(1, 4) / (2. * n))
+    >>> ue = np.sqrt(2 / n) * np.sin(np.pi * np.outer(np.arange(1, n),
+    ...                              np.arange(1, 4)) / n)
+    >>> np.allclose(s, se, atol=1e-3)
+    True
+    >>> print(np.allclose(np.abs(u), np.abs(ue), atol=1e-6))
+    True
+
+    """
+    args = _iv(A, k, ncv, tol, which, v0, maxiter, return_singular_vectors,
+               solver, random_state)
+    (A, k, ncv, tol, which, v0, maxiter,
+     return_singular_vectors, solver, random_state) = args
+
+    largest = (which == 'LM')
+    n, m = A.shape
+
+    if n >= m:
+        X_dot = A.matvec
+        X_matmat = A.matmat
+        XH_dot = A.rmatvec
+        XH_mat = A.rmatmat
+        transpose = False
+    else:
+        X_dot = A.rmatvec
+        X_matmat = A.rmatmat
+        XH_dot = A.matvec
+        XH_mat = A.matmat
+        transpose = True
+
+    dtype = getattr(A, 'dtype', None)
+    if dtype is None:
+        dtype = A.dot(np.zeros([m, 1])).dtype
+
+    def matvec_XH_X(x):
+        return XH_dot(X_dot(x))
+
+    def matmat_XH_X(x):
+        return XH_mat(X_matmat(x))
+
+    XH_X = LinearOperator(matvec=matvec_XH_X, dtype=A.dtype,
+                          matmat=matmat_XH_X,
+                          shape=(min(A.shape), min(A.shape)))
+
+    # Get a low rank approximation of the implicitly defined gramian matrix.
+    # This is not a stable way to approach the problem.
+    if solver == 'lobpcg':
+
+        if k == 1 and v0 is not None:
+            X = np.reshape(v0, (-1, 1))
+        else:
+            X = random_state.standard_normal(size=(min(A.shape), k))
+
+        _, eigvec = lobpcg(XH_X, X, tol=tol ** 2, maxiter=maxiter,
+                           largest=largest)
+
+    elif solver == 'propack':
+        jobu = return_singular_vectors in {True, 'u'}
+        jobv = return_singular_vectors in {True, 'vh'}
+        irl_mode = (which == 'SM')
+        res = _svdp(A, k=k, tol=tol**2, which=which, maxiter=None,
+                    compute_u=jobu, compute_v=jobv, irl_mode=irl_mode,
+                    kmax=maxiter, v0=v0, random_state=random_state)
+
+        u, s, vh, _ = res  # but we'll ignore bnd, the last output
+
+        # PROPACK order appears to be largest first. `svds` output order is not
+        # guaranteed, according to documentation, but for ARPACK and LOBPCG
+        # they actually are ordered smallest to largest, so reverse for
+        # consistency.
+        s = s[::-1]
+        u = u[:, ::-1]
+        vh = vh[::-1]
+
+        u = u if jobu else None
+        vh = vh if jobv else None
+
+        if return_singular_vectors:
+            return u, s, vh
+        else:
+            return s
+
+    elif solver == 'arpack' or solver is None:
+        if v0 is None:
+            v0 = random_state.standard_normal(size=(min(A.shape),))
+        _, eigvec = eigsh(XH_X, k=k, tol=tol ** 2, maxiter=maxiter,
+                          ncv=ncv, which=which, v0=v0)
+        # arpack do not guarantee exactly orthonormal eigenvectors
+        # for clustered eigenvalues, especially in complex arithmetic
+        eigvec, _ = np.linalg.qr(eigvec)
+
+    # the eigenvectors eigvec must be orthonomal here; see gh-16712
+    Av = X_matmat(eigvec)
+    if not return_singular_vectors:
+        s = svd(Av, compute_uv=False, overwrite_a=True)
+        return s[::-1]
+
+    # compute the left singular vectors of X and update the right ones
+    # accordingly
+    u, s, vh = svd(Av, full_matrices=False, overwrite_a=True)
+    u = u[:, ::-1]
+    s = s[::-1]
+    vh = vh[::-1]
+
+    jobu = return_singular_vectors in {True, 'u'}
+    jobv = return_singular_vectors in {True, 'vh'}
+
+    if transpose:
+        u_tmp = eigvec @ _herm(vh) if jobu else None
+        vh = _herm(u) if jobv else None
+        u = u_tmp
+    else:
+        if not jobu:
+            u = None
+        vh = vh @ _herm(eigvec) if jobv else None
+
+    return u, s, vh
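The Notes section of the docstring above describes the approach: run an eigensolver on the smaller of the two normal matrices and recover the singular triplets from the resulting eigenvectors. A dense NumPy sketch of that idea for the k largest singular values, illustrative only and not part of the diff; it substitutes `numpy.linalg.eigh` for the ARPACK/LOBPCG eigensolvers used by the file itself:

# sketch: truncated SVD via the smaller normal matrix (dense stand-in for svds)
import numpy as np

def svds_via_normal_matrix(A, k):
    """Top-k SVD of A computed from the smaller of A^H A and A A^H."""
    m, n = A.shape
    B = A.conj().T if m < n else A                 # make B the taller factor
    _, eigvec = np.linalg.eigh(B.conj().T @ B)     # Gramian is min(m, n) x min(m, n)
    V = eigvec[:, -k:]                             # k leading eigenvectors
    # Rayleigh-Ritz style postprocessing: small dense SVD of B @ V
    u, s, wh = np.linalg.svd(B @ V, full_matrices=False)
    vh = wh @ V.conj().T
    if m < n:                                      # undo the implicit transpose
        u, vh = vh.conj().T, u.conj().T
    return u, s, vh

# quick check against the best rank-k approximation from a full SVD
rng = np.random.default_rng(0)
A = rng.standard_normal((6, 9))
u, s, vh = svds_via_normal_matrix(A, k=3)
U, S, Vh = np.linalg.svd(A, full_matrices=False)
assert np.allclose(s, S[:3])
assert np.allclose(u @ np.diag(s) @ vh, U[:, :3] @ np.diag(S[:3]) @ Vh[:3])

As the file's own comments note, squaring the matrix this way is not numerically stable for small singular values, which is why PROPACK is offered as an alternative.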
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/_svds_doc.py
ADDED
@@ -0,0 +1,400 @@
+def _svds_arpack_doc(A, k=6, ncv=None, tol=0, which='LM', v0=None,
+                     maxiter=None, return_singular_vectors=True,
+                     solver='arpack', random_state=None):
+    """
+    Partial singular value decomposition of a sparse matrix using ARPACK.
+
+    Compute the largest or smallest `k` singular values and corresponding
+    singular vectors of a sparse matrix `A`. The order in which the singular
+    values are returned is not guaranteed.
+
+    In the descriptions below, let ``M, N = A.shape``.
+
+    Parameters
+    ----------
+    A : sparse matrix or LinearOperator
+        Matrix to decompose.
+    k : int, optional
+        Number of singular values and singular vectors to compute.
+        Must satisfy ``1 <= k <= min(M, N) - 1``.
+        Default is 6.
+    ncv : int, optional
+        The number of Lanczos vectors generated.
+        The default is ``min(n, max(2*k + 1, 20))``.
+        If specified, must satisfy ``k + 1 < ncv < min(M, N)``; ``ncv > 2*k``
+        is recommended.
+    tol : float, optional
+        Tolerance for singular values. Zero (default) means machine precision.
+    which : {'LM', 'SM'}
+        Which `k` singular values to find: either the largest magnitude ('LM')
+        or smallest magnitude ('SM') singular values.
+    v0 : ndarray, optional
+        The starting vector for iteration:
+        an (approximate) left singular vector if ``N > M`` and a right singular
+        vector otherwise. Must be of length ``min(M, N)``.
+        Default: random
+    maxiter : int, optional
+        Maximum number of Arnoldi update iterations allowed;
+        default is ``min(M, N) * 10``.
+    return_singular_vectors : {True, False, "u", "vh"}
+        Singular values are always computed and returned; this parameter
+        controls the computation and return of singular vectors.
+
+        - ``True``: return singular vectors.
+        - ``False``: do not return singular vectors.
+        - ``"u"``: if ``M <= N``, compute only the left singular vectors and
+          return ``None`` for the right singular vectors. Otherwise, compute
+          all singular vectors.
+        - ``"vh"``: if ``M > N``, compute only the right singular vectors and
+          return ``None`` for the left singular vectors. Otherwise, compute
+          all singular vectors.
+
+    solver : {'arpack', 'propack', 'lobpcg'}, optional
+        This is the solver-specific documentation for ``solver='arpack'``.
+        :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>` and
+        :ref:`'propack' <sparse.linalg.svds-propack>`
+        are also supported.
+    random_state : {None, int, `numpy.random.Generator`,
+                    `numpy.random.RandomState`}, optional
+
+        Pseudorandom number generator state used to generate resamples.
+
+        If `random_state` is ``None`` (or `np.random`), the
+        `numpy.random.RandomState` singleton is used.
+        If `random_state` is an int, a new ``RandomState`` instance is used,
+        seeded with `random_state`.
+        If `random_state` is already a ``Generator`` or ``RandomState``
+        instance then that instance is used.
+    options : dict, optional
+        A dictionary of solver-specific options. No solver-specific options
+        are currently supported; this parameter is reserved for future use.
+
+    Returns
+    -------
+    u : ndarray, shape=(M, k)
+        Unitary matrix having left singular vectors as columns.
+    s : ndarray, shape=(k,)
+        The singular values.
+    vh : ndarray, shape=(k, N)
+        Unitary matrix having right singular vectors as rows.
+
+    Notes
+    -----
+    This is a naive implementation using ARPACK as an eigensolver
+    on ``A.conj().T @ A`` or ``A @ A.conj().T``, depending on which one is more
+    efficient.
+
+    Examples
+    --------
+    Construct a matrix ``A`` from singular values and vectors.
+
+    >>> import numpy as np
+    >>> from scipy.stats import ortho_group
+    >>> from scipy.sparse import csc_matrix, diags
+    >>> from scipy.sparse.linalg import svds
+    >>> rng = np.random.default_rng()
+    >>> orthogonal = csc_matrix(ortho_group.rvs(10, random_state=rng))
+    >>> s = [0.0001, 0.001, 3, 4, 5]  # singular values
+    >>> u = orthogonal[:, :5]         # left singular vectors
+    >>> vT = orthogonal[:, 5:].T      # right singular vectors
+    >>> A = u @ diags(s) @ vT
+
+    With only three singular values/vectors, the SVD approximates the original
+    matrix.
+
+    >>> u2, s2, vT2 = svds(A, k=3, solver='arpack')
+    >>> A2 = u2 @ np.diag(s2) @ vT2
+    >>> np.allclose(A2, A.toarray(), atol=1e-3)
+    True
+
+    With all five singular values/vectors, we can reproduce the original
+    matrix.
+
+    >>> u3, s3, vT3 = svds(A, k=5, solver='arpack')
+    >>> A3 = u3 @ np.diag(s3) @ vT3
+    >>> np.allclose(A3, A.toarray())
+    True
+
+    The singular values match the expected singular values, and the singular
+    vectors are as expected up to a difference in sign.
+
+    >>> (np.allclose(s3, s) and
+    ...  np.allclose(np.abs(u3), np.abs(u.toarray())) and
+    ...  np.allclose(np.abs(vT3), np.abs(vT.toarray())))
+    True
+
+    The singular vectors are also orthogonal.
+
+    >>> (np.allclose(u3.T @ u3, np.eye(5)) and
+    ...  np.allclose(vT3 @ vT3.T, np.eye(5)))
+    True
+    """
+    pass
+
+
+def _svds_lobpcg_doc(A, k=6, ncv=None, tol=0, which='LM', v0=None,
+                     maxiter=None, return_singular_vectors=True,
+                     solver='lobpcg', random_state=None):
+    """
+    Partial singular value decomposition of a sparse matrix using LOBPCG.
+
+    Compute the largest or smallest `k` singular values and corresponding
+    singular vectors of a sparse matrix `A`. The order in which the singular
+    values are returned is not guaranteed.
+
+    In the descriptions below, let ``M, N = A.shape``.
+
+    Parameters
+    ----------
+    A : sparse matrix or LinearOperator
+        Matrix to decompose.
+    k : int, default: 6
+        Number of singular values and singular vectors to compute.
+        Must satisfy ``1 <= k <= min(M, N) - 1``.
+    ncv : int, optional
+        Ignored.
+    tol : float, optional
+        Tolerance for singular values. Zero (default) means machine precision.
+    which : {'LM', 'SM'}
+        Which `k` singular values to find: either the largest magnitude ('LM')
+        or smallest magnitude ('SM') singular values.
+    v0 : ndarray, optional
+        If `k` is 1, the starting vector for iteration:
+        an (approximate) left singular vector if ``N > M`` and a right singular
+        vector otherwise. Must be of length ``min(M, N)``.
+        Ignored otherwise.
+        Default: random
+    maxiter : int, default: 20
+        Maximum number of iterations.
+    return_singular_vectors : {True, False, "u", "vh"}
+        Singular values are always computed and returned; this parameter
+        controls the computation and return of singular vectors.
+
+        - ``True``: return singular vectors.
+        - ``False``: do not return singular vectors.
+        - ``"u"``: if ``M <= N``, compute only the left singular vectors and
+          return ``None`` for the right singular vectors. Otherwise, compute
+          all singular vectors.
+        - ``"vh"``: if ``M > N``, compute only the right singular vectors and
+          return ``None`` for the left singular vectors. Otherwise, compute
+          all singular vectors.
+
+    solver : {'arpack', 'propack', 'lobpcg'}, optional
+        This is the solver-specific documentation for ``solver='lobpcg'``.
+        :ref:`'arpack' <sparse.linalg.svds-arpack>` and
+        :ref:`'propack' <sparse.linalg.svds-propack>`
+        are also supported.
+    random_state : {None, int, `numpy.random.Generator`,
+                    `numpy.random.RandomState`}, optional
+
+        Pseudorandom number generator state used to generate resamples.
+
+        If `random_state` is ``None`` (or `np.random`), the
+        `numpy.random.RandomState` singleton is used.
+        If `random_state` is an int, a new ``RandomState`` instance is used,
+        seeded with `random_state`.
+        If `random_state` is already a ``Generator`` or ``RandomState``
+        instance then that instance is used.
+    options : dict, optional
+        A dictionary of solver-specific options. No solver-specific options
+        are currently supported; this parameter is reserved for future use.
+
+    Returns
+    -------
+    u : ndarray, shape=(M, k)
+        Unitary matrix having left singular vectors as columns.
+    s : ndarray, shape=(k,)
+        The singular values.
+    vh : ndarray, shape=(k, N)
+        Unitary matrix having right singular vectors as rows.
+
+    Notes
+    -----
+    This is a naive implementation using LOBPCG as an eigensolver
+    on ``A.conj().T @ A`` or ``A @ A.conj().T``, depending on which one is more
+    efficient.
+
+    Examples
+    --------
+    Construct a matrix ``A`` from singular values and vectors.
+
+    >>> import numpy as np
+    >>> from scipy.stats import ortho_group
+    >>> from scipy.sparse import csc_matrix, diags
+    >>> from scipy.sparse.linalg import svds
+    >>> rng = np.random.default_rng()
+    >>> orthogonal = csc_matrix(ortho_group.rvs(10, random_state=rng))
+    >>> s = [0.0001, 0.001, 3, 4, 5]  # singular values
+    >>> u = orthogonal[:, :5]         # left singular vectors
+    >>> vT = orthogonal[:, 5:].T      # right singular vectors
+    >>> A = u @ diags(s) @ vT
+
+    With only three singular values/vectors, the SVD approximates the original
+    matrix.
+
+    >>> u2, s2, vT2 = svds(A, k=3, solver='lobpcg')
+    >>> A2 = u2 @ np.diag(s2) @ vT2
+    >>> np.allclose(A2, A.toarray(), atol=1e-3)
+    True
+
+    With all five singular values/vectors, we can reproduce the original
+    matrix.
+
+    >>> u3, s3, vT3 = svds(A, k=5, solver='lobpcg')
+    >>> A3 = u3 @ np.diag(s3) @ vT3
+    >>> np.allclose(A3, A.toarray())
+    True
+
+    The singular values match the expected singular values, and the singular
+    vectors are as expected up to a difference in sign.
+
+    >>> (np.allclose(s3, s) and
+    ...  np.allclose(np.abs(u3), np.abs(u.todense())) and
+    ...  np.allclose(np.abs(vT3), np.abs(vT.todense())))
+    True
+
+    The singular vectors are also orthogonal.
+
+    >>> (np.allclose(u3.T @ u3, np.eye(5)) and
+    ...  np.allclose(vT3 @ vT3.T, np.eye(5)))
+    True
+
+    """
+    pass
+
+
+def _svds_propack_doc(A, k=6, ncv=None, tol=0, which='LM', v0=None,
+                      maxiter=None, return_singular_vectors=True,
+                      solver='propack', random_state=None):
+    """
+    Partial singular value decomposition of a sparse matrix using PROPACK.
+
+    Compute the largest or smallest `k` singular values and corresponding
+    singular vectors of a sparse matrix `A`. The order in which the singular
+    values are returned is not guaranteed.
+
+    In the descriptions below, let ``M, N = A.shape``.
+
+    Parameters
+    ----------
+    A : sparse matrix or LinearOperator
+        Matrix to decompose. If `A` is a ``LinearOperator``
+        object, it must define both ``matvec`` and ``rmatvec`` methods.
+    k : int, default: 6
+        Number of singular values and singular vectors to compute.
+        Must satisfy ``1 <= k <= min(M, N)``.
+    ncv : int, optional
+        Ignored.
+    tol : float, optional
+        The desired relative accuracy for computed singular values.
+        Zero (default) means machine precision.
+    which : {'LM', 'SM'}
+        Which `k` singular values to find: either the largest magnitude ('LM')
+        or smallest magnitude ('SM') singular values. Note that choosing
+        ``which='SM'`` will force the ``irl`` option to be set ``True``.
+    v0 : ndarray, optional
+        Starting vector for iterations: must be of length ``A.shape[0]``.
+        If not specified, PROPACK will generate a starting vector.
+    maxiter : int, optional
+        Maximum number of iterations / maximal dimension of the Krylov
+        subspace. Default is ``10 * k``.
+    return_singular_vectors : {True, False, "u", "vh"}
+        Singular values are always computed and returned; this parameter
+        controls the computation and return of singular vectors.
+
+        - ``True``: return singular vectors.
+        - ``False``: do not return singular vectors.
+        - ``"u"``: compute only the left singular vectors; return ``None`` for
+          the right singular vectors.
+        - ``"vh"``: compute only the right singular vectors; return ``None``
+          for the left singular vectors.
+
+    solver : {'arpack', 'propack', 'lobpcg'}, optional
+        This is the solver-specific documentation for ``solver='propack'``.
+        :ref:`'arpack' <sparse.linalg.svds-arpack>` and
+        :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`
+        are also supported.
+    random_state : {None, int, `numpy.random.Generator`,
+                    `numpy.random.RandomState`}, optional
+
+        Pseudorandom number generator state used to generate resamples.
+
+        If `random_state` is ``None`` (or `np.random`), the
+        `numpy.random.RandomState` singleton is used.
+        If `random_state` is an int, a new ``RandomState`` instance is used,
+        seeded with `random_state`.
+        If `random_state` is already a ``Generator`` or ``RandomState``
+        instance then that instance is used.
+    options : dict, optional
+        A dictionary of solver-specific options. No solver-specific options
+        are currently supported; this parameter is reserved for future use.
+
+    Returns
+    -------
+    u : ndarray, shape=(M, k)
+        Unitary matrix having left singular vectors as columns.
+    s : ndarray, shape=(k,)
+        The singular values.
+    vh : ndarray, shape=(k, N)
+        Unitary matrix having right singular vectors as rows.
+
+    Notes
+    -----
+    This is an interface to the Fortran library PROPACK [1]_.
+    The current default is to run with IRL mode disabled unless seeking the
+    smallest singular values/vectors (``which='SM'``).
+
+    References
+    ----------
+
+    .. [1] Larsen, Rasmus Munk. "PROPACK-Software for large and sparse SVD
+       calculations." Available online. URL
+       http://sun.stanford.edu/~rmunk/PROPACK (2004): 2008-2009.
+
+    Examples
+    --------
+    Construct a matrix ``A`` from singular values and vectors.
+
+    >>> import numpy as np
+    >>> from scipy.stats import ortho_group
+    >>> from scipy.sparse import csc_matrix, diags
+    >>> from scipy.sparse.linalg import svds
+    >>> rng = np.random.default_rng()
+    >>> orthogonal = csc_matrix(ortho_group.rvs(10, random_state=rng))
+    >>> s = [0.0001, 0.001, 3, 4, 5]  # singular values
+    >>> u = orthogonal[:, :5]         # left singular vectors
+    >>> vT = orthogonal[:, 5:].T      # right singular vectors
+    >>> A = u @ diags(s) @ vT
+
+    With only three singular values/vectors, the SVD approximates the original
+    matrix.
+
+    >>> u2, s2, vT2 = svds(A, k=3, solver='propack')
+    >>> A2 = u2 @ np.diag(s2) @ vT2
+    >>> np.allclose(A2, A.todense(), atol=1e-3)
+    True
+
+    With all five singular values/vectors, we can reproduce the original
+    matrix.
+
+    >>> u3, s3, vT3 = svds(A, k=5, solver='propack')
+    >>> A3 = u3 @ np.diag(s3) @ vT3
+    >>> np.allclose(A3, A.todense())
+    True
+
+    The singular values match the expected singular values, and the singular
+    vectors are as expected up to a difference in sign.
+
+    >>> (np.allclose(s3, s) and
+    ...  np.allclose(np.abs(u3), np.abs(u.toarray())) and
+    ...  np.allclose(np.abs(vT3), np.abs(vT.toarray())))
+    True
+
+    The singular vectors are also orthogonal.
+
+    >>> (np.allclose(u3.T @ u3, np.eye(5)) and
+    ...  np.allclose(vT3 @ vT3.T, np.eye(5)))
+    True
+
+    """
+    pass
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/COPYING
ADDED
@@ -0,0 +1,45 @@
+
+BSD Software License
+
+Pertains to ARPACK and P_ARPACK
+
+Copyright (c) 1996-2008 Rice University.
+Developed by D.C. Sorensen, R.B. Lehoucq, C. Yang, and K. Maschhoff.
+All rights reserved.
+
+Arpack has been renamed to arpack-ng.
+
+Copyright (c) 2001-2011 - Scilab Enterprises
+Updated by Allan Cornet, Sylvestre Ledru.
+
+Copyright (c) 2010 - Jordi Gutiérrez Hermoso (Octave patch)
+
+Copyright (c) 2007 - Sébastien Fabbro (gentoo patch)
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+- Redistributions of source code must retain the above copyright
+  notice, this list of conditions and the following disclaimer.
+
+- Redistributions in binary form must reproduce the above copyright
+  notice, this list of conditions and the following disclaimer listed
+  in this license in the documentation and/or other materials
+  provided with the distribution.
+
+- Neither the name of the copyright holders nor the names of its
+  contributors may be used to endorse or promote products derived from
+  this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/__init__.py
ADDED
@@ -0,0 +1,20 @@
+"""
+Eigenvalue solver using iterative methods.
+
+Find k eigenvectors and eigenvalues of a matrix A using the
+Arnoldi/Lanczos iterative methods from ARPACK [1]_,[2]_.
+
+These methods are most useful for large sparse matrices.
+
+  - eigs(A,k)
+  - eigsh(A,k)
+
+References
+----------
+.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
+.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
+   Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
+   Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
+
+"""
+from .arpack import *
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (774 Bytes).
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/__pycache__/arpack.cpython-310.pyc
ADDED
Binary file (44.3 kB).
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/_arpack.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (486 kB).
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/arpack.py
ADDED
@@ -0,0 +1,1702 @@
"""
Find a few eigenvectors and eigenvalues of a matrix.


Uses ARPACK: https://github.com/opencollab/arpack-ng

"""
# Wrapper implementation notes
#
# ARPACK Entry Points
# -------------------
# The entry points to ARPACK are
# - (s,d)seupd : single and double precision symmetric matrix
# - (s,d,c,z)neupd: single,double,complex,double complex general matrix
# This wrapper puts the *neupd (general matrix) interfaces in eigs()
# and the *seupd (symmetric matrix) in eigsh().
# There is no specialized interface for complex Hermitian matrices.
# To find eigenvalues of a complex Hermitian matrix you
# may use eigsh(), but eigsh() will simply call eigs()
# and return the real part of the eigenvalues thus obtained.

# Number of eigenvalues returned and complex eigenvalues
# ------------------------------------------------------
# The ARPACK nonsymmetric real and double interface (s,d)naupd return
# eigenvalues and eigenvectors in real (float,double) arrays.
# Since the eigenvalues and eigenvectors are, in general, complex
# ARPACK puts the real and imaginary parts in consecutive entries
# in real-valued arrays. This wrapper puts the real entries
# into complex data types and attempts to return the requested eigenvalues
# and eigenvectors.


# Solver modes
# ------------
# ARPACK can handle shifted and shift-invert computations
# for eigenvalues by providing a shift (sigma) and a solver.

import numpy as np
import warnings
from scipy.sparse.linalg._interface import aslinearoperator, LinearOperator
from scipy.sparse import eye, issparse
from scipy.linalg import eig, eigh, lu_factor, lu_solve
from scipy.sparse._sputils import isdense, is_pydata_spmatrix
from scipy.sparse.linalg import gmres, splu
from scipy._lib._util import _aligned_zeros
from scipy._lib._threadsafety import ReentrancyLock

from . import _arpack
arpack_int = _arpack.timing.nbx.dtype

__docformat__ = "restructuredtext en"

__all__ = ['eigs', 'eigsh', 'ArpackError', 'ArpackNoConvergence']


_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'}
_ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12}
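
# Editor's illustrative sketch (not part of the original scipy module): the
# notes above say that the real nonsymmetric drivers return eigenvalues and
# eigenvectors as separate real/imaginary arrays.  The helper below shows,
# with made-up dr/di/zr arrays, how such output can be assembled into complex
# eigenpairs, mirroring what extract() does later in this file.
def _demo_assemble_complex_pair():
    dr = np.array([0.5, 0.5])             # real parts of a conjugate pair
    di = np.array([0.8, -0.8])            # imaginary parts
    zr = np.array([[0.7, 0.1],
                   [0.0, 0.7]])           # real/imag eigenvector columns
    d = dr + 1.0j * di                    # complex eigenvalues
    z = np.zeros(zr.shape, dtype=complex)
    z[:, 0] = zr[:, 0] + 1.0j * zr[:, 1]  # first member of the pair
    z[:, 1] = z[:, 0].conjugate()         # its conjugate partner
    return d, z
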

DNAUPD_ERRORS = {
    0: "Normal exit.",
    1: "Maximum number of iterations taken. "
       "All possible eigenvalues of OP has been found. IPARAM(5) "
       "returns the number of wanted converged Ritz values.",
    2: "No longer an informational error. Deprecated starting "
       "with release 2 of ARPACK.",
    3: "No shifts could be applied during a cycle of the "
       "Implicitly restarted Arnoldi iteration. One possibility "
       "is to increase the size of NCV relative to NEV. ",
    -1: "N must be positive.",
    -2: "NEV must be positive.",
    -3: "NCV-NEV >= 2 and less than or equal to N.",
    -4: "The maximum number of Arnoldi update iterations allowed "
        "must be greater than zero.",
    -5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
    -6: "BMAT must be one of 'I' or 'G'.",
    -7: "Length of private work array WORKL is not sufficient.",
    -8: "Error return from LAPACK eigenvalue calculation;",
    -9: "Starting vector is zero.",
    -10: "IPARAM(7) must be 1,2,3,4.",
    -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
    -12: "IPARAM(1) must be equal to 0 or 1.",
    -13: "NEV and WHICH = 'BE' are incompatible.",
    -9999: "Could not build an Arnoldi factorization. "
           "IPARAM(5) returns the size of the current Arnoldi "
           "factorization. The user is advised to check that "
           "enough workspace and array storage has been allocated."
}

SNAUPD_ERRORS = DNAUPD_ERRORS

ZNAUPD_ERRORS = DNAUPD_ERRORS.copy()
ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3."

CNAUPD_ERRORS = ZNAUPD_ERRORS

DSAUPD_ERRORS = {
    0: "Normal exit.",
    1: "Maximum number of iterations taken. "
       "All possible eigenvalues of OP has been found.",
    2: "No longer an informational error. Deprecated starting with "
       "release 2 of ARPACK.",
    3: "No shifts could be applied during a cycle of the Implicitly "
       "restarted Arnoldi iteration. One possibility is to increase "
       "the size of NCV relative to NEV. ",
    -1: "N must be positive.",
    -2: "NEV must be positive.",
    -3: "NCV must be greater than NEV and less than or equal to N.",
    -4: "The maximum number of Arnoldi update iterations allowed "
        "must be greater than zero.",
    -5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
    -6: "BMAT must be one of 'I' or 'G'.",
    -7: "Length of private work array WORKL is not sufficient.",
    -8: "Error return from trid. eigenvalue calculation; "
        "Informational error from LAPACK routine dsteqr .",
    -9: "Starting vector is zero.",
    -10: "IPARAM(7) must be 1,2,3,4,5.",
    -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
    -12: "IPARAM(1) must be equal to 0 or 1.",
    -13: "NEV and WHICH = 'BE' are incompatible. ",
    -9999: "Could not build an Arnoldi factorization. "
           "IPARAM(5) returns the size of the current Arnoldi "
           "factorization. The user is advised to check that "
           "enough workspace and array storage has been allocated.",
}

SSAUPD_ERRORS = DSAUPD_ERRORS

DNEUPD_ERRORS = {
    0: "Normal exit.",
    1: "The Schur form computed by LAPACK routine dlahqr "
       "could not be reordered by LAPACK routine dtrsen. "
       "Re-enter subroutine dneupd with IPARAM(5)NCV and "
       "increase the size of the arrays DR and DI to have "
       "dimension at least dimension NCV and allocate at least NCV "
       "columns for Z. NOTE: Not necessary if Z and V share "
       "the same space. Please notify the authors if this error"
       "occurs.",
    -1: "N must be positive.",
    -2: "NEV must be positive.",
    -3: "NCV-NEV >= 2 and less than or equal to N.",
    -5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
    -6: "BMAT must be one of 'I' or 'G'.",
    -7: "Length of private work WORKL array is not sufficient.",
    -8: "Error return from calculation of a real Schur form. "
        "Informational error from LAPACK routine dlahqr .",
    -9: "Error return from calculation of eigenvectors. "
        "Informational error from LAPACK routine dtrevc.",
    -10: "IPARAM(7) must be 1,2,3,4.",
    -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
    -12: "HOWMNY = 'S' not yet implemented",
    -13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
    -14: "DNAUPD did not find any eigenvalues to sufficient "
         "accuracy.",
    -15: "DNEUPD got a different count of the number of converged "
         "Ritz values than DNAUPD got. This indicates the user "
         "probably made an error in passing data from DNAUPD to "
         "DNEUPD or that the data was modified before entering "
         "DNEUPD",
}

SNEUPD_ERRORS = DNEUPD_ERRORS.copy()
SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr "
                    "could not be reordered by LAPACK routine strsen . "
                    "Re-enter subroutine dneupd with IPARAM(5)=NCV and "
                    "increase the size of the arrays DR and DI to have "
                    "dimension at least dimension NCV and allocate at least "
                    "NCV columns for Z. NOTE: Not necessary if Z and V share "
                    "the same space. Please notify the authors if this error "
                    "occurs.")
SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient "
                      "accuracy.")
SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of "
                      "converged Ritz values than SNAUPD got. This indicates "
                      "the user probably made an error in passing data from "
                      "SNAUPD to SNEUPD or that the data was modified before "
                      "entering SNEUPD")

ZNEUPD_ERRORS = {0: "Normal exit.",
                 1: "The Schur form computed by LAPACK routine csheqr "
                    "could not be reordered by LAPACK routine ztrsen. "
                    "Re-enter subroutine zneupd with IPARAM(5)=NCV and "
                    "increase the size of the array D to have "
                    "dimension at least dimension NCV and allocate at least "
                    "NCV columns for Z. NOTE: Not necessary if Z and V share "
                    "the same space. Please notify the authors if this error "
                    "occurs.",
                 -1: "N must be positive.",
                 -2: "NEV must be positive.",
                 -3: "NCV-NEV >= 1 and less than or equal to N.",
                 -5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
                 -6: "BMAT must be one of 'I' or 'G'.",
                 -7: "Length of private work WORKL array is not sufficient.",
                 -8: "Error return from LAPACK eigenvalue calculation. "
                     "This should never happened.",
                 -9: "Error return from calculation of eigenvectors. "
                     "Informational error from LAPACK routine ztrevc.",
                 -10: "IPARAM(7) must be 1,2,3",
                 -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
                 -12: "HOWMNY = 'S' not yet implemented",
                 -13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
                 -14: "ZNAUPD did not find any eigenvalues to sufficient "
                      "accuracy.",
                 -15: "ZNEUPD got a different count of the number of "
                      "converged Ritz values than ZNAUPD got. This "
                      "indicates the user probably made an error in passing "
                      "data from ZNAUPD to ZNEUPD or that the data was "
                      "modified before entering ZNEUPD"
                 }

CNEUPD_ERRORS = ZNEUPD_ERRORS.copy()
CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient "
                      "accuracy.")
CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of "
                      "converged Ritz values than CNAUPD got. This indicates "
                      "the user probably made an error in passing data from "
                      "CNAUPD to CNEUPD or that the data was modified before "
                      "entering CNEUPD")

DSEUPD_ERRORS = {
    0: "Normal exit.",
    -1: "N must be positive.",
    -2: "NEV must be positive.",
    -3: "NCV must be greater than NEV and less than or equal to N.",
    -5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
    -6: "BMAT must be one of 'I' or 'G'.",
    -7: "Length of private work WORKL array is not sufficient.",
    -8: ("Error return from trid. eigenvalue calculation; "
         "Information error from LAPACK routine dsteqr."),
    -9: "Starting vector is zero.",
    -10: "IPARAM(7) must be 1,2,3,4,5.",
    -11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
    -12: "NEV and WHICH = 'BE' are incompatible.",
    -14: "DSAUPD did not find any eigenvalues to sufficient accuracy.",
    -15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.",
    -16: "HOWMNY = 'S' not yet implemented",
    -17: ("DSEUPD got a different count of the number of converged "
          "Ritz values than DSAUPD got. This indicates the user "
          "probably made an error in passing data from DSAUPD to "
          "DSEUPD or that the data was modified before entering "
          "DSEUPD.")
}

SSEUPD_ERRORS = DSEUPD_ERRORS.copy()
SSEUPD_ERRORS[-14] = ("SSAUPD did not find any eigenvalues "
                      "to sufficient accuracy.")
SSEUPD_ERRORS[-17] = ("SSEUPD got a different count of the number of "
                      "converged "
                      "Ritz values than SSAUPD got. This indicates the user "
                      "probably made an error in passing data from SSAUPD to "
                      "SSEUPD or that the data was modified before entering "
                      "SSEUPD.")

_SAUPD_ERRORS = {'d': DSAUPD_ERRORS,
                 's': SSAUPD_ERRORS}
_NAUPD_ERRORS = {'d': DNAUPD_ERRORS,
                 's': SNAUPD_ERRORS,
                 'z': ZNAUPD_ERRORS,
                 'c': CNAUPD_ERRORS}
_SEUPD_ERRORS = {'d': DSEUPD_ERRORS,
                 's': SSEUPD_ERRORS}
_NEUPD_ERRORS = {'d': DNEUPD_ERRORS,
                 's': SNEUPD_ERRORS,
                 'z': ZNEUPD_ERRORS,
                 'c': CNEUPD_ERRORS}

# accepted values of parameter WHICH in _SEUPD
_SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE']

# accepted values of parameter WHICH in _NAUPD
_NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
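
# Editor's illustrative sketch (not part of the original scipy module): the
# tables above are plain dicts keyed by the Fortran "info" return code, so a
# nonzero code can be turned into a readable message like this:
def _demo_error_lookup(info=-3):
    # e.g. info=-3 -> "NCV-NEV >= 2 and less than or equal to N."
    return "ARPACK error %d: %s" % (info,
                                    DNAUPD_ERRORS.get(info, "Unknown error"))
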


class ArpackError(RuntimeError):
    """
    ARPACK error
    """

    def __init__(self, info, infodict=_NAUPD_ERRORS):
        msg = infodict.get(info, "Unknown error")
        RuntimeError.__init__(self, "ARPACK error %d: %s" % (info, msg))


class ArpackNoConvergence(ArpackError):
    """
    ARPACK iteration did not converge

    Attributes
    ----------
    eigenvalues : ndarray
        Partial result. Converged eigenvalues.
    eigenvectors : ndarray
        Partial result. Converged eigenvectors.

    """

    def __init__(self, msg, eigenvalues, eigenvectors):
        ArpackError.__init__(self, -1, {-1: msg})
        self.eigenvalues = eigenvalues
        self.eigenvectors = eigenvectors


def choose_ncv(k):
    """
    Choose number of lanczos vectors based on target number
    of singular/eigen values and vectors to compute, k.
    """
    return max(2 * k + 1, 20)


class _ArpackParams:
    def __init__(self, n, k, tp, mode=1, sigma=None,
                 ncv=None, v0=None, maxiter=None, which="LM", tol=0):
        if k <= 0:
            raise ValueError("k must be positive, k=%d" % k)

        if maxiter is None:
            maxiter = n * 10
        if maxiter <= 0:
            raise ValueError("maxiter must be positive, maxiter=%d" % maxiter)

        if tp not in 'fdFD':
            raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'")

        if v0 is not None:
            # ARPACK overwrites its initial resid, make a copy
            self.resid = np.array(v0, copy=True)
            info = 1
        else:
            # ARPACK will use a random initial vector.
            self.resid = np.zeros(n, tp)
            info = 0

        if sigma is None:
            #sigma not used
            self.sigma = 0
        else:
            self.sigma = sigma

        if ncv is None:
            ncv = choose_ncv(k)
        ncv = min(ncv, n)

        self.v = np.zeros((n, ncv), tp)  # holds Ritz vectors
        self.iparam = np.zeros(11, arpack_int)

        # set solver mode and parameters
        ishfts = 1
        self.mode = mode
        self.iparam[0] = ishfts
        self.iparam[2] = maxiter
        self.iparam[3] = 1
        self.iparam[6] = mode

        self.n = n
        self.tol = tol
        self.k = k
        self.maxiter = maxiter
        self.ncv = ncv
        self.which = which
        self.tp = tp
        self.info = info

        self.converged = False
        self.ido = 0

    def _raise_no_convergence(self):
        msg = "No convergence (%d iterations, %d/%d eigenvectors converged)"
        k_ok = self.iparam[4]
        num_iter = self.iparam[2]
        try:
            ev, vec = self.extract(True)
        except ArpackError as err:
            msg = f"{msg} [{err}]"
            ev = np.zeros((0,))
            vec = np.zeros((self.n, 0))
            k_ok = 0
        raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec)

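
# Editor's illustrative sketch (not part of the original scipy module):
# because ArpackNoConvergence carries the converged subset as attributes,
# calling code can salvage partial results from the public eigs() interface.
def _demo_partial_results(A, k=6):
    try:
        # maxiter=2 is deliberately tiny here to provoke non-convergence.
        return eigs(A, k=k, maxiter=2)
    except ArpackNoConvergence as err:
        # Whatever did converge is still usable.
        return err.eigenvalues, err.eigenvectors

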
class _SymmetricArpackParams(_ArpackParams):
    def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
                 Minv_matvec=None, sigma=None,
                 ncv=None, v0=None, maxiter=None, which="LM", tol=0):
        # The following modes are supported:
        #   mode = 1:
        #     Solve the standard eigenvalue problem:
        #       A*x = lambda*x :
        #       A - symmetric
        #     Arguments should be
        #       matvec      = left multiplication by A
        #       M_matvec    = None [not used]
        #       Minv_matvec = None [not used]
        #
        #   mode = 2:
        #     Solve the general eigenvalue problem:
        #       A*x = lambda*M*x
        #       A - symmetric
        #       M - symmetric positive definite
        #     Arguments should be
        #       matvec      = left multiplication by A
        #       M_matvec    = left multiplication by M
        #       Minv_matvec = left multiplication by M^-1
        #
        #   mode = 3:
        #     Solve the general eigenvalue problem in shift-invert mode:
        #       A*x = lambda*M*x
        #       A - symmetric
        #       M - symmetric positive semi-definite
        #     Arguments should be
        #       matvec      = None [not used]
        #       M_matvec    = left multiplication by M
        #                     or None, if M is the identity
        #       Minv_matvec = left multiplication by [A-sigma*M]^-1
        #
        #   mode = 4:
        #     Solve the general eigenvalue problem in Buckling mode:
        #       A*x = lambda*AG*x
        #       A  - symmetric positive semi-definite
        #       AG - symmetric indefinite
        #     Arguments should be
        #       matvec      = left multiplication by A
        #       M_matvec    = None [not used]
        #       Minv_matvec = left multiplication by [A-sigma*AG]^-1
        #
        #   mode = 5:
        #     Solve the general eigenvalue problem in Cayley-transformed mode:
        #       A*x = lambda*M*x
        #       A - symmetric
        #       M - symmetric positive semi-definite
        #     Arguments should be
        #       matvec      = left multiplication by A
        #       M_matvec    = left multiplication by M
        #                     or None, if M is the identity
        #       Minv_matvec = left multiplication by [A-sigma*M]^-1
        if mode == 1:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=1")
            if M_matvec is not None:
                raise ValueError("M_matvec cannot be specified for mode=1")
            if Minv_matvec is not None:
                raise ValueError("Minv_matvec cannot be specified for mode=1")

            self.OP = matvec
            self.B = lambda x: x
            self.bmat = 'I'
        elif mode == 2:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=2")
            if M_matvec is None:
                raise ValueError("M_matvec must be specified for mode=2")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=2")

            self.OP = lambda x: Minv_matvec(matvec(x))
            self.OPa = Minv_matvec
            self.OPb = matvec
            self.B = M_matvec
            self.bmat = 'G'
        elif mode == 3:
            if matvec is not None:
                raise ValueError("matvec must not be specified for mode=3")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=3")

            if M_matvec is None:
                self.OP = Minv_matvec
                self.OPa = Minv_matvec
                self.B = lambda x: x
                self.bmat = 'I'
            else:
                self.OP = lambda x: Minv_matvec(M_matvec(x))
                self.OPa = Minv_matvec
                self.B = M_matvec
                self.bmat = 'G'
        elif mode == 4:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=4")
            if M_matvec is not None:
                raise ValueError("M_matvec must not be specified for mode=4")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=4")
            self.OPa = Minv_matvec
            self.OP = lambda x: self.OPa(matvec(x))
            self.B = matvec
            self.bmat = 'G'
        elif mode == 5:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=5")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=5")

            self.OPa = Minv_matvec
            self.A_matvec = matvec

            if M_matvec is None:
                self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x)
                self.B = lambda x: x
                self.bmat = 'I'
            else:
                self.OP = lambda x: Minv_matvec(matvec(x)
                                                + sigma * M_matvec(x))
                self.B = M_matvec
                self.bmat = 'G'
        else:
            raise ValueError("mode=%i not implemented" % mode)

        if which not in _SEUPD_WHICH:
            raise ValueError("which must be one of %s"
                             % ' '.join(_SEUPD_WHICH))
        if k >= n:
            raise ValueError("k must be less than ndim(A), k=%d" % k)

        _ArpackParams.__init__(self, n, k, tp, mode, sigma,
                               ncv, v0, maxiter, which, tol)

        if self.ncv > n or self.ncv <= k:
            raise ValueError("ncv must be k<ncv<=n, ncv=%s" % self.ncv)

        # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
        self.workd = _aligned_zeros(3 * n, self.tp)
        self.workl = _aligned_zeros(self.ncv * (self.ncv + 8), self.tp)

        ltr = _type_conv[self.tp]
        if ltr not in ["s", "d"]:
            raise ValueError("Input matrix is not real-valued.")

        self._arpack_solver = _arpack.__dict__[ltr + 'saupd']
        self._arpack_extract = _arpack.__dict__[ltr + 'seupd']

        self.iterate_infodict = _SAUPD_ERRORS[ltr]
        self.extract_infodict = _SEUPD_ERRORS[ltr]

        self.ipntr = np.zeros(11, arpack_int)

    def iterate(self):
        self.ido, self.tol, self.resid, self.v, self.iparam, self.ipntr, self.info = \
            self._arpack_solver(self.ido, self.bmat, self.which, self.k,
                                self.tol, self.resid, self.v, self.iparam,
                                self.ipntr, self.workd, self.workl, self.info)

        xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
        yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
        if self.ido == -1:
            # initialization
            self.workd[yslice] = self.OP(self.workd[xslice])
        elif self.ido == 1:
            # compute y = Op*x
            if self.mode == 1:
                self.workd[yslice] = self.OP(self.workd[xslice])
            elif self.mode == 2:
                self.workd[xslice] = self.OPb(self.workd[xslice])
                self.workd[yslice] = self.OPa(self.workd[xslice])
            elif self.mode == 5:
                Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
                Ax = self.A_matvec(self.workd[xslice])
                self.workd[yslice] = self.OPa(Ax + (self.sigma *
                                                    self.workd[Bxslice]))
            else:
                Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
                self.workd[yslice] = self.OPa(self.workd[Bxslice])
        elif self.ido == 2:
            self.workd[yslice] = self.B(self.workd[xslice])
        elif self.ido == 3:
            raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
        else:
            self.converged = True

            if self.info == 0:
                pass
            elif self.info == 1:
                self._raise_no_convergence()
            else:
                raise ArpackError(self.info, infodict=self.iterate_infodict)

    def extract(self, return_eigenvectors):
        rvec = return_eigenvectors
        ierr = 0
        howmny = 'A'  # return all eigenvectors
        sselect = np.zeros(self.ncv, 'int')  # unused
        d, z, ierr = self._arpack_extract(rvec, howmny, sselect, self.sigma,
                                          self.bmat, self.which, self.k,
                                          self.tol, self.resid, self.v,
                                          self.iparam[0:7], self.ipntr,
                                          self.workd[0:2 * self.n],
                                          self.workl, ierr)
        if ierr != 0:
            raise ArpackError(ierr, infodict=self.extract_infodict)
        k_ok = self.iparam[4]
        d = d[:k_ok]
        z = z[:, :k_ok]

        if return_eigenvectors:
            return d, z
        else:
            return d

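
# Editor's illustrative sketch (not part of the original scipy module):
# mode 3 above is the shift-invert path that the public eigsh() interface
# selects when a shift is given.  Asking for eigenvalues near sigma=0 this
# way is usually much faster than which='SM' on a large sparse matrix.
def _demo_shift_invert(A, k=5, sigma=0):
    # 'LM' on the shifted spectrum == eigenvalues closest to sigma.
    return eigsh(A, k=k, sigma=sigma, which='LM')

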
class _UnsymmetricArpackParams(_ArpackParams):
    def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
                 Minv_matvec=None, sigma=None,
                 ncv=None, v0=None, maxiter=None, which="LM", tol=0):
        # The following modes are supported:
        #   mode = 1:
        #     Solve the standard eigenvalue problem:
        #       A*x = lambda*x
        #       A - square matrix
        #     Arguments should be
        #       matvec      = left multiplication by A
        #       M_matvec    = None [not used]
        #       Minv_matvec = None [not used]
        #
        #   mode = 2:
        #     Solve the generalized eigenvalue problem:
        #       A*x = lambda*M*x
        #       A - square matrix
        #       M - symmetric, positive semi-definite
        #     Arguments should be
        #       matvec      = left multiplication by A
        #       M_matvec    = left multiplication by M
        #       Minv_matvec = left multiplication by M^-1
        #
        #   mode = 3,4:
        #     Solve the general eigenvalue problem in shift-invert mode:
        #       A*x = lambda*M*x
        #       A - square matrix
        #       M - symmetric, positive semi-definite
        #     Arguments should be
        #       matvec      = None [not used]
        #       M_matvec    = left multiplication by M
        #                     or None, if M is the identity
        #       Minv_matvec = left multiplication by [A-sigma*M]^-1
        #         if A is real and mode==3, use the real part of Minv_matvec
        #         if A is real and mode==4, use the imag part of Minv_matvec
        #         if A is complex and mode==3,
        #            use real and imag parts of Minv_matvec
        if mode == 1:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=1")
            if M_matvec is not None:
                raise ValueError("M_matvec cannot be specified for mode=1")
            if Minv_matvec is not None:
                raise ValueError("Minv_matvec cannot be specified for mode=1")

            self.OP = matvec
            self.B = lambda x: x
            self.bmat = 'I'
        elif mode == 2:
            if matvec is None:
                raise ValueError("matvec must be specified for mode=2")
            if M_matvec is None:
                raise ValueError("M_matvec must be specified for mode=2")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified for mode=2")

            self.OP = lambda x: Minv_matvec(matvec(x))
            self.OPa = Minv_matvec
            self.OPb = matvec
            self.B = M_matvec
            self.bmat = 'G'
        elif mode in (3, 4):
            if matvec is None:
                raise ValueError("matvec must be specified "
                                 "for mode in (3,4)")
            if Minv_matvec is None:
                raise ValueError("Minv_matvec must be specified "
                                 "for mode in (3,4)")

            self.matvec = matvec
            if tp in 'DF':  # complex type
                if mode == 3:
                    self.OPa = Minv_matvec
                else:
                    raise ValueError("mode=4 invalid for complex A")
            else:  # real type
                if mode == 3:
                    self.OPa = lambda x: np.real(Minv_matvec(x))
                else:
                    self.OPa = lambda x: np.imag(Minv_matvec(x))
            if M_matvec is None:
                self.B = lambda x: x
                self.bmat = 'I'
                self.OP = self.OPa
            else:
                self.B = M_matvec
                self.bmat = 'G'
                self.OP = lambda x: self.OPa(M_matvec(x))
        else:
            raise ValueError("mode=%i not implemented" % mode)

        if which not in _NEUPD_WHICH:
            raise ValueError("Parameter which must be one of %s"
                             % ' '.join(_NEUPD_WHICH))
        if k >= n - 1:
            raise ValueError("k must be less than ndim(A)-1, k=%d" % k)

        _ArpackParams.__init__(self, n, k, tp, mode, sigma,
                               ncv, v0, maxiter, which, tol)

        if self.ncv > n or self.ncv <= k + 1:
            raise ValueError("ncv must be k+1<ncv<=n, ncv=%s" % self.ncv)

        # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
        self.workd = _aligned_zeros(3 * n, self.tp)
        self.workl = _aligned_zeros(3 * self.ncv * (self.ncv + 2), self.tp)

        ltr = _type_conv[self.tp]
        self._arpack_solver = _arpack.__dict__[ltr + 'naupd']
        self._arpack_extract = _arpack.__dict__[ltr + 'neupd']

        self.iterate_infodict = _NAUPD_ERRORS[ltr]
        self.extract_infodict = _NEUPD_ERRORS[ltr]

        self.ipntr = np.zeros(14, arpack_int)

        if self.tp in 'FD':
            # Use _aligned_zeros to work around a f2py bug in Numpy 1.9.1
            self.rwork = _aligned_zeros(self.ncv, self.tp.lower())
        else:
            self.rwork = None

    def iterate(self):
        if self.tp in 'fd':
            results = self._arpack_solver(self.ido, self.bmat, self.which, self.k,
                                          self.tol, self.resid, self.v, self.iparam,
                                          self.ipntr, self.workd, self.workl, self.info)
            self.ido, self.tol, self.resid, self.v, \
                self.iparam, self.ipntr, self.info = results

        else:
            results = self._arpack_solver(self.ido, self.bmat, self.which, self.k,
                                          self.tol, self.resid, self.v, self.iparam,
                                          self.ipntr, self.workd, self.workl,
                                          self.rwork, self.info)
            self.ido, self.tol, self.resid, self.v, \
                self.iparam, self.ipntr, self.info = results

        xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
        yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
        if self.ido == -1:
            # initialization
            self.workd[yslice] = self.OP(self.workd[xslice])
        elif self.ido == 1:
            # compute y = Op*x
            if self.mode in (1, 2):
                self.workd[yslice] = self.OP(self.workd[xslice])
            else:
                Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
                self.workd[yslice] = self.OPa(self.workd[Bxslice])
        elif self.ido == 2:
            self.workd[yslice] = self.B(self.workd[xslice])
        elif self.ido == 3:
            raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
        else:
            self.converged = True

            if self.info == 0:
                pass
            elif self.info == 1:
                self._raise_no_convergence()
            else:
                raise ArpackError(self.info, infodict=self.iterate_infodict)

    def extract(self, return_eigenvectors):
        k, n = self.k, self.n

        ierr = 0
        howmny = 'A'  # return all eigenvectors
        sselect = np.zeros(self.ncv, 'int')  # unused
        sigmar = np.real(self.sigma)
        sigmai = np.imag(self.sigma)
        workev = np.zeros(3 * self.ncv, self.tp)

        if self.tp in 'fd':
            dr = np.zeros(k + 1, self.tp)
            di = np.zeros(k + 1, self.tp)
            zr = np.zeros((n, k + 1), self.tp)
            dr, di, zr, ierr = \
                self._arpack_extract(return_eigenvectors,
                                     howmny, sselect, sigmar, sigmai, workev,
                                     self.bmat, self.which, k, self.tol, self.resid,
                                     self.v, self.iparam, self.ipntr,
                                     self.workd, self.workl, self.info)
            if ierr != 0:
                raise ArpackError(ierr, infodict=self.extract_infodict)
            nreturned = self.iparam[4]  # number of good eigenvalues returned

            # Build complex eigenvalues from real and imaginary parts
            d = dr + 1.0j * di

            # Arrange the eigenvectors: complex eigenvectors are stored as
            # real,imaginary in consecutive columns
            z = zr.astype(self.tp.upper())

            # The ARPACK nonsymmetric real and double interface (s,d)naupd
            # return eigenvalues and eigenvectors in real (float,double)
            # arrays.

            # Efficiency: this should check that return_eigenvectors == True
            #  before going through this construction.
            if sigmai == 0:
                i = 0
                while i <= k:
                    # check if complex
                    if abs(d[i].imag) != 0:
                        # this is a complex conjugate pair with eigenvalues
                        # in consecutive columns
                        if i < k:
                            z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
                            z[:, i + 1] = z[:, i].conjugate()
                            i += 1
                        else:
                            #last eigenvalue is complex: the imaginary part of
                            # the eigenvector has not been returned
                            #this can only happen if nreturned > k, so we'll
                            # throw out this case.
                            nreturned -= 1
                    i += 1

            else:
                # real matrix, mode 3 or 4, imag(sigma) is nonzero:
                # see remark 3 in <s,d>neupd.f
                # Build complex eigenvalues from real and imaginary parts
                i = 0
                while i <= k:
                    if abs(d[i].imag) == 0:
                        d[i] = np.dot(zr[:, i], self.matvec(zr[:, i]))
                    else:
                        if i < k:
                            z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
                            z[:, i + 1] = z[:, i].conjugate()
                            d[i] = ((np.dot(zr[:, i],
                                            self.matvec(zr[:, i]))
                                     + np.dot(zr[:, i + 1],
                                              self.matvec(zr[:, i + 1])))
                                    + 1j * (np.dot(zr[:, i],
                                                   self.matvec(zr[:, i + 1]))
                                            - np.dot(zr[:, i + 1],
                                                     self.matvec(zr[:, i]))))
                            d[i + 1] = d[i].conj()
                            i += 1
                        else:
                            #last eigenvalue is complex: the imaginary part of
                            # the eigenvector has not been returned
                            #this can only happen if nreturned > k, so we'll
                            # throw out this case.
                            nreturned -= 1
                    i += 1

            # Now we have k+1 possible eigenvalues and eigenvectors
            # Return the ones specified by the keyword "which"

            if nreturned <= k:
                # we got less or equal as many eigenvalues we wanted
                d = d[:nreturned]
                z = z[:, :nreturned]
            else:
                # we got one extra eigenvalue (likely a cc pair, but which?)
                if self.mode in (1, 2):
                    rd = d
                elif self.mode in (3, 4):
                    rd = 1 / (d - self.sigma)

                if self.which in ['LR', 'SR']:
                    ind = np.argsort(rd.real)
                elif self.which in ['LI', 'SI']:
                    # for LI,SI ARPACK returns largest,smallest
                    # abs(imaginary) (complex pairs come together)
                    ind = np.argsort(abs(rd.imag))
                else:
                    ind = np.argsort(abs(rd))

                if self.which in ['LR', 'LM', 'LI']:
                    ind = ind[-k:][::-1]
                elif self.which in ['SR', 'SM', 'SI']:
                    ind = ind[:k]

                d = d[ind]
                z = z[:, ind]
        else:
            # complex is so much simpler...
            d, z, ierr =\
                self._arpack_extract(return_eigenvectors,
                                     howmny, sselect, self.sigma, workev,
                                     self.bmat, self.which, k, self.tol, self.resid,
                                     self.v, self.iparam, self.ipntr,
                                     self.workd, self.workl, self.rwork, ierr)

            if ierr != 0:
                raise ArpackError(ierr, infodict=self.extract_infodict)

            k_ok = self.iparam[4]
            d = d[:k_ok]
            z = z[:, :k_ok]

        if return_eigenvectors:
            return d, z
        else:
            return d


def _aslinearoperator_with_dtype(m):
    m = aslinearoperator(m)
    if not hasattr(m, 'dtype'):
        x = np.zeros(m.shape[1])
        m.dtype = (m * x).dtype
    return m


class SpLuInv(LinearOperator):
    """
    SpLuInv:
       helper class to repeatedly solve M*x=b
       using a sparse LU-decomposition of M
    """

    def __init__(self, M):
        self.M_lu = splu(M)
        self.shape = M.shape
        self.dtype = M.dtype
        self.isreal = not np.issubdtype(self.dtype, np.complexfloating)

    def _matvec(self, x):
        # careful here: splu.solve will throw away imaginary
        # part of x if M is real
        x = np.asarray(x)
        if self.isreal and np.issubdtype(x.dtype, np.complexfloating):
            return (self.M_lu.solve(np.real(x).astype(self.dtype))
                    + 1j * self.M_lu.solve(np.imag(x).astype(self.dtype)))
        else:
            return self.M_lu.solve(x.astype(self.dtype))


class LuInv(LinearOperator):
    """
    LuInv:
       helper class to repeatedly solve M*x=b
       using an LU-decomposition of M
    """

    def __init__(self, M):
        self.M_lu = lu_factor(M)
        self.shape = M.shape
        self.dtype = M.dtype

    def _matvec(self, x):
        return lu_solve(self.M_lu, x)


def gmres_loose(A, b, tol):
    """
    gmres with looser termination condition.
    """
    b = np.asarray(b)
    min_tol = 1000 * np.sqrt(b.size) * np.finfo(b.dtype).eps
    return gmres(A, b, rtol=max(tol, min_tol), atol=0)


class IterInv(LinearOperator):
    """
    IterInv:
       helper class to repeatedly solve M*x=b
       using an iterative method.
    """

    def __init__(self, M, ifunc=gmres_loose, tol=0):
        self.M = M
        if hasattr(M, 'dtype'):
            self.dtype = M.dtype
        else:
            x = np.zeros(M.shape[1])
            self.dtype = (M * x).dtype
        self.shape = M.shape

        if tol <= 0:
            # when tol=0, ARPACK uses machine tolerance as calculated
            # by LAPACK's _LAMCH function.  We should match this
            tol = 2 * np.finfo(self.dtype).eps
        self.ifunc = ifunc
        self.tol = tol

    def _matvec(self, x):
        b, info = self.ifunc(self.M, x, tol=self.tol)
        if info != 0:
            raise ValueError("Error in inverting M: function "
                             "%s did not converge (info = %i)."
                             % (self.ifunc.__name__, info))
        return b


class IterOpInv(LinearOperator):
    """
    IterOpInv:
       helper class to repeatedly solve [A-sigma*M]*x = b
       using an iterative method
    """

    def __init__(self, A, M, sigma, ifunc=gmres_loose, tol=0):
        self.A = A
        self.M = M
        self.sigma = sigma

        def mult_func(x):
            return A.matvec(x) - sigma * M.matvec(x)

        def mult_func_M_None(x):
            return A.matvec(x) - sigma * x

        x = np.zeros(A.shape[1])
        if M is None:
            dtype = mult_func_M_None(x).dtype
            self.OP = LinearOperator(self.A.shape,
                                     mult_func_M_None,
                                     dtype=dtype)
        else:
            dtype = mult_func(x).dtype
            self.OP = LinearOperator(self.A.shape,
                                     mult_func,
                                     dtype=dtype)
        self.shape = A.shape

        if tol <= 0:
            # when tol=0, ARPACK uses machine tolerance as calculated
            # by LAPACK's _LAMCH function.  We should match this
            tol = 2 * np.finfo(self.OP.dtype).eps
        self.ifunc = ifunc
        self.tol = tol

    def _matvec(self, x):
        b, info = self.ifunc(self.OP, x, tol=self.tol)
        if info != 0:
            raise ValueError("Error in inverting [A-sigma*M]: function "
                             "%s did not converge (info = %i)."
                             % (self.ifunc.__name__, info))
        return b

    @property
    def dtype(self):
        return self.OP.dtype


def _fast_spmatrix_to_csc(A, hermitian=False):
    """Convert sparse matrix to CSC (by transposing, if possible)"""
    if (A.format == "csr" and hermitian
            and not np.issubdtype(A.dtype, np.complexfloating)):
        return A.T
    elif is_pydata_spmatrix(A):
        # No need to convert
        return A
    else:
        return A.tocsc()


def get_inv_matvec(M, hermitian=False, tol=0):
    if isdense(M):
        return LuInv(M).matvec
    elif issparse(M) or is_pydata_spmatrix(M):
        M = _fast_spmatrix_to_csc(M, hermitian=hermitian)
        return SpLuInv(M).matvec
    else:
        return IterInv(M, tol=tol).matvec


def get_OPinv_matvec(A, M, sigma, hermitian=False, tol=0):
    if sigma == 0:
        return get_inv_matvec(A, hermitian=hermitian, tol=tol)

    if M is None:
        #M is the identity matrix
        if isdense(A):
            if (np.issubdtype(A.dtype, np.complexfloating)
                    or np.imag(sigma) == 0):
                A = np.copy(A)
            else:
                A = A + 0j
            A.flat[::A.shape[1] + 1] -= sigma
            return LuInv(A).matvec
        elif issparse(A) or is_pydata_spmatrix(A):
            A = A - sigma * eye(A.shape[0])
            A = _fast_spmatrix_to_csc(A, hermitian=hermitian)
            return SpLuInv(A).matvec
        else:
            return IterOpInv(_aslinearoperator_with_dtype(A),
                             M, sigma, tol=tol).matvec
    else:
        if ((not isdense(A) and not issparse(A) and not is_pydata_spmatrix(A)) or
                (not isdense(M) and not issparse(M) and not is_pydata_spmatrix(A))):
            return IterOpInv(_aslinearoperator_with_dtype(A),
                             _aslinearoperator_with_dtype(M),
                             sigma, tol=tol).matvec
        elif isdense(A) or isdense(M):
            return LuInv(A - sigma * M).matvec
        else:
            OP = A - sigma * M
            OP = _fast_spmatrix_to_csc(OP, hermitian=hermitian)
            return SpLuInv(OP).matvec


# ARPACK is not threadsafe or reentrant (SAVE variables), so we need a
# lock and a re-entering check.
_ARPACK_LOCK = ReentrancyLock("Nested calls to eigs/eighs not allowed: "
                              "ARPACK is not re-entrant")

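
# Editor's illustrative sketch (not part of the original scipy module): the
# SpLuInv/LuInv helpers above implement a factor-once, solve-many pattern for
# the shift-invert operator.  The same effect is available from the outside
# by passing OPinv to eigs(), for example:
def _demo_user_supplied_opinv(A, k=4, sigma=1.5):
    # Factor (A - sigma*I) once and reuse the factorization for every solve.
    lu = splu((A - sigma * eye(A.shape[0])).tocsc())
    OPinv = LinearOperator(A.shape, matvec=lu.solve, dtype=A.dtype)
    return eigs(A, k=k, sigma=sigma, OPinv=OPinv)

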
1105 |
+
def eigs(A, k=6, M=None, sigma=None, which='LM', v0=None,
|
1106 |
+
ncv=None, maxiter=None, tol=0, return_eigenvectors=True,
|
1107 |
+
Minv=None, OPinv=None, OPpart=None):
|
1108 |
+
"""
|
1109 |
+
Find k eigenvalues and eigenvectors of the square matrix A.
|
1110 |
+
|
1111 |
+
Solves ``A @ x[i] = w[i] * x[i]``, the standard eigenvalue problem
|
1112 |
+
for w[i] eigenvalues with corresponding eigenvectors x[i].
|
1113 |
+
|
1114 |
+
If M is specified, solves ``A @ x[i] = w[i] * M @ x[i]``, the
|
1115 |
+
generalized eigenvalue problem for w[i] eigenvalues
|
1116 |
+
with corresponding eigenvectors x[i]
|
1117 |
+
|
1118 |
+
Parameters
|
1119 |
+
----------
|
1120 |
+
A : ndarray, sparse matrix or LinearOperator
|
1121 |
+
An array, sparse matrix, or LinearOperator representing
|
1122 |
+
the operation ``A @ x``, where A is a real or complex square matrix.
|
1123 |
+
k : int, optional
|
1124 |
+
The number of eigenvalues and eigenvectors desired.
|
1125 |
+
`k` must be smaller than N-1. It is not possible to compute all
|
1126 |
+
eigenvectors of a matrix.
|
1127 |
+
M : ndarray, sparse matrix or LinearOperator, optional
|
1128 |
+
An array, sparse matrix, or LinearOperator representing
|
1129 |
+
the operation M@x for the generalized eigenvalue problem
|
1130 |
+
|
1131 |
+
A @ x = w * M @ x.
|
1132 |
+
|
1133 |
+
M must represent a real symmetric matrix if A is real, and must
|
1134 |
+
represent a complex Hermitian matrix if A is complex. For best
|
1135 |
+
results, the data type of M should be the same as that of A.
|
1136 |
+
Additionally:
|
1137 |
+
|
1138 |
+
If `sigma` is None, M is positive definite
|
1139 |
+
|
1140 |
+
If sigma is specified, M is positive semi-definite
|
1141 |
+
|
1142 |
+
If sigma is None, eigs requires an operator to compute the solution
|
1143 |
+
of the linear equation ``M @ x = b``. This is done internally via a
|
1144 |
+
(sparse) LU decomposition for an explicit matrix M, or via an
|
1145 |
+
iterative solver for a general linear operator. Alternatively,
|
1146 |
+
the user can supply the matrix or operator Minv, which gives
|
1147 |
+
``x = Minv @ b = M^-1 @ b``.
|
1148 |
+
sigma : real or complex, optional
|
1149 |
+
Find eigenvalues near sigma using shift-invert mode. This requires
|
1150 |
+
an operator to compute the solution of the linear system
|
1151 |
+
``[A - sigma * M] @ x = b``, where M is the identity matrix if
|
1152 |
+
unspecified. This is computed internally via a (sparse) LU
|
1153 |
+
decomposition for explicit matrices A & M, or via an iterative
|
1154 |
+
solver if either A or M is a general linear operator.
|
1155 |
+
Alternatively, the user can supply the matrix or operator OPinv,
|
1156 |
+
which gives ``x = OPinv @ b = [A - sigma * M]^-1 @ b``.
|
1157 |
+
For a real matrix A, shift-invert can either be done in imaginary
|
1158 |
+
mode or real mode, specified by the parameter OPpart ('r' or 'i').
|
1159 |
+
Note that when sigma is specified, the keyword 'which' (below)
|
1160 |
+
refers to the shifted eigenvalues ``w'[i]`` where:
|
1161 |
+
|
1162 |
+
If A is real and OPpart == 'r' (default),
|
1163 |
+
``w'[i] = 1/2 * [1/(w[i]-sigma) + 1/(w[i]-conj(sigma))]``.
|
1164 |
+
|
1165 |
+
If A is real and OPpart == 'i',
|
1166 |
+
``w'[i] = 1/2i * [1/(w[i]-sigma) - 1/(w[i]-conj(sigma))]``.
|
1167 |
+
|
1168 |
+
If A is complex, ``w'[i] = 1/(w[i]-sigma)``.
|
1169 |
+
|
1170 |
+
v0 : ndarray, optional
|
1171 |
+
Starting vector for iteration.
|
1172 |
+
Default: random
|
1173 |
+
ncv : int, optional
|
1174 |
+
The number of Lanczos vectors generated
|
1175 |
+
`ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``.
|
1176 |
+
Default: ``min(n, max(2*k + 1, 20))``
|
1177 |
+
which : str, ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI'], optional
|
1178 |
+
Which `k` eigenvectors and eigenvalues to find:
|
1179 |
+
|
1180 |
+
'LM' : largest magnitude
|
1181 |
+
|
1182 |
+
'SM' : smallest magnitude
|
1183 |
+
|
1184 |
+
'LR' : largest real part
|
1185 |
+
|
1186 |
+
'SR' : smallest real part
|
1187 |
+
|
1188 |
+
'LI' : largest imaginary part
|
1189 |
+
|
1190 |
+
'SI' : smallest imaginary part
|
1191 |
+
|
1192 |
+
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
|
1193 |
+
(see discussion in 'sigma', above). ARPACK is generally better
|
1194 |
+
at finding large values than small values. If small eigenvalues are
|
1195 |
+
desired, consider using shift-invert mode for better performance.
|
1196 |
+
maxiter : int, optional
|
1197 |
+
Maximum number of Arnoldi update iterations allowed
|
1198 |
+
Default: ``n*10``
|
1199 |
+
tol : float, optional
|
1200 |
+
Relative accuracy for eigenvalues (stopping criterion)
|
1201 |
+
The default value of 0 implies machine precision.
|
1202 |
+
return_eigenvectors : bool, optional
|
1203 |
+
Return eigenvectors (True) in addition to eigenvalues
|
1204 |
+
Minv : ndarray, sparse matrix or LinearOperator, optional
|
1205 |
+
See notes in M, above.
|
1206 |
+
OPinv : ndarray, sparse matrix or LinearOperator, optional
|
1207 |
+
See notes in sigma, above.
|
1208 |
+
OPpart : {'r' or 'i'}, optional
|
1209 |
+
See notes in sigma, above
|
1210 |
+
|
1211 |
+
Returns
|
1212 |
+
-------
|
1213 |
+
w : ndarray
|
1214 |
+
Array of k eigenvalues.
|
1215 |
+
v : ndarray
|
1216 |
+
An array of `k` eigenvectors.
|
1217 |
+
``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].
|
1218 |
+
|
1219 |
+
Raises
|
1220 |
+
------
|
1221 |
+
ArpackNoConvergence
|
1222 |
+
When the requested convergence is not obtained.
|
1223 |
+
The currently converged eigenvalues and eigenvectors can be found
|
1224 |
+
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
|
1225 |
+
object.
|
1226 |
+
|
1227 |
+
See Also
|
1228 |
+
--------
|
1229 |
+
eigsh : eigenvalues and eigenvectors for symmetric matrix A
|
1230 |
+
svds : singular value decomposition for a matrix A
|
1231 |
+
|
1232 |
+
Notes
|
1233 |
+
-----
|
1234 |
+
This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD,
|
1235 |
+
ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to
|
1236 |
+
find the eigenvalues and eigenvectors [2]_.
|
1237 |
+
|
1238 |
+
References
|
1239 |
+
----------
|
1240 |
+
.. [1] ARPACK Software, https://github.com/opencollab/arpack-ng
|
1241 |
+
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
|
1242 |
+
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
|
1243 |
+
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
|
1244 |
+
|
1245 |
+
Examples
|
1246 |
+
--------
|
1247 |
+
Find 6 eigenvectors of the identity matrix:
|
1248 |
+
|
1249 |
+
>>> import numpy as np
|
1250 |
+
>>> from scipy.sparse.linalg import eigs
|
1251 |
+
>>> id = np.eye(13)
|
1252 |
+
>>> vals, vecs = eigs(id, k=6)
|
1253 |
+
>>> vals
|
1254 |
+
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
|
1255 |
+
>>> vecs.shape
|
1256 |
+
(13, 6)
|
1257 |
+
|
1258 |
+
"""
|
1259 |
+
if A.shape[0] != A.shape[1]:
|
1260 |
+
raise ValueError(f'expected square matrix (shape={A.shape})')
|
1261 |
+
if M is not None:
|
1262 |
+
if M.shape != A.shape:
|
1263 |
+
raise ValueError(f'wrong M dimensions {M.shape}, should be {A.shape}')
|
1264 |
+
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
|
1265 |
+
warnings.warn('M does not have the same type precision as A. '
|
1266 |
+
'This may adversely affect ARPACK convergence',
|
1267 |
+
stacklevel=2)
|
1268 |
+
|
1269 |
+
n = A.shape[0]
|
1270 |
+
|
1271 |
+
if k <= 0:
|
1272 |
+
raise ValueError("k=%d must be greater than 0." % k)
|
1273 |
+
|
1274 |
+
if k >= n - 1:
|
1275 |
+
warnings.warn("k >= N - 1 for N * N square matrix. "
|
1276 |
+
"Attempting to use scipy.linalg.eig instead.",
|
1277 |
+
RuntimeWarning, stacklevel=2)
|
1278 |
+
|
1279 |
+
if issparse(A):
|
1280 |
+
raise TypeError("Cannot use scipy.linalg.eig for sparse A with "
|
1281 |
+
"k >= N - 1. Use scipy.linalg.eig(A.toarray()) or"
|
1282 |
+
" reduce k.")
|
1283 |
+
if isinstance(A, LinearOperator):
|
1284 |
+
raise TypeError("Cannot use scipy.linalg.eig for LinearOperator "
|
1285 |
+
"A with k >= N - 1.")
|
1286 |
+
if isinstance(M, LinearOperator):
|
1287 |
+
raise TypeError("Cannot use scipy.linalg.eig for LinearOperator "
|
1288 |
+
"M with k >= N - 1.")
|
1289 |
+
|
1290 |
+
return eig(A, b=M, right=return_eigenvectors)
|
1291 |
+
|
1292 |
+
if sigma is None:
|
1293 |
+
matvec = _aslinearoperator_with_dtype(A).matvec
|
1294 |
+
|
1295 |
+
if OPinv is not None:
|
1296 |
+
raise ValueError("OPinv should not be specified "
|
1297 |
+
"with sigma = None.")
|
1298 |
+
if OPpart is not None:
|
1299 |
+
raise ValueError("OPpart should not be specified with "
|
1300 |
+
"sigma = None or complex A")
|
1301 |
+
|
1302 |
+
if M is None:
|
1303 |
+
# standard eigenvalue problem
|
1304 |
+
mode = 1
|
1305 |
+
M_matvec = None
|
1306 |
+
Minv_matvec = None
|
1307 |
+
if Minv is not None:
|
1308 |
+
raise ValueError("Minv should not be "
|
1309 |
+
"specified with M = None.")
|
1310 |
+
else:
|
1311 |
+
# general eigenvalue problem
|
1312 |
+
mode = 2
|
1313 |
+
if Minv is None:
|
1314 |
+
Minv_matvec = get_inv_matvec(M, hermitian=True, tol=tol)
|
1315 |
+
else:
|
1316 |
+
Minv = _aslinearoperator_with_dtype(Minv)
|
1317 |
+
Minv_matvec = Minv.matvec
|
1318 |
+
M_matvec = _aslinearoperator_with_dtype(M).matvec
|
1319 |
+
else:
|
1320 |
+
# sigma is not None: shift-invert mode
|
1321 |
+
if np.issubdtype(A.dtype, np.complexfloating):
|
1322 |
+
if OPpart is not None:
|
1323 |
+
raise ValueError("OPpart should not be specified "
|
1324 |
+
"with sigma=None or complex A")
|
1325 |
+
mode = 3
|
1326 |
+
elif OPpart is None or OPpart.lower() == 'r':
|
1327 |
+
mode = 3
|
1328 |
+
elif OPpart.lower() == 'i':
|
1329 |
+
if np.imag(sigma) == 0:
|
1330 |
+
raise ValueError("OPpart cannot be 'i' if sigma is real")
|
1331 |
+
mode = 4
|
1332 |
+
else:
|
1333 |
+
raise ValueError("OPpart must be one of ('r','i')")
|
1334 |
+
|
1335 |
+
matvec = _aslinearoperator_with_dtype(A).matvec
|
1336 |
+
if Minv is not None:
|
1337 |
+
raise ValueError("Minv should not be specified when sigma is")
|
1338 |
+
if OPinv is None:
|
1339 |
+
Minv_matvec = get_OPinv_matvec(A, M, sigma,
|
1340 |
+
hermitian=False, tol=tol)
|
1341 |
+
else:
|
1342 |
+
OPinv = _aslinearoperator_with_dtype(OPinv)
|
1343 |
+
Minv_matvec = OPinv.matvec
|
1344 |
+
if M is None:
|
1345 |
+
M_matvec = None
|
1346 |
+
else:
|
1347 |
+
M_matvec = _aslinearoperator_with_dtype(M).matvec
|
1348 |
+
|
1349 |
+
params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
|
1350 |
+
M_matvec, Minv_matvec, sigma,
|
1351 |
+
ncv, v0, maxiter, which, tol)
|
1352 |
+
|
1353 |
+
with _ARPACK_LOCK:
|
1354 |
+
while not params.converged:
|
1355 |
+
params.iterate()
|
1356 |
+
|
1357 |
+
return params.extract(return_eigenvectors)
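The shift-invert machinery documented in the docstring above can be exercised directly. The following is an illustrative sketch only, not part of the file in this diff; the matrix, `sigma`, and `k` values are arbitrary assumptions chosen for demonstration.
import numpy as np
from scipy.sparse import diags
from scipy.sparse.linalg import eigs
# Tridiagonal test matrix; csc format so the internal sparse LU factorization
# of (A - sigma*I) used by shift-invert mode applies directly.
A = diags([1.0, -2.0, 1.0], [-1, 0, 1], shape=(100, 100), format='csc')
# sigma=-1.0 asks for eigenvalues near -1; under shift-invert these map to the
# largest shifted values w'[i] = 1/(w[i] - sigma), hence which='LM'.
vals, vecs = eigs(A, k=4, sigma=-1.0, which='LM')
print(np.sort(vals.real))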
|
1358 |
+
|
1359 |
+
|
1360 |
+
def eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None,
|
1361 |
+
ncv=None, maxiter=None, tol=0, return_eigenvectors=True,
|
1362 |
+
Minv=None, OPinv=None, mode='normal'):
|
1363 |
+
"""
|
1364 |
+
Find k eigenvalues and eigenvectors of the real symmetric square matrix
|
1365 |
+
or complex Hermitian matrix A.
|
1366 |
+
|
1367 |
+
Solves ``A @ x[i] = w[i] * x[i]``, the standard eigenvalue problem for
|
1368 |
+
w[i] eigenvalues with corresponding eigenvectors x[i].
|
1369 |
+
|
1370 |
+
If M is specified, solves ``A @ x[i] = w[i] * M @ x[i]``, the
|
1371 |
+
generalized eigenvalue problem for w[i] eigenvalues
|
1372 |
+
with corresponding eigenvectors x[i].
|
1373 |
+
|
1374 |
+
Note that there is no specialized routine for the case when A is a complex
|
1375 |
+
Hermitian matrix. In this case, ``eigsh()`` will call ``eigs()`` and return the
|
1376 |
+
real parts of the eigenvalues thus obtained.
|
1377 |
+
|
1378 |
+
Parameters
|
1379 |
+
----------
|
1380 |
+
A : ndarray, sparse matrix or LinearOperator
|
1381 |
+
A square operator representing the operation ``A @ x``, where ``A`` is
|
1382 |
+
real symmetric or complex Hermitian. For buckling mode (see below)
|
1383 |
+
``A`` must additionally be positive-definite.
|
1384 |
+
k : int, optional
|
1385 |
+
The number of eigenvalues and eigenvectors desired.
|
1386 |
+
`k` must be smaller than N. It is not possible to compute all
|
1387 |
+
eigenvectors of a matrix.
|
1388 |
+
|
1389 |
+
Returns
|
1390 |
+
-------
|
1391 |
+
w : array
|
1392 |
+
Array of k eigenvalues.
|
1393 |
+
v : array
|
1394 |
+
An array representing the `k` eigenvectors. The column ``v[:, i]`` is
|
1395 |
+
the eigenvector corresponding to the eigenvalue ``w[i]``.
|
1396 |
+
|
1397 |
+
Other Parameters
|
1398 |
+
----------------
|
1399 |
+
M : An N x N matrix, array, sparse matrix, or linear operator representing
|
1400 |
+
the operation ``M @ x`` for the generalized eigenvalue problem
|
1401 |
+
|
1402 |
+
A @ x = w * M @ x.
|
1403 |
+
|
1404 |
+
M must represent a real symmetric matrix if A is real, and must
|
1405 |
+
represent a complex Hermitian matrix if A is complex. For best
|
1406 |
+
results, the data type of M should be the same as that of A.
|
1407 |
+
Additionally:
|
1408 |
+
|
1409 |
+
If sigma is None, M is symmetric positive definite.
|
1410 |
+
|
1411 |
+
If sigma is specified, M is symmetric positive semi-definite.
|
1412 |
+
|
1413 |
+
In buckling mode, M is symmetric indefinite.
|
1414 |
+
|
1415 |
+
If sigma is None, eigsh requires an operator to compute the solution
|
1416 |
+
of the linear equation ``M @ x = b``. This is done internally via a
|
1417 |
+
(sparse) LU decomposition for an explicit matrix M, or via an
|
1418 |
+
iterative solver for a general linear operator. Alternatively,
|
1419 |
+
the user can supply the matrix or operator Minv, which gives
|
1420 |
+
``x = Minv @ b = M^-1 @ b``.
|
1421 |
+
sigma : real
|
1422 |
+
Find eigenvalues near sigma using shift-invert mode. This requires
|
1423 |
+
an operator to compute the solution of the linear system
|
1424 |
+
``[A - sigma * M] x = b``, where M is the identity matrix if
|
1425 |
+
unspecified. This is computed internally via a (sparse) LU
|
1426 |
+
decomposition for explicit matrices A & M, or via an iterative
|
1427 |
+
solver if either A or M is a general linear operator.
|
1428 |
+
Alternatively, the user can supply the matrix or operator OPinv,
|
1429 |
+
which gives ``x = OPinv @ b = [A - sigma * M]^-1 @ b``.
|
1430 |
+
Note that when sigma is specified, the keyword 'which' refers to
|
1431 |
+
the shifted eigenvalues ``w'[i]`` where:
|
1432 |
+
|
1433 |
+
if mode == 'normal', ``w'[i] = 1 / (w[i] - sigma)``.
|
1434 |
+
|
1435 |
+
if mode == 'cayley', ``w'[i] = (w[i] + sigma) / (w[i] - sigma)``.
|
1436 |
+
|
1437 |
+
if mode == 'buckling', ``w'[i] = w[i] / (w[i] - sigma)``.
|
1438 |
+
|
1439 |
+
(see further discussion in 'mode' below)
|
1440 |
+
v0 : ndarray, optional
|
1441 |
+
Starting vector for iteration.
|
1442 |
+
Default: random
|
1443 |
+
ncv : int, optional
|
1444 |
+
The number of Lanczos vectors generated. ncv must be greater than k and
|
1445 |
+
smaller than n; it is recommended that ``ncv > 2*k``.
|
1446 |
+
Default: ``min(n, max(2*k + 1, 20))``
|
1447 |
+
which : str ['LM' | 'SM' | 'LA' | 'SA' | 'BE']
|
1448 |
+
If A is a complex Hermitian matrix, 'BE' is invalid.
|
1449 |
+
Which `k` eigenvectors and eigenvalues to find:
|
1450 |
+
|
1451 |
+
'LM' : Largest (in magnitude) eigenvalues.
|
1452 |
+
|
1453 |
+
'SM' : Smallest (in magnitude) eigenvalues.
|
1454 |
+
|
1455 |
+
'LA' : Largest (algebraic) eigenvalues.
|
1456 |
+
|
1457 |
+
'SA' : Smallest (algebraic) eigenvalues.
|
1458 |
+
|
1459 |
+
'BE' : Half (k/2) from each end of the spectrum.
|
1460 |
+
|
1461 |
+
When k is odd, return one more (k/2+1) from the high end.
|
1462 |
+
When sigma != None, 'which' refers to the shifted eigenvalues ``w'[i]``
|
1463 |
+
(see discussion in 'sigma', above). ARPACK is generally better
|
1464 |
+
at finding large values than small values. If small eigenvalues are
|
1465 |
+
desired, consider using shift-invert mode for better performance.
|
1466 |
+
maxiter : int, optional
|
1467 |
+
Maximum number of Arnoldi update iterations allowed.
|
1468 |
+
Default: ``n*10``
|
1469 |
+
tol : float
|
1470 |
+
Relative accuracy for eigenvalues (stopping criterion).
|
1471 |
+
The default value of 0 implies machine precision.
|
1472 |
+
Minv : N x N matrix, array, sparse matrix, or LinearOperator
|
1473 |
+
See notes in M, above.
|
1474 |
+
OPinv : N x N matrix, array, sparse matrix, or LinearOperator
|
1475 |
+
See notes in sigma, above.
|
1476 |
+
return_eigenvectors : bool
|
1477 |
+
Return eigenvectors (True) in addition to eigenvalues.
|
1478 |
+
This value determines the order in which eigenvalues are sorted.
|
1479 |
+
The sort order is also dependent on the `which` variable.
|
1480 |
+
|
1481 |
+
For which = 'LM' or 'SA':
|
1482 |
+
If `return_eigenvectors` is True, eigenvalues are sorted by
|
1483 |
+
algebraic value.
|
1484 |
+
|
1485 |
+
If `return_eigenvectors` is False, eigenvalues are sorted by
|
1486 |
+
absolute value.
|
1487 |
+
|
1488 |
+
For which = 'BE' or 'LA':
|
1489 |
+
eigenvalues are always sorted by algebraic value.
|
1490 |
+
|
1491 |
+
For which = 'SM':
|
1492 |
+
If `return_eigenvectors` is True, eigenvalues are sorted by
|
1493 |
+
algebraic value.
|
1494 |
+
|
1495 |
+
If `return_eigenvectors` is False, eigenvalues are sorted by
|
1496 |
+
decreasing absolute value.
|
1497 |
+
|
1498 |
+
mode : string ['normal' | 'buckling' | 'cayley']
|
1499 |
+
Specify strategy to use for shift-invert mode. This argument applies
|
1500 |
+
only for real-valued A and sigma != None. For shift-invert mode,
|
1501 |
+
ARPACK internally solves the eigenvalue problem
|
1502 |
+
``OP @ x'[i] = w'[i] * B @ x'[i]``
|
1503 |
+
and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i]
|
1504 |
+
into the desired eigenvectors and eigenvalues of the problem
|
1505 |
+
``A @ x[i] = w[i] * M @ x[i]``.
|
1506 |
+
The modes are as follows:
|
1507 |
+
|
1508 |
+
'normal' :
|
1509 |
+
OP = [A - sigma * M]^-1 @ M,
|
1510 |
+
B = M,
|
1511 |
+
w'[i] = 1 / (w[i] - sigma)
|
1512 |
+
|
1513 |
+
'buckling' :
|
1514 |
+
OP = [A - sigma * M]^-1 @ A,
|
1515 |
+
B = A,
|
1516 |
+
w'[i] = w[i] / (w[i] - sigma)
|
1517 |
+
|
1518 |
+
'cayley' :
|
1519 |
+
OP = [A - sigma * M]^-1 @ [A + sigma * M],
|
1520 |
+
B = M,
|
1521 |
+
w'[i] = (w[i] + sigma) / (w[i] - sigma)
|
1522 |
+
|
1523 |
+
The choice of mode will affect which eigenvalues are selected by
|
1524 |
+
the keyword 'which', and can also impact the stability of
|
1525 |
+
convergence (see [2] for a discussion).
|
1526 |
+
|
1527 |
+
Raises
|
1528 |
+
------
|
1529 |
+
ArpackNoConvergence
|
1530 |
+
When the requested convergence is not obtained.
|
1531 |
+
|
1532 |
+
The currently converged eigenvalues and eigenvectors can be found
|
1533 |
+
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
|
1534 |
+
object.
|
1535 |
+
|
1536 |
+
See Also
|
1537 |
+
--------
|
1538 |
+
eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A
|
1539 |
+
svds : singular value decomposition for a matrix A
|
1540 |
+
|
1541 |
+
Notes
|
1542 |
+
-----
|
1543 |
+
This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD
|
1544 |
+
functions which use the Implicitly Restarted Lanczos Method to
|
1545 |
+
find the eigenvalues and eigenvectors [2]_.
|
1546 |
+
|
1547 |
+
References
|
1548 |
+
----------
|
1549 |
+
.. [1] ARPACK Software, https://github.com/opencollab/arpack-ng
|
1550 |
+
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
|
1551 |
+
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
|
1552 |
+
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
|
1553 |
+
|
1554 |
+
Examples
|
1555 |
+
--------
|
1556 |
+
>>> import numpy as np
|
1557 |
+
>>> from scipy.sparse.linalg import eigsh
|
1558 |
+
>>> identity = np.eye(13)
|
1559 |
+
>>> eigenvalues, eigenvectors = eigsh(identity, k=6)
|
1560 |
+
>>> eigenvalues
|
1561 |
+
array([1., 1., 1., 1., 1., 1.])
|
1562 |
+
>>> eigenvectors.shape
|
1563 |
+
(13, 6)
|
1564 |
+
|
1565 |
+
"""
|
1566 |
+
# complex Hermitian matrices should be solved with eigs
|
1567 |
+
if np.issubdtype(A.dtype, np.complexfloating):
|
1568 |
+
if mode != 'normal':
|
1569 |
+
raise ValueError("mode=%s cannot be used with "
|
1570 |
+
"complex matrix A" % mode)
|
1571 |
+
if which == 'BE':
|
1572 |
+
raise ValueError("which='BE' cannot be used with complex matrix A")
|
1573 |
+
elif which == 'LA':
|
1574 |
+
which = 'LR'
|
1575 |
+
elif which == 'SA':
|
1576 |
+
which = 'SR'
|
1577 |
+
ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0,
|
1578 |
+
ncv=ncv, maxiter=maxiter, tol=tol,
|
1579 |
+
return_eigenvectors=return_eigenvectors, Minv=Minv,
|
1580 |
+
OPinv=OPinv)
|
1581 |
+
|
1582 |
+
if return_eigenvectors:
|
1583 |
+
return ret[0].real, ret[1]
|
1584 |
+
else:
|
1585 |
+
return ret.real
|
1586 |
+
|
1587 |
+
if A.shape[0] != A.shape[1]:
|
1588 |
+
raise ValueError(f'expected square matrix (shape={A.shape})')
|
1589 |
+
if M is not None:
|
1590 |
+
if M.shape != A.shape:
|
1591 |
+
raise ValueError(f'wrong M dimensions {M.shape}, should be {A.shape}')
|
1592 |
+
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
|
1593 |
+
warnings.warn('M does not have the same type precision as A. '
|
1594 |
+
'This may adversely affect ARPACK convergence',
|
1595 |
+
stacklevel=2)
|
1596 |
+
|
1597 |
+
n = A.shape[0]
|
1598 |
+
|
1599 |
+
if k <= 0:
|
1600 |
+
raise ValueError("k must be greater than 0.")
|
1601 |
+
|
1602 |
+
if k >= n:
|
1603 |
+
warnings.warn("k >= N for N * N square matrix. "
|
1604 |
+
"Attempting to use scipy.linalg.eigh instead.",
|
1605 |
+
RuntimeWarning, stacklevel=2)
|
1606 |
+
|
1607 |
+
if issparse(A):
|
1608 |
+
raise TypeError("Cannot use scipy.linalg.eigh for sparse A with "
|
1609 |
+
"k >= N. Use scipy.linalg.eigh(A.toarray()) or"
|
1610 |
+
" reduce k.")
|
1611 |
+
if isinstance(A, LinearOperator):
|
1612 |
+
raise TypeError("Cannot use scipy.linalg.eigh for LinearOperator "
|
1613 |
+
"A with k >= N.")
|
1614 |
+
if isinstance(M, LinearOperator):
|
1615 |
+
raise TypeError("Cannot use scipy.linalg.eigh for LinearOperator "
|
1616 |
+
"M with k >= N.")
|
1617 |
+
|
1618 |
+
return eigh(A, b=M, eigvals_only=not return_eigenvectors)
|
1619 |
+
|
1620 |
+
if sigma is None:
|
1621 |
+
A = _aslinearoperator_with_dtype(A)
|
1622 |
+
matvec = A.matvec
|
1623 |
+
|
1624 |
+
if OPinv is not None:
|
1625 |
+
raise ValueError("OPinv should not be specified "
|
1626 |
+
"with sigma = None.")
|
1627 |
+
if M is None:
|
1628 |
+
# standard eigenvalue problem
|
1629 |
+
mode = 1
|
1630 |
+
M_matvec = None
|
1631 |
+
Minv_matvec = None
|
1632 |
+
if Minv is not None:
|
1633 |
+
raise ValueError("Minv should not be "
|
1634 |
+
"specified with M = None.")
|
1635 |
+
else:
|
1636 |
+
# general eigenvalue problem
|
1637 |
+
mode = 2
|
1638 |
+
if Minv is None:
|
1639 |
+
Minv_matvec = get_inv_matvec(M, hermitian=True, tol=tol)
|
1640 |
+
else:
|
1641 |
+
Minv = _aslinearoperator_with_dtype(Minv)
|
1642 |
+
Minv_matvec = Minv.matvec
|
1643 |
+
M_matvec = _aslinearoperator_with_dtype(M).matvec
|
1644 |
+
else:
|
1645 |
+
# sigma is not None: shift-invert mode
|
1646 |
+
if Minv is not None:
|
1647 |
+
raise ValueError("Minv should not be specified when sigma is")
|
1648 |
+
|
1649 |
+
# normal mode
|
1650 |
+
if mode == 'normal':
|
1651 |
+
mode = 3
|
1652 |
+
matvec = None
|
1653 |
+
if OPinv is None:
|
1654 |
+
Minv_matvec = get_OPinv_matvec(A, M, sigma,
|
1655 |
+
hermitian=True, tol=tol)
|
1656 |
+
else:
|
1657 |
+
OPinv = _aslinearoperator_with_dtype(OPinv)
|
1658 |
+
Minv_matvec = OPinv.matvec
|
1659 |
+
if M is None:
|
1660 |
+
M_matvec = None
|
1661 |
+
else:
|
1662 |
+
M = _aslinearoperator_with_dtype(M)
|
1663 |
+
M_matvec = M.matvec
|
1664 |
+
|
1665 |
+
# buckling mode
|
1666 |
+
elif mode == 'buckling':
|
1667 |
+
mode = 4
|
1668 |
+
if OPinv is None:
|
1669 |
+
Minv_matvec = get_OPinv_matvec(A, M, sigma,
|
1670 |
+
hermitian=True, tol=tol)
|
1671 |
+
else:
|
1672 |
+
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
|
1673 |
+
matvec = _aslinearoperator_with_dtype(A).matvec
|
1674 |
+
M_matvec = None
|
1675 |
+
|
1676 |
+
# cayley-transform mode
|
1677 |
+
elif mode == 'cayley':
|
1678 |
+
mode = 5
|
1679 |
+
matvec = _aslinearoperator_with_dtype(A).matvec
|
1680 |
+
if OPinv is None:
|
1681 |
+
Minv_matvec = get_OPinv_matvec(A, M, sigma,
|
1682 |
+
hermitian=True, tol=tol)
|
1683 |
+
else:
|
1684 |
+
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
|
1685 |
+
if M is None:
|
1686 |
+
M_matvec = None
|
1687 |
+
else:
|
1688 |
+
M_matvec = _aslinearoperator_with_dtype(M).matvec
|
1689 |
+
|
1690 |
+
# unrecognized mode
|
1691 |
+
else:
|
1692 |
+
raise ValueError("unrecognized mode '%s'" % mode)
|
1693 |
+
|
1694 |
+
params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
|
1695 |
+
M_matvec, Minv_matvec, sigma,
|
1696 |
+
ncv, v0, maxiter, which, tol)
|
1697 |
+
|
1698 |
+
with _ARPACK_LOCK:
|
1699 |
+
while not params.converged:
|
1700 |
+
params.iterate()
|
1701 |
+
|
1702 |
+
return params.extract(return_eigenvectors)
|
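An illustrative sketch of the symmetric shift-invert path implemented above (not part of the source file in this diff): the matrix, `k`, and `sigma` are arbitrary assumptions, and the dense eigvalsh comparison is only a sanity check.
import numpy as np
from scipy.sparse import diags
from scipy.sparse.linalg import eigsh
n = 100
# Symmetric positive definite 1-D Laplacian; csc so the sparse LU used by
# shift-invert ('normal' mode) can factor A - sigma*I directly.
A = diags([-1.0, 2.0, -1.0], [-1, 0, 1], shape=(n, n), format='csc')
# sigma=0 maps the smallest eigenvalues of A to the largest values of
# 1/(w - sigma), so which='LM' on the shifted problem returns them quickly.
w, v = eigsh(A, k=3, sigma=0, which='LM')
print(np.allclose(np.sort(w), np.linalg.eigvalsh(A.toarray())[:3]))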
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/tests/__init__.py
ADDED
File without changes
|
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/tests/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (204 Bytes).
|
|
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/tests/__pycache__/test_arpack.cpython-310.pyc
ADDED
Binary file (17.5 kB).
|
|
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/arpack/tests/test_arpack.py
ADDED
@@ -0,0 +1,718 @@
1 |
+
__usage__ = """
|
2 |
+
To run tests locally:
|
3 |
+
python tests/test_arpack.py [-l<int>] [-v<int>]
|
4 |
+
|
5 |
+
"""
|
6 |
+
|
7 |
+
import threading
|
8 |
+
import itertools
|
9 |
+
|
10 |
+
import numpy as np
|
11 |
+
|
12 |
+
from numpy.testing import assert_allclose, assert_equal, suppress_warnings
|
13 |
+
from pytest import raises as assert_raises
|
14 |
+
import pytest
|
15 |
+
|
16 |
+
from numpy import dot, conj, random
|
17 |
+
from scipy.linalg import eig, eigh
|
18 |
+
from scipy.sparse import csc_matrix, csr_matrix, diags, rand
|
19 |
+
from scipy.sparse.linalg import LinearOperator, aslinearoperator
|
20 |
+
from scipy.sparse.linalg._eigen.arpack import (eigs, eigsh, arpack,
|
21 |
+
ArpackNoConvergence)
|
22 |
+
|
23 |
+
|
24 |
+
from scipy._lib._gcutils import assert_deallocated, IS_PYPY
|
25 |
+
|
26 |
+
|
27 |
+
# precision for tests
|
28 |
+
_ndigits = {'f': 3, 'd': 11, 'F': 3, 'D': 11}
|
29 |
+
|
30 |
+
|
31 |
+
def _get_test_tolerance(type_char, mattype=None, D_type=None, which=None):
|
32 |
+
"""
|
33 |
+
Return tolerance values suitable for a given test:
|
34 |
+
|
35 |
+
Parameters
|
36 |
+
----------
|
37 |
+
type_char : {'f', 'd', 'F', 'D'}
|
38 |
+
Data type in ARPACK eigenvalue problem
|
39 |
+
mattype : {csr_matrix, aslinearoperator, asarray}, optional
|
40 |
+
Linear operator type
|
41 |
+
|
42 |
+
Returns
|
43 |
+
-------
|
44 |
+
tol
|
45 |
+
Tolerance to pass to the ARPACK routine
|
46 |
+
rtol
|
47 |
+
Relative tolerance for outputs
|
48 |
+
atol
|
49 |
+
Absolute tolerance for outputs
|
50 |
+
|
51 |
+
"""
|
52 |
+
|
53 |
+
rtol = {'f': 3000 * np.finfo(np.float32).eps,
|
54 |
+
'F': 3000 * np.finfo(np.float32).eps,
|
55 |
+
'd': 2000 * np.finfo(np.float64).eps,
|
56 |
+
'D': 2000 * np.finfo(np.float64).eps}[type_char]
|
57 |
+
atol = rtol
|
58 |
+
tol = 0
|
59 |
+
|
60 |
+
if mattype is aslinearoperator and type_char in ('f', 'F'):
|
61 |
+
# iterative methods in single precision: worse errors
|
62 |
+
# also: bump ARPACK tolerance so that the iterative method converges
|
63 |
+
tol = 30 * np.finfo(np.float32).eps
|
64 |
+
rtol *= 5
|
65 |
+
|
66 |
+
if mattype is csr_matrix and type_char in ('f', 'F'):
|
67 |
+
# sparse in single precision: worse errors
|
68 |
+
rtol *= 5
|
69 |
+
|
70 |
+
if (
|
71 |
+
which in ('LM', 'SM', 'LA')
|
72 |
+
and D_type.name == "gen-hermitian-Mc"
|
73 |
+
):
|
74 |
+
if type_char == 'F':
|
75 |
+
# missing case 1, 2, and more, from PR 14798
|
76 |
+
rtol *= 5
|
77 |
+
|
78 |
+
if type_char == 'D':
|
79 |
+
# missing more cases, from PR 14798
|
80 |
+
rtol *= 10
|
81 |
+
atol *= 10
|
82 |
+
|
83 |
+
return tol, rtol, atol
|
84 |
+
|
85 |
+
|
86 |
+
def generate_matrix(N, complex_=False, hermitian=False,
|
87 |
+
pos_definite=False, sparse=False):
|
88 |
+
M = np.random.random((N, N))
|
89 |
+
if complex_:
|
90 |
+
M = M + 1j * np.random.random((N, N))
|
91 |
+
|
92 |
+
if hermitian:
|
93 |
+
if pos_definite:
|
94 |
+
if sparse:
|
95 |
+
i = np.arange(N)
|
96 |
+
j = np.random.randint(N, size=N-2)
|
97 |
+
i, j = np.meshgrid(i, j)
|
98 |
+
M[i, j] = 0
|
99 |
+
M = np.dot(M.conj(), M.T)
|
100 |
+
else:
|
101 |
+
M = np.dot(M.conj(), M.T)
|
102 |
+
if sparse:
|
103 |
+
i = np.random.randint(N, size=N * N // 4)
|
104 |
+
j = np.random.randint(N, size=N * N // 4)
|
105 |
+
ind = np.nonzero(i == j)
|
106 |
+
j[ind] = (j[ind] + 1) % N
|
107 |
+
M[i, j] = 0
|
108 |
+
M[j, i] = 0
|
109 |
+
else:
|
110 |
+
if sparse:
|
111 |
+
i = np.random.randint(N, size=N * N // 2)
|
112 |
+
j = np.random.randint(N, size=N * N // 2)
|
113 |
+
M[i, j] = 0
|
114 |
+
return M
|
115 |
+
|
116 |
+
|
117 |
+
def generate_matrix_symmetric(N, pos_definite=False, sparse=False):
|
118 |
+
M = np.random.random((N, N))
|
119 |
+
|
120 |
+
M = 0.5 * (M + M.T) # Make M symmetric
|
121 |
+
|
122 |
+
if pos_definite:
|
123 |
+
Id = N * np.eye(N)
|
124 |
+
if sparse:
|
125 |
+
M = csr_matrix(M)
|
126 |
+
M += Id
|
127 |
+
else:
|
128 |
+
if sparse:
|
129 |
+
M = csr_matrix(M)
|
130 |
+
|
131 |
+
return M
|
132 |
+
|
133 |
+
|
134 |
+
def assert_allclose_cc(actual, desired, **kw):
|
135 |
+
"""Almost equal or complex conjugates almost equal"""
|
136 |
+
try:
|
137 |
+
assert_allclose(actual, desired, **kw)
|
138 |
+
except AssertionError:
|
139 |
+
assert_allclose(actual, conj(desired), **kw)
|
140 |
+
|
141 |
+
|
142 |
+
def argsort_which(eigenvalues, typ, k, which,
|
143 |
+
sigma=None, OPpart=None, mode=None):
|
144 |
+
"""Return sorted indices of eigenvalues using the "which" keyword
|
145 |
+
from eigs and eigsh"""
|
146 |
+
if sigma is None:
|
147 |
+
reval = np.round(eigenvalues, decimals=_ndigits[typ])
|
148 |
+
else:
|
149 |
+
if mode is None or mode == 'normal':
|
150 |
+
if OPpart is None:
|
151 |
+
reval = 1. / (eigenvalues - sigma)
|
152 |
+
elif OPpart == 'r':
|
153 |
+
reval = 0.5 * (1. / (eigenvalues - sigma)
|
154 |
+
+ 1. / (eigenvalues - np.conj(sigma)))
|
155 |
+
elif OPpart == 'i':
|
156 |
+
reval = -0.5j * (1. / (eigenvalues - sigma)
|
157 |
+
- 1. / (eigenvalues - np.conj(sigma)))
|
158 |
+
elif mode == 'cayley':
|
159 |
+
reval = (eigenvalues + sigma) / (eigenvalues - sigma)
|
160 |
+
elif mode == 'buckling':
|
161 |
+
reval = eigenvalues / (eigenvalues - sigma)
|
162 |
+
else:
|
163 |
+
raise ValueError("mode='%s' not recognized" % mode)
|
164 |
+
|
165 |
+
reval = np.round(reval, decimals=_ndigits[typ])
|
166 |
+
|
167 |
+
if which in ['LM', 'SM']:
|
168 |
+
ind = np.argsort(abs(reval))
|
169 |
+
elif which in ['LR', 'SR', 'LA', 'SA', 'BE']:
|
170 |
+
ind = np.argsort(np.real(reval))
|
171 |
+
elif which in ['LI', 'SI']:
|
172 |
+
# for LI,SI ARPACK returns largest,smallest abs(imaginary) why?
|
173 |
+
if typ.islower():
|
174 |
+
ind = np.argsort(abs(np.imag(reval)))
|
175 |
+
else:
|
176 |
+
ind = np.argsort(np.imag(reval))
|
177 |
+
else:
|
178 |
+
raise ValueError("which='%s' is unrecognized" % which)
|
179 |
+
|
180 |
+
if which in ['LM', 'LA', 'LR', 'LI']:
|
181 |
+
return ind[-k:]
|
182 |
+
elif which in ['SM', 'SA', 'SR', 'SI']:
|
183 |
+
return ind[:k]
|
184 |
+
elif which == 'BE':
|
185 |
+
return np.concatenate((ind[:k//2], ind[k//2-k:]))
|
186 |
+
|
187 |
+
|
188 |
+
def eval_evec(symmetric, d, typ, k, which, v0=None, sigma=None,
|
189 |
+
mattype=np.asarray, OPpart=None, mode='normal'):
|
190 |
+
general = ('bmat' in d)
|
191 |
+
|
192 |
+
if symmetric:
|
193 |
+
eigs_func = eigsh
|
194 |
+
else:
|
195 |
+
eigs_func = eigs
|
196 |
+
|
197 |
+
if general:
|
198 |
+
err = ("error for {}:general, typ={}, which={}, sigma={}, "
|
199 |
+
"mattype={}, OPpart={}, mode={}".format(eigs_func.__name__,
|
200 |
+
typ, which, sigma,
|
201 |
+
mattype.__name__,
|
202 |
+
OPpart, mode))
|
203 |
+
else:
|
204 |
+
err = ("error for {}:standard, typ={}, which={}, sigma={}, "
|
205 |
+
"mattype={}, OPpart={}, mode={}".format(eigs_func.__name__,
|
206 |
+
typ, which, sigma,
|
207 |
+
mattype.__name__,
|
208 |
+
OPpart, mode))
|
209 |
+
|
210 |
+
a = d['mat'].astype(typ)
|
211 |
+
ac = mattype(a)
|
212 |
+
|
213 |
+
if general:
|
214 |
+
b = d['bmat'].astype(typ)
|
215 |
+
bc = mattype(b)
|
216 |
+
|
217 |
+
# get exact eigenvalues
|
218 |
+
exact_eval = d['eval'].astype(typ.upper())
|
219 |
+
ind = argsort_which(exact_eval, typ, k, which,
|
220 |
+
sigma, OPpart, mode)
|
221 |
+
exact_eval = exact_eval[ind]
|
222 |
+
|
223 |
+
# compute arpack eigenvalues
|
224 |
+
kwargs = dict(which=which, v0=v0, sigma=sigma)
|
225 |
+
if eigs_func is eigsh:
|
226 |
+
kwargs['mode'] = mode
|
227 |
+
else:
|
228 |
+
kwargs['OPpart'] = OPpart
|
229 |
+
|
230 |
+
# compute suitable tolerances
|
231 |
+
kwargs['tol'], rtol, atol = _get_test_tolerance(typ, mattype, d, which)
|
232 |
+
# on rare occasions, ARPACK routines return results that are proper
|
233 |
+
# eigenvalues and -vectors, but not necessarily the ones requested in
|
234 |
+
# the parameter which. This is inherent to the Krylov methods, and
|
235 |
+
# should not be treated as a failure. If such a rare situation
|
236 |
+
# occurs, the calculation is tried again (but at most a few times).
|
237 |
+
ntries = 0
|
238 |
+
while ntries < 5:
|
239 |
+
# solve
|
240 |
+
if general:
|
241 |
+
try:
|
242 |
+
eigenvalues, evec = eigs_func(ac, k, bc, **kwargs)
|
243 |
+
except ArpackNoConvergence:
|
244 |
+
kwargs['maxiter'] = 20*a.shape[0]
|
245 |
+
eigenvalues, evec = eigs_func(ac, k, bc, **kwargs)
|
246 |
+
else:
|
247 |
+
try:
|
248 |
+
eigenvalues, evec = eigs_func(ac, k, **kwargs)
|
249 |
+
except ArpackNoConvergence:
|
250 |
+
kwargs['maxiter'] = 20*a.shape[0]
|
251 |
+
eigenvalues, evec = eigs_func(ac, k, **kwargs)
|
252 |
+
|
253 |
+
ind = argsort_which(eigenvalues, typ, k, which,
|
254 |
+
sigma, OPpart, mode)
|
255 |
+
eigenvalues = eigenvalues[ind]
|
256 |
+
evec = evec[:, ind]
|
257 |
+
|
258 |
+
try:
|
259 |
+
# check eigenvalues
|
260 |
+
assert_allclose_cc(eigenvalues, exact_eval, rtol=rtol, atol=atol,
|
261 |
+
err_msg=err)
|
262 |
+
check_evecs = True
|
263 |
+
except AssertionError:
|
264 |
+
check_evecs = False
|
265 |
+
ntries += 1
|
266 |
+
|
267 |
+
if check_evecs:
|
268 |
+
# check eigenvectors
|
269 |
+
LHS = np.dot(a, evec)
|
270 |
+
if general:
|
271 |
+
RHS = eigenvalues * np.dot(b, evec)
|
272 |
+
else:
|
273 |
+
RHS = eigenvalues * evec
|
274 |
+
|
275 |
+
assert_allclose(LHS, RHS, rtol=rtol, atol=atol, err_msg=err)
|
276 |
+
break
|
277 |
+
|
278 |
+
# check eigenvalues
|
279 |
+
assert_allclose_cc(eigenvalues, exact_eval, rtol=rtol, atol=atol, err_msg=err)
|
280 |
+
|
281 |
+
|
282 |
+
class DictWithRepr(dict):
|
283 |
+
def __init__(self, name):
|
284 |
+
self.name = name
|
285 |
+
|
286 |
+
def __repr__(self):
|
287 |
+
return "<%s>" % self.name
|
288 |
+
|
289 |
+
|
290 |
+
class SymmetricParams:
|
291 |
+
def __init__(self):
|
292 |
+
self.eigs = eigsh
|
293 |
+
self.which = ['LM', 'SM', 'LA', 'SA', 'BE']
|
294 |
+
self.mattypes = [csr_matrix, aslinearoperator, np.asarray]
|
295 |
+
self.sigmas_modes = {None: ['normal'],
|
296 |
+
0.5: ['normal', 'buckling', 'cayley']}
|
297 |
+
|
298 |
+
# generate matrices
|
299 |
+
# these should all be float32 so that the eigenvalues
|
300 |
+
# are the same in float32 and float64
|
301 |
+
N = 6
|
302 |
+
np.random.seed(2300)
|
303 |
+
Ar = generate_matrix(N, hermitian=True,
|
304 |
+
pos_definite=True).astype('f').astype('d')
|
305 |
+
M = generate_matrix(N, hermitian=True,
|
306 |
+
pos_definite=True).astype('f').astype('d')
|
307 |
+
Ac = generate_matrix(N, hermitian=True, pos_definite=True,
|
308 |
+
complex_=True).astype('F').astype('D')
|
309 |
+
Mc = generate_matrix(N, hermitian=True, pos_definite=True,
|
310 |
+
complex_=True).astype('F').astype('D')
|
311 |
+
v0 = np.random.random(N)
|
312 |
+
|
313 |
+
# standard symmetric problem
|
314 |
+
SS = DictWithRepr("std-symmetric")
|
315 |
+
SS['mat'] = Ar
|
316 |
+
SS['v0'] = v0
|
317 |
+
SS['eval'] = eigh(SS['mat'], eigvals_only=True)
|
318 |
+
|
319 |
+
# general symmetric problem
|
320 |
+
GS = DictWithRepr("gen-symmetric")
|
321 |
+
GS['mat'] = Ar
|
322 |
+
GS['bmat'] = M
|
323 |
+
GS['v0'] = v0
|
324 |
+
GS['eval'] = eigh(GS['mat'], GS['bmat'], eigvals_only=True)
|
325 |
+
|
326 |
+
# standard hermitian problem
|
327 |
+
SH = DictWithRepr("std-hermitian")
|
328 |
+
SH['mat'] = Ac
|
329 |
+
SH['v0'] = v0
|
330 |
+
SH['eval'] = eigh(SH['mat'], eigvals_only=True)
|
331 |
+
|
332 |
+
# general hermitian problem
|
333 |
+
GH = DictWithRepr("gen-hermitian")
|
334 |
+
GH['mat'] = Ac
|
335 |
+
GH['bmat'] = M
|
336 |
+
GH['v0'] = v0
|
337 |
+
GH['eval'] = eigh(GH['mat'], GH['bmat'], eigvals_only=True)
|
338 |
+
|
339 |
+
# general hermitian problem with hermitian M
|
340 |
+
GHc = DictWithRepr("gen-hermitian-Mc")
|
341 |
+
GHc['mat'] = Ac
|
342 |
+
GHc['bmat'] = Mc
|
343 |
+
GHc['v0'] = v0
|
344 |
+
GHc['eval'] = eigh(GHc['mat'], GHc['bmat'], eigvals_only=True)
|
345 |
+
|
346 |
+
self.real_test_cases = [SS, GS]
|
347 |
+
self.complex_test_cases = [SH, GH, GHc]
|
348 |
+
|
349 |
+
|
350 |
+
class NonSymmetricParams:
|
351 |
+
def __init__(self):
|
352 |
+
self.eigs = eigs
|
353 |
+
self.which = ['LM', 'LR', 'LI'] # , 'SM', 'LR', 'SR', 'LI', 'SI']
|
354 |
+
self.mattypes = [csr_matrix, aslinearoperator, np.asarray]
|
355 |
+
self.sigmas_OPparts = {None: [None],
|
356 |
+
0.1: ['r'],
|
357 |
+
0.1 + 0.1j: ['r', 'i']}
|
358 |
+
|
359 |
+
# generate matrices
|
360 |
+
# these should all be float32 so that the eigenvalues
|
361 |
+
# are the same in float32 and float64
|
362 |
+
N = 6
|
363 |
+
np.random.seed(2300)
|
364 |
+
Ar = generate_matrix(N).astype('f').astype('d')
|
365 |
+
M = generate_matrix(N, hermitian=True,
|
366 |
+
pos_definite=True).astype('f').astype('d')
|
367 |
+
Ac = generate_matrix(N, complex_=True).astype('F').astype('D')
|
368 |
+
v0 = np.random.random(N)
|
369 |
+
|
370 |
+
# standard real nonsymmetric problem
|
371 |
+
SNR = DictWithRepr("std-real-nonsym")
|
372 |
+
SNR['mat'] = Ar
|
373 |
+
SNR['v0'] = v0
|
374 |
+
SNR['eval'] = eig(SNR['mat'], left=False, right=False)
|
375 |
+
|
376 |
+
# general real nonsymmetric problem
|
377 |
+
GNR = DictWithRepr("gen-real-nonsym")
|
378 |
+
GNR['mat'] = Ar
|
379 |
+
GNR['bmat'] = M
|
380 |
+
GNR['v0'] = v0
|
381 |
+
GNR['eval'] = eig(GNR['mat'], GNR['bmat'], left=False, right=False)
|
382 |
+
|
383 |
+
# standard complex nonsymmetric problem
|
384 |
+
SNC = DictWithRepr("std-cmplx-nonsym")
|
385 |
+
SNC['mat'] = Ac
|
386 |
+
SNC['v0'] = v0
|
387 |
+
SNC['eval'] = eig(SNC['mat'], left=False, right=False)
|
388 |
+
|
389 |
+
# general complex nonsymmetric problem
|
390 |
+
GNC = DictWithRepr("gen-cmplx-nonsym")
|
391 |
+
GNC['mat'] = Ac
|
392 |
+
GNC['bmat'] = M
|
393 |
+
GNC['v0'] = v0
|
394 |
+
GNC['eval'] = eig(GNC['mat'], GNC['bmat'], left=False, right=False)
|
395 |
+
|
396 |
+
self.real_test_cases = [SNR, GNR]
|
397 |
+
self.complex_test_cases = [SNC, GNC]
|
398 |
+
|
399 |
+
|
400 |
+
def test_symmetric_modes():
|
401 |
+
params = SymmetricParams()
|
402 |
+
k = 2
|
403 |
+
symmetric = True
|
404 |
+
for D in params.real_test_cases:
|
405 |
+
for typ in 'fd':
|
406 |
+
for which in params.which:
|
407 |
+
for mattype in params.mattypes:
|
408 |
+
for (sigma, modes) in params.sigmas_modes.items():
|
409 |
+
for mode in modes:
|
410 |
+
eval_evec(symmetric, D, typ, k, which,
|
411 |
+
None, sigma, mattype, None, mode)
|
412 |
+
|
413 |
+
|
414 |
+
def test_hermitian_modes():
|
415 |
+
params = SymmetricParams()
|
416 |
+
k = 2
|
417 |
+
symmetric = True
|
418 |
+
for D in params.complex_test_cases:
|
419 |
+
for typ in 'FD':
|
420 |
+
for which in params.which:
|
421 |
+
if which == 'BE':
|
422 |
+
continue # BE invalid for complex
|
423 |
+
for mattype in params.mattypes:
|
424 |
+
for sigma in params.sigmas_modes:
|
425 |
+
eval_evec(symmetric, D, typ, k, which,
|
426 |
+
None, sigma, mattype)
|
427 |
+
|
428 |
+
|
429 |
+
def test_symmetric_starting_vector():
|
430 |
+
params = SymmetricParams()
|
431 |
+
symmetric = True
|
432 |
+
for k in [1, 2, 3, 4, 5]:
|
433 |
+
for D in params.real_test_cases:
|
434 |
+
for typ in 'fd':
|
435 |
+
v0 = random.rand(len(D['v0'])).astype(typ)
|
436 |
+
eval_evec(symmetric, D, typ, k, 'LM', v0)
|
437 |
+
|
438 |
+
|
439 |
+
def test_symmetric_no_convergence():
|
440 |
+
np.random.seed(1234)
|
441 |
+
m = generate_matrix(30, hermitian=True, pos_definite=True)
|
442 |
+
tol, rtol, atol = _get_test_tolerance('d')
|
443 |
+
try:
|
444 |
+
w, v = eigsh(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol, ncv=9)
|
445 |
+
raise AssertionError("Spurious no-error exit")
|
446 |
+
except ArpackNoConvergence as err:
|
447 |
+
k = len(err.eigenvalues)
|
448 |
+
if k <= 0:
|
449 |
+
raise AssertionError("Spurious no-eigenvalues-found case") from err
|
450 |
+
w, v = err.eigenvalues, err.eigenvectors
|
451 |
+
assert_allclose(dot(m, v), w * v, rtol=rtol, atol=atol)
|
452 |
+
|
453 |
+
|
454 |
+
def test_real_nonsymmetric_modes():
|
455 |
+
params = NonSymmetricParams()
|
456 |
+
k = 2
|
457 |
+
symmetric = False
|
458 |
+
for D in params.real_test_cases:
|
459 |
+
for typ in 'fd':
|
460 |
+
for which in params.which:
|
461 |
+
for mattype in params.mattypes:
|
462 |
+
for sigma, OPparts in params.sigmas_OPparts.items():
|
463 |
+
for OPpart in OPparts:
|
464 |
+
eval_evec(symmetric, D, typ, k, which,
|
465 |
+
None, sigma, mattype, OPpart)
|
466 |
+
|
467 |
+
|
468 |
+
def test_complex_nonsymmetric_modes():
|
469 |
+
params = NonSymmetricParams()
|
470 |
+
k = 2
|
471 |
+
symmetric = False
|
472 |
+
for D in params.complex_test_cases:
|
473 |
+
for typ in 'DF':
|
474 |
+
for which in params.which:
|
475 |
+
for mattype in params.mattypes:
|
476 |
+
for sigma in params.sigmas_OPparts:
|
477 |
+
eval_evec(symmetric, D, typ, k, which,
|
478 |
+
None, sigma, mattype)
|
479 |
+
|
480 |
+
|
481 |
+
def test_standard_nonsymmetric_starting_vector():
|
482 |
+
params = NonSymmetricParams()
|
483 |
+
sigma = None
|
484 |
+
symmetric = False
|
485 |
+
for k in [1, 2, 3, 4]:
|
486 |
+
for d in params.complex_test_cases:
|
487 |
+
for typ in 'FD':
|
488 |
+
A = d['mat']
|
489 |
+
n = A.shape[0]
|
490 |
+
v0 = random.rand(n).astype(typ)
|
491 |
+
eval_evec(symmetric, d, typ, k, "LM", v0, sigma)
|
492 |
+
|
493 |
+
|
494 |
+
def test_general_nonsymmetric_starting_vector():
|
495 |
+
params = NonSymmetricParams()
|
496 |
+
sigma = None
|
497 |
+
symmetric = False
|
498 |
+
for k in [1, 2, 3, 4]:
|
499 |
+
for d in params.complex_test_cases:
|
500 |
+
for typ in 'FD':
|
501 |
+
A = d['mat']
|
502 |
+
n = A.shape[0]
|
503 |
+
v0 = random.rand(n).astype(typ)
|
504 |
+
eval_evec(symmetric, d, typ, k, "LM", v0, sigma)
|
505 |
+
|
506 |
+
|
507 |
+
def test_standard_nonsymmetric_no_convergence():
|
508 |
+
np.random.seed(1234)
|
509 |
+
m = generate_matrix(30, complex_=True)
|
510 |
+
tol, rtol, atol = _get_test_tolerance('d')
|
511 |
+
try:
|
512 |
+
w, v = eigs(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol)
|
513 |
+
raise AssertionError("Spurious no-error exit")
|
514 |
+
except ArpackNoConvergence as err:
|
515 |
+
k = len(err.eigenvalues)
|
516 |
+
if k <= 0:
|
517 |
+
raise AssertionError("Spurious no-eigenvalues-found case") from err
|
518 |
+
w, v = err.eigenvalues, err.eigenvectors
|
519 |
+
for ww, vv in zip(w, v.T):
|
520 |
+
assert_allclose(dot(m, vv), ww * vv, rtol=rtol, atol=atol)
|
521 |
+
|
522 |
+
|
523 |
+
def test_eigen_bad_shapes():
|
524 |
+
# A is not square.
|
525 |
+
A = csc_matrix(np.zeros((2, 3)))
|
526 |
+
assert_raises(ValueError, eigs, A)
|
527 |
+
|
528 |
+
|
529 |
+
def test_eigen_bad_kwargs():
|
530 |
+
# Test eigen on wrong keyword argument
|
531 |
+
A = csc_matrix(np.zeros((8, 8)))
|
532 |
+
assert_raises(ValueError, eigs, A, which='XX')
|
533 |
+
|
534 |
+
|
535 |
+
def test_ticket_1459_arpack_crash():
|
536 |
+
for dtype in [np.float32, np.float64]:
|
537 |
+
# This test does not seem to catch the issue for float32,
|
538 |
+
# but we made the same fix there, just to be sure
|
539 |
+
|
540 |
+
N = 6
|
541 |
+
k = 2
|
542 |
+
|
543 |
+
np.random.seed(2301)
|
544 |
+
A = np.random.random((N, N)).astype(dtype)
|
545 |
+
v0 = np.array([-0.71063568258907849895, -0.83185111795729227424,
|
546 |
+
-0.34365925382227402451, 0.46122533684552280420,
|
547 |
+
-0.58001341115969040629, -0.78844877570084292984e-01],
|
548 |
+
dtype=dtype)
|
549 |
+
|
550 |
+
# Should not crash:
|
551 |
+
evals, evecs = eigs(A, k, v0=v0)
|
552 |
+
|
553 |
+
|
554 |
+
@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
|
555 |
+
def test_linearoperator_deallocation():
|
556 |
+
# Check that the linear operators used by the Arpack wrappers are
|
557 |
+
# deallocatable by reference counting -- they are big objects, so
|
558 |
+
# Python's cyclic GC may not collect them fast enough before
|
559 |
+
# running out of memory if eigs/eigsh are called in a tight loop.
|
560 |
+
|
561 |
+
M_d = np.eye(10)
|
562 |
+
M_s = csc_matrix(M_d)
|
563 |
+
M_o = aslinearoperator(M_d)
|
564 |
+
|
565 |
+
with assert_deallocated(lambda: arpack.SpLuInv(M_s)):
|
566 |
+
pass
|
567 |
+
with assert_deallocated(lambda: arpack.LuInv(M_d)):
|
568 |
+
pass
|
569 |
+
with assert_deallocated(lambda: arpack.IterInv(M_s)):
|
570 |
+
pass
|
571 |
+
with assert_deallocated(lambda: arpack.IterOpInv(M_o, None, 0.3)):
|
572 |
+
pass
|
573 |
+
with assert_deallocated(lambda: arpack.IterOpInv(M_o, M_o, 0.3)):
|
574 |
+
pass
|
575 |
+
|
576 |
+
def test_parallel_threads():
|
577 |
+
results = []
|
578 |
+
v0 = np.random.rand(50)
|
579 |
+
|
580 |
+
def worker():
|
581 |
+
x = diags([1, -2, 1], [-1, 0, 1], shape=(50, 50))
|
582 |
+
w, v = eigs(x, k=3, v0=v0)
|
583 |
+
results.append(w)
|
584 |
+
|
585 |
+
w, v = eigsh(x, k=3, v0=v0)
|
586 |
+
results.append(w)
|
587 |
+
|
588 |
+
threads = [threading.Thread(target=worker) for k in range(10)]
|
589 |
+
for t in threads:
|
590 |
+
t.start()
|
591 |
+
for t in threads:
|
592 |
+
t.join()
|
593 |
+
|
594 |
+
worker()
|
595 |
+
|
596 |
+
for r in results:
|
597 |
+
assert_allclose(r, results[-1])
|
598 |
+
|
599 |
+
|
600 |
+
def test_reentering():
|
601 |
+
# Just some linear operator that calls eigs recursively
|
602 |
+
def A_matvec(x):
|
603 |
+
x = diags([1, -2, 1], [-1, 0, 1], shape=(50, 50))
|
604 |
+
w, v = eigs(x, k=1)
|
605 |
+
return v / w[0]
|
606 |
+
A = LinearOperator(matvec=A_matvec, dtype=float, shape=(50, 50))
|
607 |
+
|
608 |
+
# The Fortran code is not reentrant, so this fails (gracefully, not crashing)
|
609 |
+
assert_raises(RuntimeError, eigs, A, k=1)
|
610 |
+
assert_raises(RuntimeError, eigsh, A, k=1)
|
611 |
+
|
612 |
+
|
613 |
+
def test_regression_arpackng_1315():
|
614 |
+
# Check that issue arpack-ng/#1315 is not present.
|
615 |
+
# Adapted from arpack-ng/TESTS/bug_1315_single.c
|
616 |
+
# If this fails, then the installed ARPACK library is faulty.
|
617 |
+
|
618 |
+
for dtype in [np.float32, np.float64]:
|
619 |
+
np.random.seed(1234)
|
620 |
+
|
621 |
+
w0 = np.arange(1, 1000+1).astype(dtype)
|
622 |
+
A = diags([w0], [0], shape=(1000, 1000))
|
623 |
+
|
624 |
+
v0 = np.random.rand(1000).astype(dtype)
|
625 |
+
w, v = eigs(A, k=9, ncv=2*9+1, which="LM", v0=v0)
|
626 |
+
|
627 |
+
assert_allclose(np.sort(w), np.sort(w0[-9:]),
|
628 |
+
rtol=1e-4)
|
629 |
+
|
630 |
+
|
631 |
+
def test_eigs_for_k_greater():
|
632 |
+
# Test eigs() for k beyond limits.
|
633 |
+
A_sparse = diags([1, -2, 1], [-1, 0, 1], shape=(4, 4)) # sparse
|
634 |
+
A = generate_matrix(4, sparse=False)
|
635 |
+
M_dense = np.random.random((4, 4))
|
636 |
+
M_sparse = generate_matrix(4, sparse=True)
|
637 |
+
M_linop = aslinearoperator(M_dense)
|
638 |
+
eig_tuple1 = eig(A, b=M_dense)
|
639 |
+
eig_tuple2 = eig(A, b=M_sparse)
|
640 |
+
|
641 |
+
with suppress_warnings() as sup:
|
642 |
+
sup.filter(RuntimeWarning)
|
643 |
+
|
644 |
+
assert_equal(eigs(A, M=M_dense, k=3), eig_tuple1)
|
645 |
+
assert_equal(eigs(A, M=M_dense, k=4), eig_tuple1)
|
646 |
+
assert_equal(eigs(A, M=M_dense, k=5), eig_tuple1)
|
647 |
+
assert_equal(eigs(A, M=M_sparse, k=5), eig_tuple2)
|
648 |
+
|
649 |
+
# M as LinearOperator
|
650 |
+
assert_raises(TypeError, eigs, A, M=M_linop, k=3)
|
651 |
+
|
652 |
+
# Test 'A' for different types
|
653 |
+
assert_raises(TypeError, eigs, aslinearoperator(A), k=3)
|
654 |
+
assert_raises(TypeError, eigs, A_sparse, k=3)
|
655 |
+
|
656 |
+
|
657 |
+
def test_eigsh_for_k_greater():
|
658 |
+
# Test eigsh() for k beyond limits.
|
659 |
+
A_sparse = diags([1, -2, 1], [-1, 0, 1], shape=(4, 4)) # sparse
|
660 |
+
A = generate_matrix(4, sparse=False)
|
661 |
+
M_dense = generate_matrix_symmetric(4, pos_definite=True)
|
662 |
+
M_sparse = generate_matrix_symmetric(4, pos_definite=True, sparse=True)
|
663 |
+
M_linop = aslinearoperator(M_dense)
|
664 |
+
eig_tuple1 = eigh(A, b=M_dense)
|
665 |
+
eig_tuple2 = eigh(A, b=M_sparse)
|
666 |
+
|
667 |
+
with suppress_warnings() as sup:
|
668 |
+
sup.filter(RuntimeWarning)
|
669 |
+
|
670 |
+
assert_equal(eigsh(A, M=M_dense, k=4), eig_tuple1)
|
671 |
+
assert_equal(eigsh(A, M=M_dense, k=5), eig_tuple1)
|
672 |
+
assert_equal(eigsh(A, M=M_sparse, k=5), eig_tuple2)
|
673 |
+
|
674 |
+
# M as LinearOperator
|
675 |
+
assert_raises(TypeError, eigsh, A, M=M_linop, k=4)
|
676 |
+
|
677 |
+
# Test 'A' for different types
|
678 |
+
assert_raises(TypeError, eigsh, aslinearoperator(A), k=4)
|
679 |
+
assert_raises(TypeError, eigsh, A_sparse, M=M_dense, k=4)
|
680 |
+
|
681 |
+
|
682 |
+
def test_real_eigs_real_k_subset():
|
683 |
+
np.random.seed(1)
|
684 |
+
|
685 |
+
n = 10
|
686 |
+
A = rand(n, n, density=0.5)
|
687 |
+
A.data *= 2
|
688 |
+
A.data -= 1
|
689 |
+
|
690 |
+
v0 = np.ones(n)
|
691 |
+
|
692 |
+
whichs = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
|
693 |
+
dtypes = [np.float32, np.float64]
|
694 |
+
|
695 |
+
for which, sigma, dtype in itertools.product(whichs, [None, 0, 5], dtypes):
|
696 |
+
prev_w = np.array([], dtype=dtype)
|
697 |
+
eps = np.finfo(dtype).eps
|
698 |
+
for k in range(1, 9):
|
699 |
+
w, z = eigs(A.astype(dtype), k=k, which=which, sigma=sigma,
|
700 |
+
v0=v0.astype(dtype), tol=0)
|
701 |
+
assert_allclose(np.linalg.norm(A.dot(z) - z * w), 0, atol=np.sqrt(eps))
|
702 |
+
|
703 |
+
# Check that the set of eigenvalues for `k` is a subset of that for `k+1`
|
704 |
+
dist = abs(prev_w[:,None] - w).min(axis=1)
|
705 |
+
assert_allclose(dist, 0, atol=np.sqrt(eps))
|
706 |
+
|
707 |
+
prev_w = w
|
708 |
+
|
709 |
+
# Check sort order
|
710 |
+
if sigma is None:
|
711 |
+
d = w
|
712 |
+
else:
|
713 |
+
d = 1 / (w - sigma)
|
714 |
+
|
715 |
+
if which == 'LM':
|
716 |
+
# ARPACK is systematic for 'LM', but sort order
|
717 |
+
# appears not well defined for other modes
|
718 |
+
assert np.all(np.diff(abs(d)) <= 1e-6)
|
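A small usage sketch related to the no-convergence tests above; it is not part of the test file, and the matrix size and maxiter value are arbitrary assumptions chosen so that ARPACK is likely (though not guaranteed) to stop early.
import numpy as np
from scipy.sparse import diags
from scipy.sparse.linalg import eigsh, ArpackNoConvergence
A = diags([-1.0, 2.0, -1.0], [-1, 0, 1], shape=(500, 500), format='csc')
try:
    # 'SM' without shift-invert converges slowly; maxiter=2 forces an early stop.
    w, v = eigsh(A, k=6, which='SM', maxiter=2)
except ArpackNoConvergence as err:
    # Partially converged results are attached to the exception object.
    w, v = err.eigenvalues, err.eigenvectors
    print(f"{len(w)} eigenpairs converged before the iteration limit")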
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/__init__.py
ADDED
@@ -0,0 +1,16 @@
1 |
+
"""
|
2 |
+
Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG)
|
3 |
+
|
4 |
+
LOBPCG is a preconditioned eigensolver for large symmetric positive definite
|
5 |
+
(SPD) generalized eigenproblems.
|
6 |
+
|
7 |
+
Call the function lobpcg - see help for lobpcg.lobpcg.
|
8 |
+
|
9 |
+
"""
|
10 |
+
from .lobpcg import *
|
11 |
+
|
12 |
+
__all__ = [s for s in dir() if not s.startswith('_')]
|
13 |
+
|
14 |
+
from scipy._lib._testutils import PytestTester
|
15 |
+
test = PytestTester(__name__)
|
16 |
+
del PytestTester
|
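An illustrative call sketch for the solver this package exposes (not part of the file above); the diagonal test matrix, block size, and iteration count are arbitrary assumptions mirroring the usual documentation pattern.
import numpy as np
from scipy.sparse import spdiags
from scipy.sparse.linalg import lobpcg
n = 100
A = spdiags(np.arange(1, n + 1, dtype=float), 0, n, n)  # SPD diagonal matrix
rng = np.random.default_rng(0)
X = rng.normal(size=(n, 3))   # initial block of 3 approximate eigenvectors
w, v = lobpcg(A, X, largest=True, maxiter=60)
print(np.sort(w))             # expected to approach [98., 99., 100.]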
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (731 Bytes).
|
|
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/__pycache__/lobpcg.cpython-310.pyc
ADDED
Binary file (25.4 kB).
|
|
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/lobpcg.py
ADDED
@@ -0,0 +1,1112 @@
1 |
+
"""
|
2 |
+
Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG).
|
3 |
+
|
4 |
+
References
|
5 |
+
----------
|
6 |
+
.. [1] A. V. Knyazev (2001),
|
7 |
+
Toward the Optimal Preconditioned Eigensolver: Locally Optimal
|
8 |
+
Block Preconditioned Conjugate Gradient Method.
|
9 |
+
SIAM Journal on Scientific Computing 23, no. 2,
|
10 |
+
pp. 517-541. :doi:`10.1137/S1064827500366124`
|
11 |
+
|
12 |
+
.. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov (2007),
|
13 |
+
Block Locally Optimal Preconditioned Eigenvalue Xolvers (BLOPEX)
|
14 |
+
in hypre and PETSc. :arxiv:`0705.2626`
|
15 |
+
|
16 |
+
.. [3] A. V. Knyazev's C and MATLAB implementations:
|
17 |
+
https://github.com/lobpcg/blopex
|
18 |
+
"""
|
19 |
+
|
20 |
+
import warnings
|
21 |
+
import numpy as np
|
22 |
+
from scipy.linalg import (inv, eigh, cho_factor, cho_solve,
|
23 |
+
cholesky, LinAlgError)
|
24 |
+
from scipy.sparse.linalg import LinearOperator
|
25 |
+
from scipy.sparse import issparse
|
26 |
+
|
27 |
+
__all__ = ["lobpcg"]
|
28 |
+
|
29 |
+
|
30 |
+
def _report_nonhermitian(M, name):
|
31 |
+
"""
|
32 |
+
Report if `M` is not a Hermitian matrix given its type.
|
33 |
+
"""
|
34 |
+
from scipy.linalg import norm
|
35 |
+
|
36 |
+
md = M - M.T.conj()
|
37 |
+
nmd = norm(md, 1)
|
38 |
+
tol = 10 * np.finfo(M.dtype).eps
|
39 |
+
tol = max(tol, tol * norm(M, 1))
|
40 |
+
if nmd > tol:
|
41 |
+
warnings.warn(
|
42 |
+
f"Matrix {name} of the type {M.dtype} is not Hermitian: "
|
43 |
+
f"condition: {nmd} < {tol} fails.",
|
44 |
+
UserWarning, stacklevel=4
|
45 |
+
)
|
46 |
+
|
47 |
+
def _as2d(ar):
|
48 |
+
"""
|
49 |
+
If the input array is 2D return it, if it is 1D, append a dimension,
|
50 |
+
making it a column vector.
|
51 |
+
"""
|
52 |
+
if ar.ndim == 2:
|
53 |
+
return ar
|
54 |
+
else: # Assume 1!
|
55 |
+
aux = np.asarray(ar)
|
56 |
+
aux.shape = (ar.shape[0], 1)
|
57 |
+
return aux
|
58 |
+
|
59 |
+
|
60 |
+
def _makeMatMat(m):
|
61 |
+
if m is None:
|
62 |
+
return None
|
63 |
+
elif callable(m):
|
64 |
+
return lambda v: m(v)
|
65 |
+
else:
|
66 |
+
return lambda v: m @ v
|
67 |
+
|
68 |
+
|
69 |
+
def _matmul_inplace(x, y, verbosityLevel=0):
|
70 |
+
"""Perform 'np.matmul' in-place if possible.
|
71 |
+
|
72 |
+
If some sufficient conditions for inplace matmul are met, do so.
|
73 |
+
Otherwise try inplace update and fall back to overwrite if that fails.
|
74 |
+
"""
|
75 |
+
if x.flags["CARRAY"] and x.shape[1] == y.shape[1] and x.dtype == y.dtype:
|
76 |
+
# conditions where we can guarantee that inplace updates will work;
|
77 |
+
# i.e. x is not a view/slice, x & y have compatible dtypes, and the
|
78 |
+
# shape of the result of x @ y matches the shape of x.
|
79 |
+
np.matmul(x, y, out=x)
|
80 |
+
else:
|
81 |
+
# ideally, we'd have an exhaustive list of conditions above when
|
82 |
+
# inplace updates are possible; since we don't, we opportunistically
|
83 |
+
# try if it works, and fall back to overwriting if necessary
|
84 |
+
try:
|
85 |
+
np.matmul(x, y, out=x)
|
86 |
+
except Exception:
|
87 |
+
if verbosityLevel:
|
88 |
+
warnings.warn(
|
89 |
+
"Inplace update of x = x @ y failed, "
|
90 |
+
"x needs to be overwritten.",
|
91 |
+
UserWarning, stacklevel=3
|
92 |
+
)
|
93 |
+
x = x @ y
|
94 |
+
return x
|
95 |
+
|
96 |
+
|
97 |
+
def _applyConstraints(blockVectorV, factYBY, blockVectorBY, blockVectorY):
|
98 |
+
"""Changes blockVectorV in-place."""
|
99 |
+
YBV = blockVectorBY.T.conj() @ blockVectorV
|
100 |
+
tmp = cho_solve(factYBY, YBV)
|
101 |
+
blockVectorV -= blockVectorY @ tmp
|
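# Descriptive note (added comment, not part of the original source): the update
# above is the B-orthogonal projection V <- V - Y @ inv(Y^H B Y) @ (Y^H B V),
# computed with the prefactored Cholesky factorization of Y^H B Y, so that
# Y^H B V becomes (numerically) zero.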
102 |
+
|
103 |
+
|
104 |
+
def _b_orthonormalize(B, blockVectorV, blockVectorBV=None,
|
105 |
+
verbosityLevel=0):
|
106 |
+
"""in-place B-orthonormalize the given block vector using Cholesky."""
|
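    # Descriptive note (added comment, not part of the original source): on
    # success the update below is V <- V @ inv(R) with R = cholesky(V^H B V)
    # (upper triangular), so that V^H B V = I; the same inv(R) is applied to
    # B @ V and returned as the third output for the caller to reuse.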
107 |
+
if blockVectorBV is None:
|
108 |
+
if B is None:
|
109 |
+
blockVectorBV = blockVectorV
|
110 |
+
else:
|
111 |
+
try:
|
112 |
+
blockVectorBV = B(blockVectorV)
|
113 |
+
except Exception as e:
|
114 |
+
if verbosityLevel:
|
115 |
+
warnings.warn(
|
116 |
+
f"Secondary MatMul call failed with error\n"
|
117 |
+
f"{e}\n",
|
118 |
+
UserWarning, stacklevel=3
|
119 |
+
)
|
120 |
+
return None, None, None
|
121 |
+
if blockVectorBV.shape != blockVectorV.shape:
|
122 |
+
raise ValueError(
|
123 |
+
f"The shape {blockVectorV.shape} "
|
124 |
+
f"of the orthogonalized matrix not preserved\n"
|
125 |
+
f"and changed to {blockVectorBV.shape} "
|
126 |
+
f"after multiplying by the secondary matrix.\n"
|
127 |
+
)
|
128 |
+
|
129 |
+
VBV = blockVectorV.T.conj() @ blockVectorBV
|
130 |
+
try:
|
131 |
+
# VBV is a Cholesky factor from now on...
|
132 |
+
VBV = cholesky(VBV, overwrite_a=True)
|
133 |
+
VBV = inv(VBV, overwrite_a=True)
|
134 |
+
blockVectorV = _matmul_inplace(
|
135 |
+
blockVectorV, VBV,
|
136 |
+
verbosityLevel=verbosityLevel
|
137 |
+
)
|
138 |
+
if B is not None:
|
139 |
+
blockVectorBV = _matmul_inplace(
|
140 |
+
blockVectorBV, VBV,
|
141 |
+
verbosityLevel=verbosityLevel
|
142 |
+
)
|
143 |
+
return blockVectorV, blockVectorBV, VBV
|
144 |
+
except LinAlgError:
|
145 |
+
if verbosityLevel:
|
146 |
+
warnings.warn(
|
147 |
+
"Cholesky has failed.",
|
148 |
+
UserWarning, stacklevel=3
|
149 |
+
)
|
150 |
+
return None, None, None
|
151 |
+
|
152 |
+
|
153 |
+
def _get_indx(_lambda, num, largest):
|
154 |
+
"""Get `num` indices into `_lambda` depending on `largest` option."""
|
155 |
+
ii = np.argsort(_lambda)
|
156 |
+
if largest:
|
157 |
+
ii = ii[:-num - 1:-1]
|
158 |
+
else:
|
159 |
+
ii = ii[:num]
|
160 |
+
|
161 |
+
return ii
|
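# Illustrative note (added comment, not part of the original source): with
#     _lambda = np.array([0.3, 1.2, -0.5, 2.0])
# _get_indx(_lambda, 2, largest=True) gives array([3, 1]) (the two largest)
# and _get_indx(_lambda, 2, largest=False) gives array([2, 0]) (the two smallest).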
162 |
+
|
163 |
+
|
164 |
+
def _handle_gramA_gramB_verbosity(gramA, gramB, verbosityLevel):
|
165 |
+
if verbosityLevel:
|
166 |
+
_report_nonhermitian(gramA, "gramA")
|
167 |
+
_report_nonhermitian(gramB, "gramB")
|
168 |
+
|
169 |
+
|
170 |
+
def lobpcg(
|
171 |
+
A,
|
172 |
+
X,
|
173 |
+
B=None,
|
174 |
+
M=None,
|
175 |
+
Y=None,
|
176 |
+
tol=None,
|
177 |
+
maxiter=None,
|
178 |
+
largest=True,
|
179 |
+
verbosityLevel=0,
|
180 |
+
retLambdaHistory=False,
|
181 |
+
retResidualNormsHistory=False,
|
182 |
+
restartControl=20,
|
183 |
+
):
|
184 |
+
"""Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG).
|
185 |
+
|
186 |
+
LOBPCG is a preconditioned eigensolver for large real symmetric and complex
|
187 |
+
Hermitian definite generalized eigenproblems.
|
188 |
+
|
189 |
+
Parameters
|
190 |
+
----------
|
191 |
+
A : {sparse matrix, ndarray, LinearOperator, callable object}
|
192 |
+
The Hermitian linear operator of the problem, usually given by a
|
193 |
+
sparse matrix. Often called the "stiffness matrix".
|
194 |
+
X : ndarray, float32 or float64
|
195 |
+
Initial approximation to the ``k`` eigenvectors (non-sparse).
|
196 |
+
If `A` has ``shape=(n,n)`` then `X` must have ``shape=(n,k)``.
|
197 |
+
B : {sparse matrix, ndarray, LinearOperator, callable object}
|
198 |
+
Optional. By default ``B = None``, which is equivalent to identity.
|
199 |
+
The right hand side operator in a generalized eigenproblem if present.
|
200 |
+
Often called the "mass matrix". Must be Hermitian positive definite.
|
201 |
+
M : {sparse matrix, ndarray, LinearOperator, callable object}
|
202 |
+
Optional. By default ``M = None``, which is equivalent to identity.
|
203 |
+
Preconditioner aiming to accelerate convergence.
|
204 |
+
Y : ndarray, float32 or float64, default: None
|
205 |
+
An ``n-by-sizeY`` ndarray of constraints with ``sizeY < n``.
|
206 |
+
The iterations will be performed in the ``B``-orthogonal complement
|
207 |
+
of the column-space of `Y`. `Y` must be full rank if present.
|
208 |
+
tol : scalar, optional
|
209 |
+
The default is ``tol=n*sqrt(eps)``.
|
210 |
+
Solver tolerance for the stopping criterion.
|
211 |
+
maxiter : int, default: 20
|
212 |
+
Maximum number of iterations.
|
213 |
+
largest : bool, default: True
|
214 |
+
When True, solve for the largest eigenvalues, otherwise the smallest.
|
215 |
+
verbosityLevel : int, optional
|
216 |
+
        By default ``verbosityLevel=0`` produces no output.
|
217 |
+
Controls the solver standard/screen output.
|
218 |
+
retLambdaHistory : bool, default: False
|
219 |
+
Whether to return iterative eigenvalue history.
|
220 |
+
retResidualNormsHistory : bool, default: False
|
221 |
+
Whether to return iterative history of residual norms.
|
222 |
+
restartControl : int, optional.
|
223 |
+
Iterations restart if the residuals jump ``2**restartControl`` times
|
224 |
+
compared to the smallest recorded in ``retResidualNormsHistory``.
|
225 |
+
The default is ``restartControl=20``, making the restarts rare for
|
226 |
+
backward compatibility.
|
227 |
+
|
228 |
+
Returns
|
229 |
+
-------
|
230 |
+
lambda : ndarray of the shape ``(k, )``.
|
231 |
+
Array of ``k`` approximate eigenvalues.
|
232 |
+
v : ndarray of the same shape as ``X.shape``.
|
233 |
+
An array of ``k`` approximate eigenvectors.
|
234 |
+
lambdaHistory : ndarray, optional.
|
235 |
+
The eigenvalue history, if `retLambdaHistory` is ``True``.
|
236 |
+
ResidualNormsHistory : ndarray, optional.
|
237 |
+
The history of residual norms, if `retResidualNormsHistory`
|
238 |
+
is ``True``.
|
239 |
+
|
240 |
+
Notes
|
241 |
+
-----
|
242 |
+
The iterative loop runs ``maxit=maxiter`` (20 if ``maxit=None``)
|
243 |
+
iterations at most and finishes earlier if the tolerance is met.
|
244 |
+
Breaking backward compatibility with the previous version, LOBPCG
|
245 |
+
now returns the block of iterative vectors with the best accuracy rather
|
246 |
+
than the last one iterated, as a cure for possible divergence.
|
247 |
+
|
248 |
+
If ``X.dtype == np.float32`` and user-provided operations/multiplications
|
249 |
+
by `A`, `B`, and `M` all preserve the ``np.float32`` data type,
|
250 |
+
all the calculations and the output are in ``np.float32``.
|
251 |
+
|
252 |
+
    The size of the iteration history output equals the number of the best
|
253 |
+
(limited by `maxit`) iterations plus 3: initial, final, and postprocessing.
|
254 |
+
|
255 |
+
If both `retLambdaHistory` and `retResidualNormsHistory` are ``True``,
|
256 |
+
the return tuple has the following format
|
257 |
+
``(lambda, V, lambda history, residual norms history)``.
|
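    For instance, a rough sketch (not part of the original notes) of requesting
    both histories; all names below are defined within the sketch itself:

    >>> import numpy as np
    >>> from scipy.sparse import spdiags
    >>> from scipy.sparse.linalg import lobpcg
    >>> n = 100
    >>> vals = np.arange(1, n + 1, dtype=np.float64)
    >>> A = spdiags(vals, 0, n, n)
    >>> X = np.random.default_rng(0).normal(size=(n, 2))
    >>> w, V, lh, rh = lobpcg(A, X, maxiter=10, largest=True,
    ...                       retLambdaHistory=True,
    ...                       retResidualNormsHistory=True)
    >>> len(lh) == len(rh)
    True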
258 |
+
|
259 |
+
In the following ``n`` denotes the matrix size and ``k`` the number
|
260 |
+
of required eigenvalues (smallest or largest).
|
261 |
+
|
262 |
+
The LOBPCG code internally solves eigenproblems of the size ``3k`` on every
|
263 |
+
iteration by calling the dense eigensolver `eigh`, so if ``k`` is not
|
264 |
+
small enough compared to ``n``, it makes no sense to call the LOBPCG code.
|
265 |
+
Moreover, if one calls the LOBPCG algorithm for ``5k > n``, it would likely
|
266 |
+
break internally, so the code calls the standard function `eigh` instead.
|
267 |
+
It is not that ``n`` should be large for the LOBPCG to work, but rather the
|
268 |
+
    ratio ``n / k`` should be large. If you call LOBPCG with ``k=1``
|
269 |
+
and ``n=10``, it works though ``n`` is small. The method is intended
|
270 |
+
for extremely large ``n / k``.
|
271 |
+
|
272 |
+
The convergence speed depends basically on three factors:
|
273 |
+
|
274 |
+
    1. Quality of the initial approximations `X` to the sought eigenvectors.
|
275 |
+
       Vectors randomly distributed around the origin work well if no better
|
276 |
+
choice is known.
|
277 |
+
|
278 |
+
2. Relative separation of the desired eigenvalues from the rest
|
279 |
+
of the eigenvalues. One can vary ``k`` to improve the separation.
|
280 |
+
|
281 |
+
3. Proper preconditioning to shrink the spectral spread.
|
282 |
+
For example, a rod vibration test problem (under tests
|
283 |
+
directory) is ill-conditioned for large ``n``, so convergence will be
|
284 |
+
slow, unless efficient preconditioning is used. For this specific
|
285 |
+
problem, a good simple preconditioner function would be a linear solve
|
286 |
+
for `A`, which is easy to code since `A` is tridiagonal.
|
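    As a rough illustration of item 3 (not part of the original notes; the
    tridiagonal matrix ``A_tri`` below is made up for this sketch), such a
    preconditioner can be a callable wrapping a prefactorized solve:

    >>> import numpy as np
    >>> from scipy.sparse import diags
    >>> from scipy.sparse.linalg import splu, lobpcg
    >>> n = 100
    >>> A_tri = diags([np.full(n - 1, -1.0), np.full(n, 2.0),
    ...                np.full(n - 1, -1.0)], [-1, 0, 1], format="csc")
    >>> lu = splu(A_tri)
    >>> M = lambda x: lu.solve(x)  # preconditioner: apply the inverse of A_tri
    >>> X = np.random.default_rng(0).normal(size=(n, 3))
    >>> w, _ = lobpcg(A_tri, X, M=M, largest=False, maxiter=30)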
287 |
+
|
288 |
+
References
|
289 |
+
----------
|
290 |
+
.. [1] A. V. Knyazev (2001),
|
291 |
+
Toward the Optimal Preconditioned Eigensolver: Locally Optimal
|
292 |
+
Block Preconditioned Conjugate Gradient Method.
|
293 |
+
SIAM Journal on Scientific Computing 23, no. 2,
|
294 |
+
pp. 517-541. :doi:`10.1137/S1064827500366124`
|
295 |
+
|
296 |
+
.. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov
|
297 |
+
(2007), Block Locally Optimal Preconditioned Eigenvalue Xolvers
|
298 |
+
(BLOPEX) in hypre and PETSc. :arxiv:`0705.2626`
|
299 |
+
|
300 |
+
.. [3] A. V. Knyazev's C and MATLAB implementations:
|
301 |
+
https://github.com/lobpcg/blopex
|
302 |
+
|
303 |
+
Examples
|
304 |
+
--------
|
305 |
+
Our first example is minimalistic - find the largest eigenvalue of
|
306 |
+
a diagonal matrix by solving the non-generalized eigenvalue problem
|
307 |
+
``A x = lambda x`` without constraints or preconditioning.
|
308 |
+
|
309 |
+
>>> import numpy as np
|
310 |
+
>>> from scipy.sparse import spdiags
|
311 |
+
>>> from scipy.sparse.linalg import LinearOperator, aslinearoperator
|
312 |
+
>>> from scipy.sparse.linalg import lobpcg
|
313 |
+
|
314 |
+
The square matrix size is
|
315 |
+
|
316 |
+
>>> n = 100
|
317 |
+
|
318 |
+
and its diagonal entries are 1, ..., 100 defined by
|
319 |
+
|
320 |
+
>>> vals = np.arange(1, n + 1).astype(np.int16)
|
321 |
+
|
322 |
+
The first mandatory input parameter in this test is
|
323 |
+
the sparse diagonal matrix `A`
|
324 |
+
of the eigenvalue problem ``A x = lambda x`` to solve.
|
325 |
+
|
326 |
+
>>> A = spdiags(vals, 0, n, n)
|
327 |
+
>>> A = A.astype(np.int16)
|
328 |
+
>>> A.toarray()
|
329 |
+
array([[ 1, 0, 0, ..., 0, 0, 0],
|
330 |
+
[ 0, 2, 0, ..., 0, 0, 0],
|
331 |
+
[ 0, 0, 3, ..., 0, 0, 0],
|
332 |
+
...,
|
333 |
+
[ 0, 0, 0, ..., 98, 0, 0],
|
334 |
+
[ 0, 0, 0, ..., 0, 99, 0],
|
335 |
+
[ 0, 0, 0, ..., 0, 0, 100]], dtype=int16)
|
336 |
+
|
337 |
+
The second mandatory input parameter `X` is a 2D array with the
|
338 |
+
    number of columns determining the number of requested eigenvalues.
|
339 |
+
`X` is an initial guess for targeted eigenvectors.
|
340 |
+
`X` must have linearly independent columns.
|
341 |
+
    If no initial approximations are available, randomly oriented vectors
|
342 |
+
commonly work best, e.g., with components normally distributed
|
343 |
+
    around zero or uniformly distributed on the interval [-1, 1].
|
344 |
+
Setting the initial approximations to dtype ``np.float32``
|
345 |
+
    forces all iterative values to dtype ``np.float32``, speeding up
|
346 |
+
the run while still allowing accurate eigenvalue computations.
|
347 |
+
|
348 |
+
>>> k = 1
|
349 |
+
>>> rng = np.random.default_rng()
|
350 |
+
>>> X = rng.normal(size=(n, k))
|
351 |
+
>>> X = X.astype(np.float32)
|
352 |
+
|
353 |
+
>>> eigenvalues, _ = lobpcg(A, X, maxiter=60)
|
354 |
+
>>> eigenvalues
|
355 |
+
array([100.])
|
356 |
+
>>> eigenvalues.dtype
|
357 |
+
dtype('float32')
|
358 |
+
|
359 |
+
    `lobpcg` needs access only to the matrix product with `A` rather
|
360 |
+
    than the matrix itself. Since the matrix `A` is diagonal in
|
361 |
+
this example, one can write a function of the matrix product
|
362 |
+
``A @ X`` using the diagonal values ``vals`` only, e.g., by
|
363 |
+
element-wise multiplication with broadcasting in the lambda-function
|
364 |
+
|
365 |
+
>>> A_lambda = lambda X: vals[:, np.newaxis] * X
|
366 |
+
|
367 |
+
or the regular function
|
368 |
+
|
369 |
+
>>> def A_matmat(X):
|
370 |
+
... return vals[:, np.newaxis] * X
|
371 |
+
|
372 |
+
and use the handle to one of these callables as an input
|
373 |
+
|
374 |
+
>>> eigenvalues, _ = lobpcg(A_lambda, X, maxiter=60)
|
375 |
+
>>> eigenvalues
|
376 |
+
array([100.])
|
377 |
+
>>> eigenvalues, _ = lobpcg(A_matmat, X, maxiter=60)
|
378 |
+
>>> eigenvalues
|
379 |
+
array([100.])
|
380 |
+
|
381 |
+
The traditional callable `LinearOperator` is no longer
|
382 |
+
necessary but still supported as the input to `lobpcg`.
|
383 |
+
Specifying ``matmat=A_matmat`` explicitly improves performance.
|
384 |
+
|
385 |
+
>>> A_lo = LinearOperator((n, n), matvec=A_matmat, matmat=A_matmat, dtype=np.int16)
|
386 |
+
>>> eigenvalues, _ = lobpcg(A_lo, X, maxiter=80)
|
387 |
+
>>> eigenvalues
|
388 |
+
array([100.])
|
389 |
+
|
390 |
+
The least efficient callable option is `aslinearoperator`:
|
391 |
+
|
392 |
+
>>> eigenvalues, _ = lobpcg(aslinearoperator(A), X, maxiter=80)
|
393 |
+
>>> eigenvalues
|
394 |
+
array([100.])
|
395 |
+
|
396 |
+
We now switch to computing the three smallest eigenvalues specifying
|
397 |
+
|
398 |
+
>>> k = 3
|
399 |
+
>>> X = np.random.default_rng().normal(size=(n, k))
|
400 |
+
|
401 |
+
and ``largest=False`` parameter
|
402 |
+
|
403 |
+
>>> eigenvalues, _ = lobpcg(A, X, largest=False, maxiter=80)
|
404 |
+
>>> print(eigenvalues)
|
405 |
+
[1. 2. 3.]
|
406 |
+
|
407 |
+
The next example illustrates computing 3 smallest eigenvalues of
|
408 |
+
the same matrix `A` given by the function handle ``A_matmat`` but
|
409 |
+
with constraints and preconditioning.
|
410 |
+
|
411 |
+
    The optional constraints parameter is a 2D array consisting
|
412 |
+
of column vectors that the eigenvectors must be orthogonal to
|
413 |
+
|
414 |
+
>>> Y = np.eye(n, 3)
|
415 |
+
|
416 |
+
The preconditioner acts as the inverse of `A` in this example, but
|
417 |
+
in the reduced precision ``np.float32`` even though the initial `X`
|
418 |
+
and thus all iterates and the output are in full ``np.float64``.
|
419 |
+
|
420 |
+
>>> inv_vals = 1./vals
|
421 |
+
>>> inv_vals = inv_vals.astype(np.float32)
|
422 |
+
>>> M = lambda X: inv_vals[:, np.newaxis] * X
|
423 |
+
|
424 |
+
Let us now solve the eigenvalue problem for the matrix `A` first
|
425 |
+
without preconditioning requesting 80 iterations
|
426 |
+
|
427 |
+
>>> eigenvalues, _ = lobpcg(A_matmat, X, Y=Y, largest=False, maxiter=80)
|
428 |
+
>>> eigenvalues
|
429 |
+
array([4., 5., 6.])
|
430 |
+
>>> eigenvalues.dtype
|
431 |
+
dtype('float64')
|
432 |
+
|
433 |
+
With preconditioning we need only 20 iterations from the same `X`
|
434 |
+
|
435 |
+
>>> eigenvalues, _ = lobpcg(A_matmat, X, Y=Y, M=M, largest=False, maxiter=20)
|
436 |
+
>>> eigenvalues
|
437 |
+
array([4., 5., 6.])
|
438 |
+
|
439 |
+
Note that the vectors passed in `Y` are the eigenvectors of the 3
|
440 |
+
smallest eigenvalues. The results returned above are orthogonal to those.
|
441 |
+
|
442 |
+
The primary matrix `A` may be indefinite, e.g., after shifting
|
443 |
+
    ``vals`` by 50 from 1, ..., 100 to -49, ..., 50, we can still compute
|
444 |
+
the 3 smallest or largest eigenvalues.
|
445 |
+
|
446 |
+
>>> vals = vals - 50
|
447 |
+
>>> X = rng.normal(size=(n, k))
|
448 |
+
>>> eigenvalues, _ = lobpcg(A_matmat, X, largest=False, maxiter=99)
|
449 |
+
>>> eigenvalues
|
450 |
+
array([-49., -48., -47.])
|
451 |
+
>>> eigenvalues, _ = lobpcg(A_matmat, X, largest=True, maxiter=99)
|
452 |
+
>>> eigenvalues
|
453 |
+
array([50., 49., 48.])
|
454 |
+
|
455 |
+
"""
|
456 |
+
blockVectorX = X
|
457 |
+
bestblockVectorX = blockVectorX
|
458 |
+
blockVectorY = Y
|
459 |
+
residualTolerance = tol
|
460 |
+
if maxiter is None:
|
461 |
+
maxiter = 20
|
462 |
+
|
463 |
+
bestIterationNumber = maxiter
|
464 |
+
|
465 |
+
sizeY = 0
|
466 |
+
if blockVectorY is not None:
|
467 |
+
if len(blockVectorY.shape) != 2:
|
468 |
+
warnings.warn(
|
469 |
+
f"Expected rank-2 array for argument Y, instead got "
|
470 |
+
f"{len(blockVectorY.shape)}, "
|
471 |
+
f"so ignore it and use no constraints.",
|
472 |
+
UserWarning, stacklevel=2
|
473 |
+
)
|
474 |
+
blockVectorY = None
|
475 |
+
else:
|
476 |
+
sizeY = blockVectorY.shape[1]
|
477 |
+
|
478 |
+
# Block size.
|
479 |
+
if blockVectorX is None:
|
480 |
+
raise ValueError("The mandatory initial matrix X cannot be None")
|
481 |
+
if len(blockVectorX.shape) != 2:
|
482 |
+
raise ValueError("expected rank-2 array for argument X")
|
483 |
+
|
484 |
+
n, sizeX = blockVectorX.shape
|
485 |
+
|
486 |
+
# Data type of iterates, determined by X, must be inexact
|
487 |
+
if not np.issubdtype(blockVectorX.dtype, np.inexact):
|
488 |
+
warnings.warn(
|
489 |
+
f"Data type for argument X is {blockVectorX.dtype}, "
|
490 |
+
f"which is not inexact, so casted to np.float32.",
|
491 |
+
UserWarning, stacklevel=2
|
492 |
+
)
|
493 |
+
blockVectorX = np.asarray(blockVectorX, dtype=np.float32)
|
494 |
+
|
495 |
+
if retLambdaHistory:
|
496 |
+
lambdaHistory = np.zeros((maxiter + 3, sizeX),
|
497 |
+
dtype=blockVectorX.dtype)
|
498 |
+
if retResidualNormsHistory:
|
499 |
+
residualNormsHistory = np.zeros((maxiter + 3, sizeX),
|
500 |
+
dtype=blockVectorX.dtype)
|
501 |
+
|
502 |
+
if verbosityLevel:
|
503 |
+
aux = "Solving "
|
504 |
+
if B is None:
|
505 |
+
aux += "standard"
|
506 |
+
else:
|
507 |
+
aux += "generalized"
|
508 |
+
aux += " eigenvalue problem with"
|
509 |
+
if M is None:
|
510 |
+
aux += "out"
|
511 |
+
aux += " preconditioning\n\n"
|
512 |
+
aux += "matrix size %d\n" % n
|
513 |
+
aux += "block size %d\n\n" % sizeX
|
514 |
+
if blockVectorY is None:
|
515 |
+
aux += "No constraints\n\n"
|
516 |
+
else:
|
517 |
+
if sizeY > 1:
|
518 |
+
aux += "%d constraints\n\n" % sizeY
|
519 |
+
else:
|
520 |
+
aux += "%d constraint\n\n" % sizeY
|
521 |
+
print(aux)
|
522 |
+
|
523 |
+
if (n - sizeY) < (5 * sizeX):
|
524 |
+
warnings.warn(
|
525 |
+
f"The problem size {n} minus the constraints size {sizeY} "
|
526 |
+
f"is too small relative to the block size {sizeX}. "
|
527 |
+
f"Using a dense eigensolver instead of LOBPCG iterations."
|
528 |
+
f"No output of the history of the iterations.",
|
529 |
+
UserWarning, stacklevel=2
|
530 |
+
)
|
531 |
+
|
532 |
+
sizeX = min(sizeX, n)
|
533 |
+
|
534 |
+
if blockVectorY is not None:
|
535 |
+
raise NotImplementedError(
|
536 |
+
"The dense eigensolver does not support constraints."
|
537 |
+
)
|
538 |
+
|
539 |
+
# Define the closed range of indices of eigenvalues to return.
|
540 |
+
if largest:
|
541 |
+
eigvals = (n - sizeX, n - 1)
|
542 |
+
else:
|
543 |
+
eigvals = (0, sizeX - 1)
|
544 |
+
|
545 |
+
try:
|
546 |
+
if isinstance(A, LinearOperator):
|
547 |
+
A = A(np.eye(n, dtype=int))
|
548 |
+
elif callable(A):
|
549 |
+
A = A(np.eye(n, dtype=int))
|
550 |
+
if A.shape != (n, n):
|
551 |
+
raise ValueError(
|
552 |
+
f"The shape {A.shape} of the primary matrix\n"
|
553 |
+
f"defined by a callable object is wrong.\n"
|
554 |
+
)
|
555 |
+
elif issparse(A):
|
556 |
+
A = A.toarray()
|
557 |
+
else:
|
558 |
+
A = np.asarray(A)
|
559 |
+
except Exception as e:
|
560 |
+
raise Exception(
|
561 |
+
f"Primary MatMul call failed with error\n"
|
562 |
+
f"{e}\n")
|
563 |
+
|
564 |
+
if B is not None:
|
565 |
+
try:
|
566 |
+
if isinstance(B, LinearOperator):
|
567 |
+
B = B(np.eye(n, dtype=int))
|
568 |
+
elif callable(B):
|
569 |
+
B = B(np.eye(n, dtype=int))
|
570 |
+
if B.shape != (n, n):
|
571 |
+
raise ValueError(
|
572 |
+
f"The shape {B.shape} of the secondary matrix\n"
|
573 |
+
f"defined by a callable object is wrong.\n"
|
574 |
+
)
|
575 |
+
elif issparse(B):
|
576 |
+
B = B.toarray()
|
577 |
+
else:
|
578 |
+
B = np.asarray(B)
|
579 |
+
except Exception as e:
|
580 |
+
raise Exception(
|
581 |
+
f"Secondary MatMul call failed with error\n"
|
582 |
+
f"{e}\n")
|
583 |
+
|
584 |
+
try:
|
585 |
+
vals, vecs = eigh(A,
|
586 |
+
B,
|
587 |
+
subset_by_index=eigvals,
|
588 |
+
check_finite=False)
|
589 |
+
if largest:
|
590 |
+
# Reverse order to be compatible with eigs() in 'LM' mode.
|
591 |
+
vals = vals[::-1]
|
592 |
+
vecs = vecs[:, ::-1]
|
593 |
+
|
594 |
+
return vals, vecs
|
595 |
+
except Exception as e:
|
596 |
+
raise Exception(
|
597 |
+
f"Dense eigensolver failed with error\n"
|
598 |
+
f"{e}\n"
|
599 |
+
)
|
600 |
+
|
601 |
+
if (residualTolerance is None) or (residualTolerance <= 0.0):
|
602 |
+
residualTolerance = np.sqrt(np.finfo(blockVectorX.dtype).eps) * n
|
603 |
+
|
604 |
+
A = _makeMatMat(A)
|
605 |
+
B = _makeMatMat(B)
|
606 |
+
M = _makeMatMat(M)
|
607 |
+
|
608 |
+
# Apply constraints to X.
|
609 |
+
if blockVectorY is not None:
|
610 |
+
|
611 |
+
if B is not None:
|
612 |
+
blockVectorBY = B(blockVectorY)
|
613 |
+
if blockVectorBY.shape != blockVectorY.shape:
|
614 |
+
raise ValueError(
|
615 |
+
f"The shape {blockVectorY.shape} "
|
616 |
+
f"of the constraint not preserved\n"
|
617 |
+
f"and changed to {blockVectorBY.shape} "
|
618 |
+
f"after multiplying by the secondary matrix.\n"
|
619 |
+
)
|
620 |
+
else:
|
621 |
+
blockVectorBY = blockVectorY
|
622 |
+
|
623 |
+
# gramYBY is a dense array.
|
624 |
+
gramYBY = blockVectorY.T.conj() @ blockVectorBY
|
625 |
+
try:
|
626 |
+
# gramYBY is a Cholesky factor from now on...
|
627 |
+
gramYBY = cho_factor(gramYBY, overwrite_a=True)
|
628 |
+
except LinAlgError as e:
|
629 |
+
raise ValueError("Linearly dependent constraints") from e
|
630 |
+
|
631 |
+
_applyConstraints(blockVectorX, gramYBY, blockVectorBY, blockVectorY)
|
632 |
+
|
633 |
+
##
|
634 |
+
# B-orthonormalize X.
|
635 |
+
blockVectorX, blockVectorBX, _ = _b_orthonormalize(
|
636 |
+
B, blockVectorX, verbosityLevel=verbosityLevel)
|
637 |
+
if blockVectorX is None:
|
638 |
+
raise ValueError("Linearly dependent initial approximations")
|
639 |
+
|
640 |
+
##
|
641 |
+
# Compute the initial Ritz vectors: solve the eigenproblem.
|
642 |
+
blockVectorAX = A(blockVectorX)
|
643 |
+
if blockVectorAX.shape != blockVectorX.shape:
|
644 |
+
raise ValueError(
|
645 |
+
f"The shape {blockVectorX.shape} "
|
646 |
+
f"of the initial approximations not preserved\n"
|
647 |
+
f"and changed to {blockVectorAX.shape} "
|
648 |
+
f"after multiplying by the primary matrix.\n"
|
649 |
+
)
|
650 |
+
|
651 |
+
gramXAX = blockVectorX.T.conj() @ blockVectorAX
|
652 |
+
|
653 |
+
_lambda, eigBlockVector = eigh(gramXAX, check_finite=False)
|
654 |
+
ii = _get_indx(_lambda, sizeX, largest)
|
655 |
+
_lambda = _lambda[ii]
|
656 |
+
if retLambdaHistory:
|
657 |
+
lambdaHistory[0, :] = _lambda
|
658 |
+
|
659 |
+
eigBlockVector = np.asarray(eigBlockVector[:, ii])
|
660 |
+
blockVectorX = _matmul_inplace(
|
661 |
+
blockVectorX, eigBlockVector,
|
662 |
+
verbosityLevel=verbosityLevel
|
663 |
+
)
|
664 |
+
blockVectorAX = _matmul_inplace(
|
665 |
+
blockVectorAX, eigBlockVector,
|
666 |
+
verbosityLevel=verbosityLevel
|
667 |
+
)
|
668 |
+
if B is not None:
|
669 |
+
blockVectorBX = _matmul_inplace(
|
670 |
+
blockVectorBX, eigBlockVector,
|
671 |
+
verbosityLevel=verbosityLevel
|
672 |
+
)
|
673 |
+
|
674 |
+
##
|
675 |
+
# Active index set.
|
676 |
+
activeMask = np.ones((sizeX,), dtype=bool)
|
677 |
+
|
678 |
+
##
|
679 |
+
# Main iteration loop.
|
680 |
+
|
681 |
+
blockVectorP = None # set during iteration
|
682 |
+
blockVectorAP = None
|
683 |
+
blockVectorBP = None
|
684 |
+
|
685 |
+
smallestResidualNorm = np.abs(np.finfo(blockVectorX.dtype).max)
|
686 |
+
|
687 |
+
iterationNumber = -1
|
688 |
+
restart = True
|
689 |
+
forcedRestart = False
|
690 |
+
explicitGramFlag = False
|
691 |
+
while iterationNumber < maxiter:
|
692 |
+
iterationNumber += 1
|
693 |
+
|
694 |
+
if B is not None:
|
695 |
+
aux = blockVectorBX * _lambda[np.newaxis, :]
|
696 |
+
else:
|
697 |
+
aux = blockVectorX * _lambda[np.newaxis, :]
|
698 |
+
|
699 |
+
blockVectorR = blockVectorAX - aux
|
700 |
+
|
701 |
+
aux = np.sum(blockVectorR.conj() * blockVectorR, 0)
|
702 |
+
residualNorms = np.sqrt(np.abs(aux))
|
703 |
+
if retResidualNormsHistory:
|
704 |
+
residualNormsHistory[iterationNumber, :] = residualNorms
|
705 |
+
residualNorm = np.sum(np.abs(residualNorms)) / sizeX
|
706 |
+
|
707 |
+
if residualNorm < smallestResidualNorm:
|
708 |
+
smallestResidualNorm = residualNorm
|
709 |
+
bestIterationNumber = iterationNumber
|
710 |
+
bestblockVectorX = blockVectorX
|
711 |
+
elif residualNorm > 2**restartControl * smallestResidualNorm:
|
712 |
+
forcedRestart = True
|
713 |
+
blockVectorAX = A(blockVectorX)
|
714 |
+
if blockVectorAX.shape != blockVectorX.shape:
|
715 |
+
raise ValueError(
|
716 |
+
f"The shape {blockVectorX.shape} "
|
717 |
+
f"of the restarted iterate not preserved\n"
|
718 |
+
f"and changed to {blockVectorAX.shape} "
|
719 |
+
f"after multiplying by the primary matrix.\n"
|
720 |
+
)
|
721 |
+
if B is not None:
|
722 |
+
blockVectorBX = B(blockVectorX)
|
723 |
+
if blockVectorBX.shape != blockVectorX.shape:
|
724 |
+
raise ValueError(
|
725 |
+
f"The shape {blockVectorX.shape} "
|
726 |
+
f"of the restarted iterate not preserved\n"
|
727 |
+
f"and changed to {blockVectorBX.shape} "
|
728 |
+
f"after multiplying by the secondary matrix.\n"
|
729 |
+
)
|
730 |
+
|
731 |
+
ii = np.where(residualNorms > residualTolerance, True, False)
|
732 |
+
activeMask = activeMask & ii
|
733 |
+
currentBlockSize = activeMask.sum()
|
734 |
+
|
735 |
+
if verbosityLevel:
|
736 |
+
print(f"iteration {iterationNumber}")
|
737 |
+
print(f"current block size: {currentBlockSize}")
|
738 |
+
print(f"eigenvalue(s):\n{_lambda}")
|
739 |
+
print(f"residual norm(s):\n{residualNorms}")
|
740 |
+
|
741 |
+
if currentBlockSize == 0:
|
742 |
+
break
|
743 |
+
|
744 |
+
activeBlockVectorR = _as2d(blockVectorR[:, activeMask])
|
745 |
+
|
746 |
+
if iterationNumber > 0:
|
747 |
+
activeBlockVectorP = _as2d(blockVectorP[:, activeMask])
|
748 |
+
activeBlockVectorAP = _as2d(blockVectorAP[:, activeMask])
|
749 |
+
if B is not None:
|
750 |
+
activeBlockVectorBP = _as2d(blockVectorBP[:, activeMask])
|
751 |
+
|
752 |
+
if M is not None:
|
753 |
+
            # Apply the preconditioner M to the active residuals.
|
754 |
+
activeBlockVectorR = M(activeBlockVectorR)
|
755 |
+
|
756 |
+
##
|
757 |
+
# Apply constraints to the preconditioned residuals.
|
758 |
+
if blockVectorY is not None:
|
759 |
+
_applyConstraints(activeBlockVectorR,
|
760 |
+
gramYBY,
|
761 |
+
blockVectorBY,
|
762 |
+
blockVectorY)
|
763 |
+
|
764 |
+
##
|
765 |
+
# B-orthogonalize the preconditioned residuals to X.
|
766 |
+
if B is not None:
|
767 |
+
activeBlockVectorR = activeBlockVectorR - (
|
768 |
+
blockVectorX @
|
769 |
+
(blockVectorBX.T.conj() @ activeBlockVectorR)
|
770 |
+
)
|
771 |
+
else:
|
772 |
+
activeBlockVectorR = activeBlockVectorR - (
|
773 |
+
blockVectorX @
|
774 |
+
(blockVectorX.T.conj() @ activeBlockVectorR)
|
775 |
+
)
|
776 |
+
|
777 |
+
##
|
778 |
+
# B-orthonormalize the preconditioned residuals.
|
779 |
+
aux = _b_orthonormalize(
|
780 |
+
B, activeBlockVectorR, verbosityLevel=verbosityLevel)
|
781 |
+
activeBlockVectorR, activeBlockVectorBR, _ = aux
|
782 |
+
|
783 |
+
if activeBlockVectorR is None:
|
784 |
+
warnings.warn(
|
785 |
+
f"Failed at iteration {iterationNumber} with accuracies "
|
786 |
+
f"{residualNorms}\n not reaching the requested "
|
787 |
+
f"tolerance {residualTolerance}.",
|
788 |
+
UserWarning, stacklevel=2
|
789 |
+
)
|
790 |
+
break
|
791 |
+
activeBlockVectorAR = A(activeBlockVectorR)
|
792 |
+
|
793 |
+
if iterationNumber > 0:
|
794 |
+
if B is not None:
|
795 |
+
aux = _b_orthonormalize(
|
796 |
+
B, activeBlockVectorP, activeBlockVectorBP,
|
797 |
+
verbosityLevel=verbosityLevel
|
798 |
+
)
|
799 |
+
activeBlockVectorP, activeBlockVectorBP, invR = aux
|
800 |
+
else:
|
801 |
+
aux = _b_orthonormalize(B, activeBlockVectorP,
|
802 |
+
verbosityLevel=verbosityLevel)
|
803 |
+
activeBlockVectorP, _, invR = aux
|
804 |
+
# Function _b_orthonormalize returns None if Cholesky fails
|
805 |
+
if activeBlockVectorP is not None:
|
806 |
+
activeBlockVectorAP = _matmul_inplace(
|
807 |
+
activeBlockVectorAP, invR,
|
808 |
+
verbosityLevel=verbosityLevel
|
809 |
+
)
|
810 |
+
restart = forcedRestart
|
811 |
+
else:
|
812 |
+
restart = True
|
813 |
+
|
814 |
+
##
|
815 |
+
# Perform the Rayleigh Ritz Procedure:
|
816 |
+
# Compute symmetric Gram matrices:
|
817 |
+
|
818 |
+
if activeBlockVectorAR.dtype == "float32":
|
819 |
+
myeps = 1
|
820 |
+
else:
|
821 |
+
myeps = np.sqrt(np.finfo(activeBlockVectorR.dtype).eps)
|
822 |
+
|
823 |
+
if residualNorms.max() > myeps and not explicitGramFlag:
|
824 |
+
explicitGramFlag = False
|
825 |
+
else:
|
826 |
+
# Once explicitGramFlag, forever explicitGramFlag.
|
827 |
+
explicitGramFlag = True
|
828 |
+
|
829 |
+
# Shared memory assignments to simplify the code
|
830 |
+
if B is None:
|
831 |
+
blockVectorBX = blockVectorX
|
832 |
+
activeBlockVectorBR = activeBlockVectorR
|
833 |
+
if not restart:
|
834 |
+
activeBlockVectorBP = activeBlockVectorP
|
835 |
+
|
836 |
+
# Common submatrices:
|
837 |
+
gramXAR = np.dot(blockVectorX.T.conj(), activeBlockVectorAR)
|
838 |
+
gramRAR = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAR)
|
839 |
+
|
840 |
+
gramDtype = activeBlockVectorAR.dtype
|
841 |
+
if explicitGramFlag:
|
842 |
+
gramRAR = (gramRAR + gramRAR.T.conj()) / 2
|
843 |
+
gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)
|
844 |
+
gramXAX = (gramXAX + gramXAX.T.conj()) / 2
|
845 |
+
gramXBX = np.dot(blockVectorX.T.conj(), blockVectorBX)
|
846 |
+
gramRBR = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBR)
|
847 |
+
gramXBR = np.dot(blockVectorX.T.conj(), activeBlockVectorBR)
|
848 |
+
else:
|
849 |
+
gramXAX = np.diag(_lambda).astype(gramDtype)
|
850 |
+
gramXBX = np.eye(sizeX, dtype=gramDtype)
|
851 |
+
gramRBR = np.eye(currentBlockSize, dtype=gramDtype)
|
852 |
+
gramXBR = np.zeros((sizeX, currentBlockSize), dtype=gramDtype)
|
853 |
+
|
854 |
+
if not restart:
|
855 |
+
gramXAP = np.dot(blockVectorX.T.conj(), activeBlockVectorAP)
|
856 |
+
gramRAP = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAP)
|
857 |
+
gramPAP = np.dot(activeBlockVectorP.T.conj(), activeBlockVectorAP)
|
858 |
+
gramXBP = np.dot(blockVectorX.T.conj(), activeBlockVectorBP)
|
859 |
+
gramRBP = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBP)
|
860 |
+
if explicitGramFlag:
|
861 |
+
gramPAP = (gramPAP + gramPAP.T.conj()) / 2
|
862 |
+
gramPBP = np.dot(activeBlockVectorP.T.conj(),
|
863 |
+
activeBlockVectorBP)
|
864 |
+
else:
|
865 |
+
gramPBP = np.eye(currentBlockSize, dtype=gramDtype)
|
866 |
+
|
867 |
+
gramA = np.block(
|
868 |
+
[
|
869 |
+
[gramXAX, gramXAR, gramXAP],
|
870 |
+
[gramXAR.T.conj(), gramRAR, gramRAP],
|
871 |
+
[gramXAP.T.conj(), gramRAP.T.conj(), gramPAP],
|
872 |
+
]
|
873 |
+
)
|
874 |
+
gramB = np.block(
|
875 |
+
[
|
876 |
+
[gramXBX, gramXBR, gramXBP],
|
877 |
+
[gramXBR.T.conj(), gramRBR, gramRBP],
|
878 |
+
[gramXBP.T.conj(), gramRBP.T.conj(), gramPBP],
|
879 |
+
]
|
880 |
+
)
|
881 |
+
|
882 |
+
_handle_gramA_gramB_verbosity(gramA, gramB, verbosityLevel)
|
883 |
+
|
884 |
+
try:
|
885 |
+
_lambda, eigBlockVector = eigh(gramA,
|
886 |
+
gramB,
|
887 |
+
check_finite=False)
|
888 |
+
except LinAlgError as e:
|
889 |
+
# raise ValueError("eigh failed in lobpcg iterations") from e
|
890 |
+
if verbosityLevel:
|
891 |
+
warnings.warn(
|
892 |
+
f"eigh failed at iteration {iterationNumber} \n"
|
893 |
+
f"with error {e} causing a restart.\n",
|
894 |
+
UserWarning, stacklevel=2
|
895 |
+
)
|
896 |
+
# try again after dropping the direction vectors P from RR
|
897 |
+
restart = True
|
898 |
+
|
899 |
+
if restart:
|
900 |
+
gramA = np.block([[gramXAX, gramXAR], [gramXAR.T.conj(), gramRAR]])
|
901 |
+
gramB = np.block([[gramXBX, gramXBR], [gramXBR.T.conj(), gramRBR]])
|
902 |
+
|
903 |
+
_handle_gramA_gramB_verbosity(gramA, gramB, verbosityLevel)
|
904 |
+
|
905 |
+
try:
|
906 |
+
_lambda, eigBlockVector = eigh(gramA,
|
907 |
+
gramB,
|
908 |
+
check_finite=False)
|
909 |
+
except LinAlgError as e:
|
910 |
+
# raise ValueError("eigh failed in lobpcg iterations") from e
|
911 |
+
warnings.warn(
|
912 |
+
f"eigh failed at iteration {iterationNumber} with error\n"
|
913 |
+
f"{e}\n",
|
914 |
+
UserWarning, stacklevel=2
|
915 |
+
)
|
916 |
+
break
|
917 |
+
|
918 |
+
ii = _get_indx(_lambda, sizeX, largest)
|
919 |
+
_lambda = _lambda[ii]
|
920 |
+
eigBlockVector = eigBlockVector[:, ii]
|
921 |
+
if retLambdaHistory:
|
922 |
+
lambdaHistory[iterationNumber + 1, :] = _lambda
|
923 |
+
|
924 |
+
# Compute Ritz vectors.
|
925 |
+
if B is not None:
|
926 |
+
if not restart:
|
927 |
+
eigBlockVectorX = eigBlockVector[:sizeX]
|
928 |
+
eigBlockVectorR = eigBlockVector[sizeX:
|
929 |
+
sizeX + currentBlockSize]
|
930 |
+
eigBlockVectorP = eigBlockVector[sizeX + currentBlockSize:]
|
931 |
+
|
932 |
+
pp = np.dot(activeBlockVectorR, eigBlockVectorR)
|
933 |
+
pp += np.dot(activeBlockVectorP, eigBlockVectorP)
|
934 |
+
|
935 |
+
app = np.dot(activeBlockVectorAR, eigBlockVectorR)
|
936 |
+
app += np.dot(activeBlockVectorAP, eigBlockVectorP)
|
937 |
+
|
938 |
+
bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)
|
939 |
+
bpp += np.dot(activeBlockVectorBP, eigBlockVectorP)
|
940 |
+
else:
|
941 |
+
eigBlockVectorX = eigBlockVector[:sizeX]
|
942 |
+
eigBlockVectorR = eigBlockVector[sizeX:]
|
943 |
+
|
944 |
+
pp = np.dot(activeBlockVectorR, eigBlockVectorR)
|
945 |
+
app = np.dot(activeBlockVectorAR, eigBlockVectorR)
|
946 |
+
bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)
|
947 |
+
|
948 |
+
blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp
|
949 |
+
blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app
|
950 |
+
blockVectorBX = np.dot(blockVectorBX, eigBlockVectorX) + bpp
|
951 |
+
|
952 |
+
blockVectorP, blockVectorAP, blockVectorBP = pp, app, bpp
|
953 |
+
|
954 |
+
else:
|
955 |
+
if not restart:
|
956 |
+
eigBlockVectorX = eigBlockVector[:sizeX]
|
957 |
+
eigBlockVectorR = eigBlockVector[sizeX:
|
958 |
+
sizeX + currentBlockSize]
|
959 |
+
eigBlockVectorP = eigBlockVector[sizeX + currentBlockSize:]
|
960 |
+
|
961 |
+
pp = np.dot(activeBlockVectorR, eigBlockVectorR)
|
962 |
+
pp += np.dot(activeBlockVectorP, eigBlockVectorP)
|
963 |
+
|
964 |
+
app = np.dot(activeBlockVectorAR, eigBlockVectorR)
|
965 |
+
app += np.dot(activeBlockVectorAP, eigBlockVectorP)
|
966 |
+
else:
|
967 |
+
eigBlockVectorX = eigBlockVector[:sizeX]
|
968 |
+
eigBlockVectorR = eigBlockVector[sizeX:]
|
969 |
+
|
970 |
+
pp = np.dot(activeBlockVectorR, eigBlockVectorR)
|
971 |
+
app = np.dot(activeBlockVectorAR, eigBlockVectorR)
|
972 |
+
|
973 |
+
blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp
|
974 |
+
blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app
|
975 |
+
|
976 |
+
blockVectorP, blockVectorAP = pp, app
|
977 |
+
|
978 |
+
if B is not None:
|
979 |
+
aux = blockVectorBX * _lambda[np.newaxis, :]
|
980 |
+
else:
|
981 |
+
aux = blockVectorX * _lambda[np.newaxis, :]
|
982 |
+
|
983 |
+
blockVectorR = blockVectorAX - aux
|
984 |
+
|
985 |
+
aux = np.sum(blockVectorR.conj() * blockVectorR, 0)
|
986 |
+
residualNorms = np.sqrt(np.abs(aux))
|
987 |
+
# Use old lambda in case of early loop exit.
|
988 |
+
if retLambdaHistory:
|
989 |
+
lambdaHistory[iterationNumber + 1, :] = _lambda
|
990 |
+
if retResidualNormsHistory:
|
991 |
+
residualNormsHistory[iterationNumber + 1, :] = residualNorms
|
992 |
+
residualNorm = np.sum(np.abs(residualNorms)) / sizeX
|
993 |
+
if residualNorm < smallestResidualNorm:
|
994 |
+
smallestResidualNorm = residualNorm
|
995 |
+
bestIterationNumber = iterationNumber + 1
|
996 |
+
bestblockVectorX = blockVectorX
|
997 |
+
|
998 |
+
if np.max(np.abs(residualNorms)) > residualTolerance:
|
999 |
+
warnings.warn(
|
1000 |
+
f"Exited at iteration {iterationNumber} with accuracies \n"
|
1001 |
+
f"{residualNorms}\n"
|
1002 |
+
f"not reaching the requested tolerance {residualTolerance}.\n"
|
1003 |
+
f"Use iteration {bestIterationNumber} instead with accuracy \n"
|
1004 |
+
f"{smallestResidualNorm}.\n",
|
1005 |
+
UserWarning, stacklevel=2
|
1006 |
+
)
|
1007 |
+
|
1008 |
+
if verbosityLevel:
|
1009 |
+
print(f"Final iterative eigenvalue(s):\n{_lambda}")
|
1010 |
+
print(f"Final iterative residual norm(s):\n{residualNorms}")
|
1011 |
+
|
1012 |
+
blockVectorX = bestblockVectorX
|
1013 |
+
# Making eigenvectors "exactly" satisfy the blockVectorY constrains
|
1014 |
+
if blockVectorY is not None:
|
1015 |
+
_applyConstraints(blockVectorX,
|
1016 |
+
gramYBY,
|
1017 |
+
blockVectorBY,
|
1018 |
+
blockVectorY)
|
1019 |
+
|
1020 |
+
# Making eigenvectors "exactly" othonormalized by final "exact" RR
|
1021 |
+
blockVectorAX = A(blockVectorX)
|
1022 |
+
if blockVectorAX.shape != blockVectorX.shape:
|
1023 |
+
raise ValueError(
|
1024 |
+
f"The shape {blockVectorX.shape} "
|
1025 |
+
f"of the postprocessing iterate not preserved\n"
|
1026 |
+
f"and changed to {blockVectorAX.shape} "
|
1027 |
+
f"after multiplying by the primary matrix.\n"
|
1028 |
+
)
|
1029 |
+
gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)
|
1030 |
+
|
1031 |
+
blockVectorBX = blockVectorX
|
1032 |
+
if B is not None:
|
1033 |
+
blockVectorBX = B(blockVectorX)
|
1034 |
+
if blockVectorBX.shape != blockVectorX.shape:
|
1035 |
+
raise ValueError(
|
1036 |
+
f"The shape {blockVectorX.shape} "
|
1037 |
+
f"of the postprocessing iterate not preserved\n"
|
1038 |
+
f"and changed to {blockVectorBX.shape} "
|
1039 |
+
f"after multiplying by the secondary matrix.\n"
|
1040 |
+
)
|
1041 |
+
|
1042 |
+
gramXBX = np.dot(blockVectorX.T.conj(), blockVectorBX)
|
1043 |
+
_handle_gramA_gramB_verbosity(gramXAX, gramXBX, verbosityLevel)
|
1044 |
+
gramXAX = (gramXAX + gramXAX.T.conj()) / 2
|
1045 |
+
gramXBX = (gramXBX + gramXBX.T.conj()) / 2
|
1046 |
+
try:
|
1047 |
+
_lambda, eigBlockVector = eigh(gramXAX,
|
1048 |
+
gramXBX,
|
1049 |
+
check_finite=False)
|
1050 |
+
except LinAlgError as e:
|
1051 |
+
raise ValueError("eigh has failed in lobpcg postprocessing") from e
|
1052 |
+
|
1053 |
+
ii = _get_indx(_lambda, sizeX, largest)
|
1054 |
+
_lambda = _lambda[ii]
|
1055 |
+
eigBlockVector = np.asarray(eigBlockVector[:, ii])
|
1056 |
+
|
1057 |
+
blockVectorX = np.dot(blockVectorX, eigBlockVector)
|
1058 |
+
blockVectorAX = np.dot(blockVectorAX, eigBlockVector)
|
1059 |
+
|
1060 |
+
if B is not None:
|
1061 |
+
blockVectorBX = np.dot(blockVectorBX, eigBlockVector)
|
1062 |
+
aux = blockVectorBX * _lambda[np.newaxis, :]
|
1063 |
+
else:
|
1064 |
+
aux = blockVectorX * _lambda[np.newaxis, :]
|
1065 |
+
|
1066 |
+
blockVectorR = blockVectorAX - aux
|
1067 |
+
|
1068 |
+
aux = np.sum(blockVectorR.conj() * blockVectorR, 0)
|
1069 |
+
residualNorms = np.sqrt(np.abs(aux))
|
1070 |
+
|
1071 |
+
if retLambdaHistory:
|
1072 |
+
lambdaHistory[bestIterationNumber + 1, :] = _lambda
|
1073 |
+
if retResidualNormsHistory:
|
1074 |
+
residualNormsHistory[bestIterationNumber + 1, :] = residualNorms
|
1075 |
+
|
1076 |
+
if retLambdaHistory:
|
1077 |
+
lambdaHistory = lambdaHistory[
|
1078 |
+
: bestIterationNumber + 2, :]
|
1079 |
+
if retResidualNormsHistory:
|
1080 |
+
residualNormsHistory = residualNormsHistory[
|
1081 |
+
: bestIterationNumber + 2, :]
|
1082 |
+
|
1083 |
+
if np.max(np.abs(residualNorms)) > residualTolerance:
|
1084 |
+
warnings.warn(
|
1085 |
+
f"Exited postprocessing with accuracies \n"
|
1086 |
+
f"{residualNorms}\n"
|
1087 |
+
f"not reaching the requested tolerance {residualTolerance}.",
|
1088 |
+
UserWarning, stacklevel=2
|
1089 |
+
)
|
1090 |
+
|
1091 |
+
if verbosityLevel:
|
1092 |
+
print(f"Final postprocessing eigenvalue(s):\n{_lambda}")
|
1093 |
+
print(f"Final residual norm(s):\n{residualNorms}")
|
1094 |
+
|
1095 |
+
if retLambdaHistory:
|
1096 |
+
lambdaHistory = np.vsplit(lambdaHistory, np.shape(lambdaHistory)[0])
|
1097 |
+
lambdaHistory = [np.squeeze(i) for i in lambdaHistory]
|
1098 |
+
if retResidualNormsHistory:
|
1099 |
+
residualNormsHistory = np.vsplit(residualNormsHistory,
|
1100 |
+
np.shape(residualNormsHistory)[0])
|
1101 |
+
residualNormsHistory = [np.squeeze(i) for i in residualNormsHistory]
|
1102 |
+
|
1103 |
+
if retLambdaHistory:
|
1104 |
+
if retResidualNormsHistory:
|
1105 |
+
return _lambda, blockVectorX, lambdaHistory, residualNormsHistory
|
1106 |
+
else:
|
1107 |
+
return _lambda, blockVectorX, lambdaHistory
|
1108 |
+
else:
|
1109 |
+
if retResidualNormsHistory:
|
1110 |
+
return _lambda, blockVectorX, residualNormsHistory
|
1111 |
+
else:
|
1112 |
+
return _lambda, blockVectorX
|
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/tests/__init__.py
ADDED
File without changes
|
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/tests/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (204 Bytes).
|
|
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/tests/__pycache__/test_lobpcg.cpython-310.pyc
ADDED
Binary file (19.5 kB).
|
|
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/lobpcg/tests/test_lobpcg.py
ADDED
@@ -0,0 +1,645 @@
1 |
+
""" Test functions for the sparse.linalg._eigen.lobpcg module
|
2 |
+
"""
|
3 |
+
|
4 |
+
import itertools
|
5 |
+
import platform
|
6 |
+
import sys
|
7 |
+
import pytest
|
8 |
+
import numpy as np
|
9 |
+
from numpy import ones, r_, diag
|
10 |
+
from numpy.testing import (assert_almost_equal, assert_equal,
|
11 |
+
assert_allclose, assert_array_less)
|
12 |
+
|
13 |
+
from scipy import sparse
|
14 |
+
from scipy.linalg import eig, eigh, toeplitz, orth
|
15 |
+
from scipy.sparse import spdiags, diags, eye, csr_matrix
|
16 |
+
from scipy.sparse.linalg import eigs, LinearOperator
|
17 |
+
from scipy.sparse.linalg._eigen.lobpcg import lobpcg
|
18 |
+
from scipy.sparse.linalg._eigen.lobpcg.lobpcg import _b_orthonormalize
|
19 |
+
from scipy._lib._util import np_long, np_ulong
|
20 |
+
|
21 |
+
_IS_32BIT = (sys.maxsize < 2**32)
|
22 |
+
|
23 |
+
INT_DTYPES = {np.intc, np_long, np.longlong, np.uintc, np_ulong, np.ulonglong}
|
24 |
+
# np.half is unsupported on many test systems so excluded
|
25 |
+
REAL_DTYPES = {np.float32, np.float64, np.longdouble}
|
26 |
+
COMPLEX_DTYPES = {np.complex64, np.complex128, np.clongdouble}
|
27 |
+
# use sorted list to ensure fixed order of tests
|
28 |
+
VDTYPES = sorted(REAL_DTYPES ^ COMPLEX_DTYPES, key=str)
|
29 |
+
MDTYPES = sorted(INT_DTYPES ^ REAL_DTYPES ^ COMPLEX_DTYPES, key=str)
|
30 |
+
|
31 |
+
|
32 |
+
def sign_align(A, B):
|
33 |
+
"""Align signs of columns of A match those of B: column-wise remove
|
34 |
+
sign of A by multiplying with its sign then multiply in sign of B.
|
35 |
+
"""
|
36 |
+
return np.array([col_A * np.sign(col_A[0]) * np.sign(col_B[0])
|
37 |
+
for col_A, col_B in zip(A.T, B.T)]).T
|
38 |
+
|
39 |
+
def ElasticRod(n):
|
40 |
+
"""Build the matrices for the generalized eigenvalue problem of the
|
41 |
+
fixed-free elastic rod vibration model.
|
42 |
+
"""
|
43 |
+
L = 1.0
|
44 |
+
le = L/n
|
45 |
+
rho = 7.85e3
|
46 |
+
S = 1.e-4
|
47 |
+
E = 2.1e11
|
48 |
+
mass = rho*S*le/6.
|
49 |
+
k = E*S/le
|
50 |
+
A = k*(diag(r_[2.*ones(n-1), 1])-diag(ones(n-1), 1)-diag(ones(n-1), -1))
|
51 |
+
B = mass*(diag(r_[4.*ones(n-1), 2])+diag(ones(n-1), 1)+diag(ones(n-1), -1))
|
52 |
+
return A, B
|
53 |
+
|
54 |
+
|
55 |
+
def MikotaPair(n):
|
56 |
+
"""Build a pair of full diagonal matrices for the generalized eigenvalue
|
57 |
+
problem. The Mikota pair acts as a nice test since the eigenvalues are the
|
58 |
+
squares of the integers n, n=1,2,...
|
59 |
+
"""
|
60 |
+
x = np.arange(1, n+1)
|
61 |
+
B = diag(1./x)
|
62 |
+
y = np.arange(n-1, 0, -1)
|
63 |
+
z = np.arange(2*n-1, 0, -2)
|
64 |
+
A = diag(z)-diag(y, -1)-diag(y, 1)
|
65 |
+
return A, B
|
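# Illustrative check (added comment, not part of the original tests): for the
# Mikota pair the generalized eigenvalues of (A, B) are the squared integers,
# e.g. for n = 4
#     A, B = MikotaPair(4)
#     eigh(A, B, eigvals_only=True)   # approximately array([ 1.,  4.,  9., 16.])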
66 |
+
|
67 |
+
|
68 |
+
def compare_solutions(A, B, m):
|
69 |
+
"""Check eig vs. lobpcg consistency.
|
70 |
+
"""
|
71 |
+
n = A.shape[0]
|
72 |
+
rnd = np.random.RandomState(0)
|
73 |
+
V = rnd.random((n, m))
|
74 |
+
X = orth(V)
|
75 |
+
eigvals, _ = lobpcg(A, X, B=B, tol=1e-2, maxiter=50, largest=False)
|
76 |
+
eigvals.sort()
|
77 |
+
w, _ = eig(A, b=B)
|
78 |
+
w.sort()
|
79 |
+
assert_almost_equal(w[:int(m/2)], eigvals[:int(m/2)], decimal=2)
|
80 |
+
|
81 |
+
|
82 |
+
def test_Small():
|
83 |
+
A, B = ElasticRod(10)
|
84 |
+
with pytest.warns(UserWarning, match="The problem size"):
|
85 |
+
compare_solutions(A, B, 10)
|
86 |
+
A, B = MikotaPair(10)
|
87 |
+
with pytest.warns(UserWarning, match="The problem size"):
|
88 |
+
compare_solutions(A, B, 10)
|
89 |
+
|
90 |
+
|
91 |
+
def test_ElasticRod():
|
92 |
+
A, B = ElasticRod(20)
|
93 |
+
msg = "Exited at iteration.*|Exited postprocessing with accuracies.*"
|
94 |
+
with pytest.warns(UserWarning, match=msg):
|
95 |
+
compare_solutions(A, B, 2)
|
96 |
+
|
97 |
+
|
98 |
+
def test_MikotaPair():
|
99 |
+
A, B = MikotaPair(20)
|
100 |
+
compare_solutions(A, B, 2)
|
101 |
+
|
102 |
+
|
103 |
+
@pytest.mark.parametrize("n", [50])
|
104 |
+
@pytest.mark.parametrize("m", [1, 2, 10])
|
105 |
+
@pytest.mark.parametrize("Vdtype", sorted(REAL_DTYPES, key=str))
|
106 |
+
@pytest.mark.parametrize("Bdtype", sorted(REAL_DTYPES, key=str))
|
107 |
+
@pytest.mark.parametrize("BVdtype", sorted(REAL_DTYPES, key=str))
|
108 |
+
def test_b_orthonormalize(n, m, Vdtype, Bdtype, BVdtype):
|
109 |
+
"""Test B-orthonormalization by Cholesky with callable 'B'.
|
110 |
+
The function '_b_orthonormalize' is key in LOBPCG but may
|
111 |
+
lead to numerical instabilities. The input vectors are often
|
112 |
+
badly scaled, so the function needs scale-invariant Cholesky;
|
113 |
+
see https://netlib.org/lapack/lawnspdf/lawn14.pdf.
|
114 |
+
"""
|
115 |
+
rnd = np.random.RandomState(0)
|
116 |
+
X = rnd.standard_normal((n, m)).astype(Vdtype)
|
117 |
+
Xcopy = np.copy(X)
|
118 |
+
vals = np.arange(1, n+1, dtype=float)
|
119 |
+
B = diags([vals], [0], (n, n)).astype(Bdtype)
|
120 |
+
BX = B @ X
|
121 |
+
BX = BX.astype(BVdtype)
|
122 |
+
dtype = min(X.dtype, B.dtype, BX.dtype)
|
123 |
+
# np.longdouble tol cannot be achieved on most systems
|
124 |
+
atol = m * n * max(np.finfo(dtype).eps, np.finfo(np.float64).eps)
|
125 |
+
|
126 |
+
Xo, BXo, _ = _b_orthonormalize(lambda v: B @ v, X, BX)
|
127 |
+
# Check in-place.
|
128 |
+
assert_equal(X, Xo)
|
129 |
+
assert_equal(id(X), id(Xo))
|
130 |
+
assert_equal(BX, BXo)
|
131 |
+
assert_equal(id(BX), id(BXo))
|
132 |
+
# Check BXo.
|
133 |
+
assert_allclose(B @ Xo, BXo, atol=atol, rtol=atol)
|
134 |
+
# Check B-orthonormality
|
135 |
+
assert_allclose(Xo.T.conj() @ B @ Xo, np.identity(m),
|
136 |
+
atol=atol, rtol=atol)
|
137 |
+
# Repeat without BX in outputs
|
138 |
+
X = np.copy(Xcopy)
|
139 |
+
Xo1, BXo1, _ = _b_orthonormalize(lambda v: B @ v, X)
|
140 |
+
assert_allclose(Xo, Xo1, atol=atol, rtol=atol)
|
141 |
+
assert_allclose(BXo, BXo1, atol=atol, rtol=atol)
|
142 |
+
# Check in-place.
|
143 |
+
assert_equal(X, Xo1)
|
144 |
+
assert_equal(id(X), id(Xo1))
|
145 |
+
# Check BXo1.
|
146 |
+
assert_allclose(B @ Xo1, BXo1, atol=atol, rtol=atol)
|
147 |
+
|
148 |
+
# Introduce column-scaling in X.
|
149 |
+
scaling = 1.0 / np.geomspace(10, 1e10, num=m)
|
150 |
+
X = Xcopy * scaling
|
151 |
+
X = X.astype(Vdtype)
|
152 |
+
BX = B @ X
|
153 |
+
BX = BX.astype(BVdtype)
|
154 |
+
# Check scaling-invariance of Cholesky-based orthonormalization
|
155 |
+
Xo1, BXo1, _ = _b_orthonormalize(lambda v: B @ v, X, BX)
|
156 |
+
    # The output should be the same, up to the signs of the columns.
|
157 |
+
Xo1 = sign_align(Xo1, Xo)
|
158 |
+
assert_allclose(Xo, Xo1, atol=atol, rtol=atol)
|
159 |
+
BXo1 = sign_align(BXo1, BXo)
|
160 |
+
assert_allclose(BXo, BXo1, atol=atol, rtol=atol)
|
161 |
+
|
162 |
+
|
163 |
+
@pytest.mark.filterwarnings("ignore:Exited at iteration 0")
|
164 |
+
@pytest.mark.filterwarnings("ignore:Exited postprocessing")
|
165 |
+
def test_nonhermitian_warning(capsys):
|
166 |
+
"""Check the warning of a Ritz matrix being not Hermitian
|
167 |
+
by feeding a non-Hermitian input matrix.
|
168 |
+
    Also check stdout (since verbosityLevel=1) and that stderr is empty.
|
169 |
+
"""
|
170 |
+
n = 10
|
171 |
+
X = np.arange(n * 2).reshape(n, 2).astype(np.float32)
|
172 |
+
A = np.arange(n * n).reshape(n, n).astype(np.float32)
|
173 |
+
with pytest.warns(UserWarning, match="Matrix gramA"):
|
174 |
+
_, _ = lobpcg(A, X, verbosityLevel=1, maxiter=0)
|
175 |
+
out, err = capsys.readouterr() # Capture output
|
176 |
+
assert out.startswith("Solving standard eigenvalue") # Test stdout
|
177 |
+
assert err == '' # Test empty stderr
|
178 |
+
# Make the matrix symmetric and the UserWarning disappears.
|
179 |
+
A += A.T
|
180 |
+
_, _ = lobpcg(A, X, verbosityLevel=1, maxiter=0)
|
181 |
+
out, err = capsys.readouterr() # Capture output
|
182 |
+
assert out.startswith("Solving standard eigenvalue") # Test stdout
|
183 |
+
assert err == '' # Test empty stderr
|
184 |
+
|
185 |
+
|
186 |
+
def test_regression():
|
187 |
+
"""Check the eigenvalue of the identity matrix is one.
|
188 |
+
"""
|
189 |
+
# https://mail.python.org/pipermail/scipy-user/2010-October/026944.html
|
190 |
+
n = 10
|
191 |
+
X = np.ones((n, 1))
|
192 |
+
A = np.identity(n)
|
193 |
+
w, _ = lobpcg(A, X)
|
194 |
+
assert_allclose(w, [1])
|
195 |
+
|
196 |
+
|
197 |
+
@pytest.mark.filterwarnings("ignore:The problem size")
|
198 |
+
@pytest.mark.parametrize('n, m, m_excluded', [(30, 4, 3), (4, 2, 0)])
|
199 |
+
def test_diagonal(n, m, m_excluded):
|
200 |
+
"""Test ``m - m_excluded`` eigenvalues and eigenvectors of
|
201 |
+
diagonal matrices of the size ``n`` varying matrix formats:
|
202 |
+
dense array, spare matrix, and ``LinearOperator`` for both
|
203 |
+
matrixes in the generalized eigenvalue problem ``Av = cBv``
|
204 |
+
and for the preconditioner.
|
205 |
+
"""
|
206 |
+
rnd = np.random.RandomState(0)
|
207 |
+
|
208 |
+
# Define the generalized eigenvalue problem Av = cBv
|
209 |
+
# where (c, v) is a generalized eigenpair,
|
210 |
+
# A is the diagonal matrix whose entries are 1,...n,
|
211 |
+
# B is the identity matrix.
|
212 |
+
vals = np.arange(1, n+1, dtype=float)
|
213 |
+
A_s = diags([vals], [0], (n, n))
|
214 |
+
A_a = A_s.toarray()
|
215 |
+
|
216 |
+
def A_f(x):
|
217 |
+
return A_s @ x
|
218 |
+
|
219 |
+
A_lo = LinearOperator(matvec=A_f,
|
220 |
+
matmat=A_f,
|
221 |
+
shape=(n, n), dtype=float)
|
222 |
+
|
223 |
+
B_a = eye(n)
|
224 |
+
B_s = csr_matrix(B_a)
|
225 |
+
|
226 |
+
def B_f(x):
|
227 |
+
return B_a @ x
|
228 |
+
|
229 |
+
B_lo = LinearOperator(matvec=B_f,
|
230 |
+
matmat=B_f,
|
231 |
+
shape=(n, n), dtype=float)
|
232 |
+
|
233 |
+
# Let the preconditioner M be the inverse of A.
|
234 |
+
M_s = diags([1./vals], [0], (n, n))
|
235 |
+
M_a = M_s.toarray()
|
236 |
+
|
237 |
+
def M_f(x):
|
238 |
+
return M_s @ x
|
239 |
+
|
240 |
+
M_lo = LinearOperator(matvec=M_f,
|
241 |
+
matmat=M_f,
|
242 |
+
shape=(n, n), dtype=float)
|
243 |
+
|
244 |
+
# Pick random initial vectors.
|
245 |
+
X = rnd.normal(size=(n, m))
|
246 |
+
|
247 |
+
# Require that the returned eigenvectors be in the orthogonal complement
|
248 |
+
# of the first few standard basis vectors.
|
249 |
+
if m_excluded > 0:
|
250 |
+
Y = np.eye(n, m_excluded)
|
251 |
+
else:
|
252 |
+
Y = None
|
253 |
+
|
254 |
+
for A in [A_a, A_s, A_lo]:
|
255 |
+
for B in [B_a, B_s, B_lo]:
|
256 |
+
for M in [M_a, M_s, M_lo]:
|
257 |
+
eigvals, vecs = lobpcg(A, X, B, M=M, Y=Y,
|
258 |
+
maxiter=40, largest=False)
|
259 |
+
|
260 |
+
assert_allclose(eigvals, np.arange(1+m_excluded,
|
261 |
+
1+m_excluded+m))
|
262 |
+
_check_eigen(A, eigvals, vecs, rtol=1e-3, atol=1e-3)
|
263 |
+
|
264 |
+
|
265 |
+
def _check_eigen(M, w, V, rtol=1e-8, atol=1e-14):
|
266 |
+
"""Check if the eigenvalue residual is small.
|
267 |
+
"""
|
268 |
+
mult_wV = np.multiply(w, V)
|
269 |
+
dot_MV = M.dot(V)
|
270 |
+
assert_allclose(mult_wV, dot_MV, rtol=rtol, atol=atol)
|
271 |
+
|
272 |
+
|
273 |
+
def _check_fiedler(n, p):
|
274 |
+
"""Check the Fiedler vector computation.
|
275 |
+
"""
|
276 |
+
# This is not necessarily the recommended way to find the Fiedler vector.
|
277 |
+
col = np.zeros(n)
|
278 |
+
col[1] = 1
|
279 |
+
A = toeplitz(col)
|
280 |
+
D = np.diag(A.sum(axis=1))
|
281 |
+
L = D - A
|
282 |
+
# Compute the full eigendecomposition using tricks, e.g.
|
283 |
+
# http://www.cs.yale.edu/homes/spielman/561/2009/lect02-09.pdf
|
284 |
+
tmp = np.pi * np.arange(n) / n
|
285 |
+
analytic_w = 2 * (1 - np.cos(tmp))
|
286 |
+
analytic_V = np.cos(np.outer(np.arange(n) + 1/2, tmp))
|
287 |
+
_check_eigen(L, analytic_w, analytic_V)
|
288 |
+
# Compute the full eigendecomposition using eigh.
|
289 |
+
eigh_w, eigh_V = eigh(L)
|
290 |
+
_check_eigen(L, eigh_w, eigh_V)
|
291 |
+
# Check that the first eigenvalue is near zero and that the rest agree.
|
292 |
+
assert_array_less(np.abs([eigh_w[0], analytic_w[0]]), 1e-14)
|
293 |
+
assert_allclose(eigh_w[1:], analytic_w[1:])
|
294 |
+
|
295 |
+
# Check small lobpcg eigenvalues.
|
296 |
+
X = analytic_V[:, :p]
|
297 |
+
lobpcg_w, lobpcg_V = lobpcg(L, X, largest=False)
|
298 |
+
assert_equal(lobpcg_w.shape, (p,))
|
299 |
+
assert_equal(lobpcg_V.shape, (n, p))
|
300 |
+
_check_eigen(L, lobpcg_w, lobpcg_V)
|
301 |
+
assert_array_less(np.abs(np.min(lobpcg_w)), 1e-14)
|
302 |
+
assert_allclose(np.sort(lobpcg_w)[1:], analytic_w[1:p])
|
303 |
+
|
304 |
+
# Check large lobpcg eigenvalues.
|
305 |
+
X = analytic_V[:, -p:]
|
306 |
+
lobpcg_w, lobpcg_V = lobpcg(L, X, largest=True)
|
307 |
+
assert_equal(lobpcg_w.shape, (p,))
|
308 |
+
assert_equal(lobpcg_V.shape, (n, p))
|
309 |
+
_check_eigen(L, lobpcg_w, lobpcg_V)
|
310 |
+
assert_allclose(np.sort(lobpcg_w), analytic_w[-p:])
|
311 |
+
|
312 |
+
# Look for the Fiedler vector using good but not exactly correct guesses.
|
313 |
+
fiedler_guess = np.concatenate((np.ones(n//2), -np.ones(n-n//2)))
|
314 |
+
X = np.vstack((np.ones(n), fiedler_guess)).T
|
315 |
+
lobpcg_w, _ = lobpcg(L, X, largest=False)
|
316 |
+
# Mathematically, the smaller eigenvalue should be zero
|
317 |
+
# and the larger should be the algebraic connectivity.
|
318 |
+
lobpcg_w = np.sort(lobpcg_w)
|
319 |
+
assert_allclose(lobpcg_w, analytic_w[:2], atol=1e-14)
|
320 |
+
|
321 |
+
|
322 |
+
def test_fiedler_small_8():
|
323 |
+
"""Check the dense workaround path for small matrices.
|
324 |
+
"""
|
325 |
+
# This triggers the dense path because 8 < 2*5.
|
326 |
+
with pytest.warns(UserWarning, match="The problem size"):
|
327 |
+
_check_fiedler(8, 2)
|
328 |
+
|
329 |
+
|
330 |
+
def test_fiedler_large_12():
|
331 |
+
"""Check the dense workaround path avoided for non-small matrices.
|
332 |
+
"""
|
333 |
+
# This does not trigger the dense path, because 2*5 <= 12.
|
334 |
+
_check_fiedler(12, 2)
|
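(Not part of the file being added.) The analytic eigendecomposition that `_check_fiedler` above relies on, namely that the path-graph Laplacian has eigenvalues 2*(1 - cos(pi*k/n)), can be spot-checked on its own. A minimal sketch, assuming only NumPy and scipy.linalg; the size n = 12 is an illustrative choice:

import numpy as np
from scipy.linalg import toeplitz, eigh

n = 12
col = np.zeros(n)
col[1] = 1
A = toeplitz(col)               # adjacency matrix of the path graph on n nodes
L = np.diag(A.sum(axis=1)) - A  # graph Laplacian D - A
w_analytic = 2 * (1 - np.cos(np.pi * np.arange(n) / n))  # known eigenvalues
w_numeric = eigh(L, eigvals_only=True)                   # ascending order
print(np.allclose(np.sort(w_analytic), w_numeric, atol=1e-12))  # expected: True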
335 |
+
|
336 |
+
|
337 |
+
@pytest.mark.filterwarnings("ignore:Failed at iteration")
|
338 |
+
@pytest.mark.filterwarnings("ignore:Exited at iteration")
|
339 |
+
@pytest.mark.filterwarnings("ignore:Exited postprocessing")
|
340 |
+
def test_failure_to_run_iterations():
|
341 |
+
"""Check that the code exits gracefully without breaking. Issue #10974.
|
342 |
+
The code may or may not issue a warning, filtered out. Issue #15935, #17954.
|
343 |
+
"""
|
344 |
+
rnd = np.random.RandomState(0)
|
345 |
+
X = rnd.standard_normal((100, 10))
|
346 |
+
A = X @ X.T
|
347 |
+
Q = rnd.standard_normal((X.shape[0], 4))
|
348 |
+
eigenvalues, _ = lobpcg(A, Q, maxiter=40, tol=1e-12)
|
349 |
+
assert np.max(eigenvalues) > 0
|
350 |
+
|
351 |
+
|
352 |
+
def test_failure_to_run_iterations_nonsymmetric():
|
353 |
+
"""Check that the code exists gracefully without breaking
|
354 |
+
if the matrix is not symmetric.
|
355 |
+
"""
|
356 |
+
A = np.zeros((10, 10))
|
357 |
+
A[0, 1] = 1
|
358 |
+
Q = np.ones((10, 1))
|
359 |
+
msg = "Exited at iteration 2|Exited postprocessing with accuracies.*"
|
360 |
+
with pytest.warns(UserWarning, match=msg):
|
361 |
+
eigenvalues, _ = lobpcg(A, Q, maxiter=20)
|
362 |
+
assert np.max(eigenvalues) > 0
|
363 |
+
|
364 |
+
|
365 |
+
@pytest.mark.filterwarnings("ignore:The problem size")
|
366 |
+
def test_hermitian():
|
367 |
+
"""Check complex-value Hermitian cases.
|
368 |
+
"""
|
369 |
+
rnd = np.random.RandomState(0)
|
370 |
+
|
371 |
+
sizes = [3, 12]
|
372 |
+
ks = [1, 2]
|
373 |
+
gens = [True, False]
|
374 |
+
|
375 |
+
for s, k, gen, dh, dx, db in (
|
376 |
+
itertools.product(sizes, ks, gens, gens, gens, gens)
|
377 |
+
):
|
378 |
+
H = rnd.random((s, s)) + 1.j * rnd.random((s, s))
|
379 |
+
H = 10 * np.eye(s) + H + H.T.conj()
|
380 |
+
H = H.astype(np.complex128) if dh else H.astype(np.complex64)
|
381 |
+
|
382 |
+
X = rnd.standard_normal((s, k))
|
383 |
+
X = X + 1.j * rnd.standard_normal((s, k))
|
384 |
+
X = X.astype(np.complex128) if dx else X.astype(np.complex64)
|
385 |
+
|
386 |
+
if not gen:
|
387 |
+
B = np.eye(s)
|
388 |
+
w, v = lobpcg(H, X, maxiter=99, verbosityLevel=0)
|
389 |
+
# Also test mixing complex H with real B.
|
390 |
+
wb, _ = lobpcg(H, X, B, maxiter=99, verbosityLevel=0)
|
391 |
+
assert_allclose(w, wb, rtol=1e-6)
|
392 |
+
w0, _ = eigh(H)
|
393 |
+
else:
|
394 |
+
B = rnd.random((s, s)) + 1.j * rnd.random((s, s))
|
395 |
+
B = 10 * np.eye(s) + B.dot(B.T.conj())
|
396 |
+
B = B.astype(np.complex128) if db else B.astype(np.complex64)
|
397 |
+
w, v = lobpcg(H, X, B, maxiter=99, verbosityLevel=0)
|
398 |
+
w0, _ = eigh(H, B)
|
399 |
+
|
400 |
+
for wx, vx in zip(w, v.T):
|
401 |
+
# Check eigenvector
|
402 |
+
assert_allclose(np.linalg.norm(H.dot(vx) - B.dot(vx) * wx)
|
403 |
+
/ np.linalg.norm(H.dot(vx)),
|
404 |
+
0, atol=5e-2, rtol=0)
|
405 |
+
|
406 |
+
# Compare eigenvalues
|
407 |
+
j = np.argmin(abs(w0 - wx))
|
408 |
+
assert_allclose(wx, w0[j], rtol=1e-4)
|
409 |
+
|
410 |
+
|
411 |
+
# The n=5 case tests the alternative small matrix code path that uses eigh().
|
412 |
+
@pytest.mark.filterwarnings("ignore:The problem size")
|
413 |
+
@pytest.mark.parametrize('n, atol', [(20, 1e-3), (5, 1e-8)])
|
414 |
+
def test_eigs_consistency(n, atol):
|
415 |
+
"""Check eigs vs. lobpcg consistency.
|
416 |
+
"""
|
417 |
+
vals = np.arange(1, n+1, dtype=np.float64)
|
418 |
+
A = spdiags(vals, 0, n, n)
|
419 |
+
rnd = np.random.RandomState(0)
|
420 |
+
X = rnd.standard_normal((n, 2))
|
421 |
+
lvals, lvecs = lobpcg(A, X, largest=True, maxiter=100)
|
422 |
+
vals, _ = eigs(A, k=2)
|
423 |
+
|
424 |
+
_check_eigen(A, lvals, lvecs, atol=atol, rtol=0)
|
425 |
+
assert_allclose(np.sort(vals), np.sort(lvals), atol=1e-14)
|
426 |
+
|
427 |
+
|
428 |
+
def test_verbosity():
|
429 |
+
"""Check that nonzero verbosity level code runs.
|
430 |
+
"""
|
431 |
+
rnd = np.random.RandomState(0)
|
432 |
+
X = rnd.standard_normal((10, 10))
|
433 |
+
A = X @ X.T
|
434 |
+
Q = rnd.standard_normal((X.shape[0], 1))
|
435 |
+
msg = "Exited at iteration.*|Exited postprocessing with accuracies.*"
|
436 |
+
with pytest.warns(UserWarning, match=msg):
|
437 |
+
_, _ = lobpcg(A, Q, maxiter=3, verbosityLevel=9)
|
438 |
+
|
439 |
+
|
440 |
+
@pytest.mark.xfail(_IS_32BIT and sys.platform == 'win32',
|
441 |
+
reason="tolerance violation on windows")
|
442 |
+
@pytest.mark.xfail(platform.machine() == 'ppc64le',
|
443 |
+
reason="fails on ppc64le")
|
444 |
+
@pytest.mark.filterwarnings("ignore:Exited postprocessing")
|
445 |
+
def test_tolerance_float32():
|
446 |
+
"""Check lobpcg for attainable tolerance in float32.
|
447 |
+
"""
|
448 |
+
rnd = np.random.RandomState(0)
|
449 |
+
n = 50
|
450 |
+
m = 3
|
451 |
+
vals = -np.arange(1, n + 1)
|
452 |
+
A = diags([vals], [0], (n, n))
|
453 |
+
A = A.astype(np.float32)
|
454 |
+
X = rnd.standard_normal((n, m))
|
455 |
+
X = X.astype(np.float32)
|
456 |
+
eigvals, _ = lobpcg(A, X, tol=1.25e-5, maxiter=50, verbosityLevel=0)
|
457 |
+
assert_allclose(eigvals, -np.arange(1, 1 + m), atol=2e-5, rtol=1e-5)
|
458 |
+
|
459 |
+
|
460 |
+
@pytest.mark.parametrize("vdtype", VDTYPES)
|
461 |
+
@pytest.mark.parametrize("mdtype", MDTYPES)
|
462 |
+
@pytest.mark.parametrize("arr_type", [np.array,
|
463 |
+
sparse.csr_matrix,
|
464 |
+
sparse.coo_matrix])
|
465 |
+
def test_dtypes(vdtype, mdtype, arr_type):
|
466 |
+
"""Test lobpcg in various dtypes.
|
467 |
+
"""
|
468 |
+
rnd = np.random.RandomState(0)
|
469 |
+
n = 12
|
470 |
+
m = 2
|
471 |
+
A = arr_type(np.diag(np.arange(1, n + 1)).astype(mdtype))
|
472 |
+
X = rnd.random((n, m))
|
473 |
+
X = X.astype(vdtype)
|
474 |
+
eigvals, eigvecs = lobpcg(A, X, tol=1e-2, largest=False)
|
475 |
+
assert_allclose(eigvals, np.arange(1, 1 + m), atol=1e-1)
|
476 |
+
# eigenvectors must be nearly real in any case
|
477 |
+
assert_allclose(np.sum(np.abs(eigvecs - eigvecs.conj())), 0, atol=1e-2)
|
478 |
+
|
479 |
+
|
480 |
+
@pytest.mark.filterwarnings("ignore:Exited at iteration")
|
481 |
+
@pytest.mark.filterwarnings("ignore:Exited postprocessing")
|
482 |
+
def test_inplace_warning():
|
483 |
+
"""Check lobpcg gives a warning in '_b_orthonormalize'
|
484 |
+
that in-place orthogonalization is impossible due to dtype mismatch.
|
485 |
+
"""
|
486 |
+
rnd = np.random.RandomState(0)
|
487 |
+
n = 6
|
488 |
+
m = 1
|
489 |
+
vals = -np.arange(1, n + 1)
|
490 |
+
A = diags([vals], [0], (n, n))
|
491 |
+
A = A.astype(np.cdouble)
|
492 |
+
X = rnd.standard_normal((n, m))
|
493 |
+
with pytest.warns(UserWarning, match="Inplace update"):
|
494 |
+
eigvals, _ = lobpcg(A, X, maxiter=2, verbosityLevel=1)
|
495 |
+
|
496 |
+
|
497 |
+
def test_maxit():
|
498 |
+
"""Check lobpcg if maxit=maxiter runs maxiter iterations and
|
499 |
+
with maxiter=None runs 20 iterations (the default)
|
500 |
+
by checking the size of the iteration history output, which should
|
501 |
+
be the number of iterations plus 3 (initial, final, and postprocessing)
|
502 |
+
typically when maxiter is small and the choice of the best is passive.
|
503 |
+
"""
|
504 |
+
rnd = np.random.RandomState(0)
|
505 |
+
n = 50
|
506 |
+
m = 4
|
507 |
+
vals = -np.arange(1, n + 1)
|
508 |
+
A = diags([vals], [0], (n, n))
|
509 |
+
A = A.astype(np.float32)
|
510 |
+
X = rnd.standard_normal((n, m))
|
511 |
+
X = X.astype(np.float64)
|
512 |
+
msg = "Exited at iteration.*|Exited postprocessing with accuracies.*"
|
513 |
+
for maxiter in range(1, 4):
|
514 |
+
with pytest.warns(UserWarning, match=msg):
|
515 |
+
_, _, l_h, r_h = lobpcg(A, X, tol=1e-8, maxiter=maxiter,
|
516 |
+
retLambdaHistory=True,
|
517 |
+
retResidualNormsHistory=True)
|
518 |
+
assert_allclose(np.shape(l_h)[0], maxiter+3)
|
519 |
+
assert_allclose(np.shape(r_h)[0], maxiter+3)
|
520 |
+
with pytest.warns(UserWarning, match=msg):
|
521 |
+
l, _, l_h, r_h = lobpcg(A, X, tol=1e-8,
|
522 |
+
retLambdaHistory=True,
|
523 |
+
retResidualNormsHistory=True)
|
524 |
+
assert_allclose(np.shape(l_h)[0], 20+3)
|
525 |
+
assert_allclose(np.shape(r_h)[0], 20+3)
|
526 |
+
# Check that eigenvalue output is the last one in history
|
527 |
+
assert_allclose(l, l_h[-1])
|
528 |
+
# Make sure that both history outputs are lists
|
529 |
+
assert isinstance(l_h, list)
|
530 |
+
assert isinstance(r_h, list)
|
531 |
+
# Make sure that both history lists are array-like
|
532 |
+
assert_allclose(np.shape(l_h), np.shape(np.asarray(l_h)))
|
533 |
+
assert_allclose(np.shape(r_h), np.shape(np.asarray(r_h)))
|
534 |
+
|
535 |
+
|
536 |
+
@pytest.mark.slow
|
537 |
+
@pytest.mark.parametrize("n", [15])
|
538 |
+
@pytest.mark.parametrize("m", [1, 2])
|
539 |
+
@pytest.mark.filterwarnings("ignore:Exited at iteration")
|
540 |
+
@pytest.mark.filterwarnings("ignore:Exited postprocessing")
|
541 |
+
def test_diagonal_data_types(n, m):
|
542 |
+
"""Check lobpcg for diagonal matrices for all matrix types.
|
543 |
+
Constraints are imposed, so a dense eigensolver such as eig cannot be used here.
|
544 |
+
"""
|
545 |
+
rnd = np.random.RandomState(0)
|
546 |
+
# Define the generalized eigenvalue problem Av = cBv
|
547 |
+
# where (c, v) is a generalized eigenpair,
|
548 |
+
# and where we choose A and B to be diagonal.
|
549 |
+
vals = np.arange(1, n + 1)
|
550 |
+
|
551 |
+
# list_sparse_format = ['bsr', 'coo', 'csc', 'csr', 'dia', 'dok', 'lil']
|
552 |
+
list_sparse_format = ['coo']
|
553 |
+
sparse_formats = len(list_sparse_format)
|
554 |
+
for s_f_i, s_f in enumerate(list_sparse_format):
|
555 |
+
|
556 |
+
As64 = diags([vals * vals], [0], (n, n), format=s_f)
|
557 |
+
As32 = As64.astype(np.float32)
|
558 |
+
Af64 = As64.toarray()
|
559 |
+
Af32 = Af64.astype(np.float32)
|
560 |
+
|
561 |
+
def As32f(x):
|
562 |
+
return As32 @ x
|
563 |
+
As32LO = LinearOperator(matvec=As32f,
|
564 |
+
matmat=As32f,
|
565 |
+
shape=(n, n),
|
566 |
+
dtype=As32.dtype)
|
567 |
+
|
568 |
+
listA = [Af64, As64, Af32, As32, As32f, As32LO, lambda v: As32 @ v]
|
569 |
+
|
570 |
+
Bs64 = diags([vals], [0], (n, n), format=s_f)
|
571 |
+
Bf64 = Bs64.toarray()
|
572 |
+
Bs32 = Bs64.astype(np.float32)
|
573 |
+
|
574 |
+
def Bs32f(x):
|
575 |
+
return Bs32 @ x
|
576 |
+
Bs32LO = LinearOperator(matvec=Bs32f,
|
577 |
+
matmat=Bs32f,
|
578 |
+
shape=(n, n),
|
579 |
+
dtype=Bs32.dtype)
|
580 |
+
listB = [Bf64, Bs64, Bs32, Bs32f, Bs32LO, lambda v: Bs32 @ v]
|
581 |
+
|
582 |
+
# Define the preconditioner function as LinearOperator.
|
583 |
+
Ms64 = diags([1./vals], [0], (n, n), format=s_f)
|
584 |
+
|
585 |
+
def Ms64precond(x):
|
586 |
+
return Ms64 @ x
|
587 |
+
Ms64precondLO = LinearOperator(matvec=Ms64precond,
|
588 |
+
matmat=Ms64precond,
|
589 |
+
shape=(n, n),
|
590 |
+
dtype=Ms64.dtype)
|
591 |
+
Mf64 = Ms64.toarray()
|
592 |
+
|
593 |
+
def Mf64precond(x):
|
594 |
+
return Mf64 @ x
|
595 |
+
Mf64precondLO = LinearOperator(matvec=Mf64precond,
|
596 |
+
matmat=Mf64precond,
|
597 |
+
shape=(n, n),
|
598 |
+
dtype=Mf64.dtype)
|
599 |
+
Ms32 = Ms64.astype(np.float32)
|
600 |
+
|
601 |
+
def Ms32precond(x):
|
602 |
+
return Ms32 @ x
|
603 |
+
Ms32precondLO = LinearOperator(matvec=Ms32precond,
|
604 |
+
matmat=Ms32precond,
|
605 |
+
shape=(n, n),
|
606 |
+
dtype=Ms32.dtype)
|
607 |
+
Mf32 = Ms32.toarray()
|
608 |
+
|
609 |
+
def Mf32precond(x):
|
610 |
+
return Mf32 @ x
|
611 |
+
Mf32precondLO = LinearOperator(matvec=Mf32precond,
|
612 |
+
matmat=Mf32precond,
|
613 |
+
shape=(n, n),
|
614 |
+
dtype=Mf32.dtype)
|
615 |
+
listM = [None, Ms64, Ms64precondLO, Mf64precondLO, Ms64precond,
|
616 |
+
Ms32, Ms32precondLO, Mf32precondLO, Ms32precond]
|
617 |
+
|
618 |
+
# Setup matrix of the initial approximation to the eigenvectors
|
619 |
+
# (cannot be sparse array).
|
620 |
+
Xf64 = rnd.random((n, m))
|
621 |
+
Xf32 = Xf64.astype(np.float32)
|
622 |
+
listX = [Xf64, Xf32]
|
623 |
+
|
624 |
+
# Require that the returned eigenvectors be in the orthogonal complement
|
625 |
+
# of the first few standard basis vectors (cannot be sparse array).
|
626 |
+
m_excluded = 3
|
627 |
+
Yf64 = np.eye(n, m_excluded, dtype=float)
|
628 |
+
Yf32 = np.eye(n, m_excluded, dtype=np.float32)
|
629 |
+
listY = [Yf64, Yf32]
|
630 |
+
|
631 |
+
tests = list(itertools.product(listA, listB, listM, listX, listY))
|
632 |
+
# This is one of the slower tests because there are >1,000 configs
|
633 |
+
# to test here, instead of checking product of all input, output types
|
634 |
+
# test each configuration for the first sparse format, and then
|
635 |
+
# for one additional sparse format. This takes 2/7 (about 30%) as long as
|
636 |
+
# testing all configurations for all sparse formats.
|
637 |
+
if s_f_i > 0:
|
638 |
+
tests = tests[s_f_i - 1::sparse_formats-1]
|
639 |
+
|
640 |
+
for A, B, M, X, Y in tests:
|
641 |
+
eigvals, _ = lobpcg(A, X, B=B, M=M, Y=Y, tol=1e-4,
|
642 |
+
maxiter=100, largest=False)
|
643 |
+
assert_allclose(eigvals,
|
644 |
+
np.arange(1 + m_excluded, 1 + m_excluded + m),
|
645 |
+
atol=1e-5)
|
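(Not part of the diff.) For orientation, a minimal sketch of the call pattern the lobpcg tests above exercise: a diagonal generalized eigenproblem A v = lambda B v with a preconditioner M and a constraint block Y. The matrix sizes, tolerance, and maxiter below are illustrative choices, not values taken from the test file:

import numpy as np
from scipy.sparse import diags
from scipy.sparse.linalg import lobpcg

n, m, m_excluded = 50, 4, 3
vals = np.arange(1, n + 1, dtype=np.float64)
A = diags([vals * vals], [0], (n, n))   # "stiffness"-like diagonal matrix
B = diags([vals], [0], (n, n))          # "mass"-like diagonal matrix
M = diags([1.0 / vals], [0], (n, n))    # simple diagonal preconditioner
Y = np.eye(n, m_excluded)               # constrain against the first basis vectors

rng = np.random.default_rng(0)
X = rng.standard_normal((n, m))         # initial block of approximate eigenvectors
eigvals, eigvecs = lobpcg(A, X, B=B, M=M, Y=Y, tol=1e-5,
                          maxiter=200, largest=False)
# The pencil (A, B) has generalized eigenvalues 1, 2, ..., n, so with the first
# m_excluded directions constrained away the result should be close to 4, 5, 6, 7.
print(eigvals)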
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/tests/__init__.py
ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/tests/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (197 Bytes).
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/tests/__pycache__/test_svds.cpython-310.pyc
ADDED
Binary file (24.6 kB).
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_eigen/tests/test_svds.py
ADDED
@@ -0,0 +1,862 @@
1 |
+
import re
|
2 |
+
import copy
|
3 |
+
import numpy as np
|
4 |
+
|
5 |
+
from numpy.testing import assert_allclose, assert_equal, assert_array_equal
|
6 |
+
import pytest
|
7 |
+
|
8 |
+
from scipy.linalg import svd, null_space
|
9 |
+
from scipy.sparse import csc_matrix, issparse, spdiags, random
|
10 |
+
from scipy.sparse.linalg import LinearOperator, aslinearoperator
|
11 |
+
from scipy.sparse.linalg import svds
|
12 |
+
from scipy.sparse.linalg._eigen.arpack import ArpackNoConvergence
|
13 |
+
|
14 |
+
|
15 |
+
# --- Helper Functions / Classes ---
|
16 |
+
|
17 |
+
|
18 |
+
def sorted_svd(m, k, which='LM'):
|
19 |
+
# Compute svd of a dense matrix m, and return singular vectors/values
|
20 |
+
# sorted.
|
21 |
+
if issparse(m):
|
22 |
+
m = m.toarray()
|
23 |
+
u, s, vh = svd(m)
|
24 |
+
if which == 'LM':
|
25 |
+
ii = np.argsort(s)[-k:]
|
26 |
+
elif which == 'SM':
|
27 |
+
ii = np.argsort(s)[:k]
|
28 |
+
else:
|
29 |
+
raise ValueError(f"unknown which={which!r}")
|
30 |
+
|
31 |
+
return u[:, ii], s[ii], vh[ii]
|
32 |
+
|
33 |
+
|
34 |
+
def _check_svds(A, k, u, s, vh, which="LM", check_usvh_A=False,
|
35 |
+
check_svd=True, atol=1e-10, rtol=1e-7):
|
36 |
+
n, m = A.shape
|
37 |
+
|
38 |
+
# Check shapes.
|
39 |
+
assert_equal(u.shape, (n, k))
|
40 |
+
assert_equal(s.shape, (k,))
|
41 |
+
assert_equal(vh.shape, (k, m))
|
42 |
+
|
43 |
+
# Check that the original matrix can be reconstituted.
|
44 |
+
A_rebuilt = (u*s).dot(vh)
|
45 |
+
assert_equal(A_rebuilt.shape, A.shape)
|
46 |
+
if check_usvh_A:
|
47 |
+
assert_allclose(A_rebuilt, A, atol=atol, rtol=rtol)
|
48 |
+
|
49 |
+
# Check that u is a semi-orthogonal matrix.
|
50 |
+
uh_u = np.dot(u.T.conj(), u)
|
51 |
+
assert_equal(uh_u.shape, (k, k))
|
52 |
+
assert_allclose(uh_u, np.identity(k), atol=atol, rtol=rtol)
|
53 |
+
|
54 |
+
# Check that vh is a semi-orthogonal matrix.
|
55 |
+
vh_v = np.dot(vh, vh.T.conj())
|
56 |
+
assert_equal(vh_v.shape, (k, k))
|
57 |
+
assert_allclose(vh_v, np.identity(k), atol=atol, rtol=rtol)
|
58 |
+
|
59 |
+
# Check that scipy.sparse.linalg.svds ~ scipy.linalg.svd
|
60 |
+
if check_svd:
|
61 |
+
u2, s2, vh2 = sorted_svd(A, k, which)
|
62 |
+
assert_allclose(np.abs(u), np.abs(u2), atol=atol, rtol=rtol)
|
63 |
+
assert_allclose(s, s2, atol=atol, rtol=rtol)
|
64 |
+
assert_allclose(np.abs(vh), np.abs(vh2), atol=atol, rtol=rtol)
|
65 |
+
|
66 |
+
|
67 |
+
def _check_svds_n(A, k, u, s, vh, which="LM", check_res=True,
|
68 |
+
check_svd=True, atol=1e-10, rtol=1e-7):
|
69 |
+
n, m = A.shape
|
70 |
+
|
71 |
+
# Check shapes.
|
72 |
+
assert_equal(u.shape, (n, k))
|
73 |
+
assert_equal(s.shape, (k,))
|
74 |
+
assert_equal(vh.shape, (k, m))
|
75 |
+
|
76 |
+
# Check that u is a semi-orthogonal matrix.
|
77 |
+
uh_u = np.dot(u.T.conj(), u)
|
78 |
+
assert_equal(uh_u.shape, (k, k))
|
79 |
+
error = np.sum(np.abs(uh_u - np.identity(k))) / (k * k)
|
80 |
+
assert_allclose(error, 0.0, atol=atol, rtol=rtol)
|
81 |
+
|
82 |
+
# Check that vh is a semi-orthogonal matrix.
|
83 |
+
vh_v = np.dot(vh, vh.T.conj())
|
84 |
+
assert_equal(vh_v.shape, (k, k))
|
85 |
+
error = np.sum(np.abs(vh_v - np.identity(k))) / (k * k)
|
86 |
+
assert_allclose(error, 0.0, atol=atol, rtol=rtol)
|
87 |
+
|
88 |
+
# Check residuals
|
89 |
+
if check_res:
|
90 |
+
ru = A.T.conj() @ u - vh.T.conj() * s
|
91 |
+
rus = np.sum(np.abs(ru)) / (n * k)
|
92 |
+
rvh = A @ vh.T.conj() - u * s
|
93 |
+
rvhs = np.sum(np.abs(rvh)) / (m * k)
|
94 |
+
assert_allclose(rus, 0.0, atol=atol, rtol=rtol)
|
95 |
+
assert_allclose(rvhs, 0.0, atol=atol, rtol=rtol)
|
96 |
+
|
97 |
+
# Check that scipy.sparse.linalg.svds ~ scipy.linalg.svd
|
98 |
+
if check_svd:
|
99 |
+
u2, s2, vh2 = sorted_svd(A, k, which)
|
100 |
+
assert_allclose(s, s2, atol=atol, rtol=rtol)
|
101 |
+
A_rebuilt_svd = (u2*s2).dot(vh2)
|
102 |
+
A_rebuilt = (u*s).dot(vh)
|
103 |
+
assert_equal(A_rebuilt.shape, A.shape)
|
104 |
+
error = np.sum(np.abs(A_rebuilt_svd - A_rebuilt)) / (k * k)
|
105 |
+
assert_allclose(error, 0.0, atol=atol, rtol=rtol)
|
106 |
+
|
107 |
+
|
108 |
+
class CheckingLinearOperator(LinearOperator):
|
109 |
+
def __init__(self, A):
|
110 |
+
self.A = A
|
111 |
+
self.dtype = A.dtype
|
112 |
+
self.shape = A.shape
|
113 |
+
|
114 |
+
def _matvec(self, x):
|
115 |
+
assert_equal(max(x.shape), np.size(x))
|
116 |
+
return self.A.dot(x)
|
117 |
+
|
118 |
+
def _rmatvec(self, x):
|
119 |
+
assert_equal(max(x.shape), np.size(x))
|
120 |
+
return self.A.T.conjugate().dot(x)
|
121 |
+
|
122 |
+
|
123 |
+
# --- Test Input Validation ---
|
124 |
+
# Tests input validation on parameters `k` and `which`.
|
125 |
+
# Needs better input validation checks for all other parameters.
|
126 |
+
|
127 |
+
class SVDSCommonTests:
|
128 |
+
|
129 |
+
solver = None
|
130 |
+
|
131 |
+
# some of these IV tests could run only once, say with solver=None
|
132 |
+
|
133 |
+
_A_empty_msg = "`A` must not be empty."
|
134 |
+
_A_dtype_msg = "`A` must be of floating or complex floating data type"
|
135 |
+
_A_type_msg = "type not understood"
|
136 |
+
_A_ndim_msg = "array must have ndim <= 2"
|
137 |
+
_A_validation_inputs = [
|
138 |
+
(np.asarray([[]]), ValueError, _A_empty_msg),
|
139 |
+
(np.asarray([[1, 2], [3, 4]]), ValueError, _A_dtype_msg),
|
140 |
+
("hi", TypeError, _A_type_msg),
|
141 |
+
(np.asarray([[[1., 2.], [3., 4.]]]), ValueError, _A_ndim_msg)]
|
142 |
+
|
143 |
+
@pytest.mark.parametrize("args", _A_validation_inputs)
|
144 |
+
def test_svds_input_validation_A(self, args):
|
145 |
+
A, error_type, message = args
|
146 |
+
with pytest.raises(error_type, match=message):
|
147 |
+
svds(A, k=1, solver=self.solver)
|
148 |
+
|
149 |
+
@pytest.mark.parametrize("k", [-1, 0, 3, 4, 5, 1.5, "1"])
|
150 |
+
def test_svds_input_validation_k_1(self, k):
|
151 |
+
rng = np.random.default_rng(0)
|
152 |
+
A = rng.random((4, 3))
|
153 |
+
|
154 |
+
# propack can do complete SVD
|
155 |
+
if self.solver == 'propack' and k == 3:
|
156 |
+
res = svds(A, k=k, solver=self.solver, random_state=0)
|
157 |
+
_check_svds(A, k, *res, check_usvh_A=True, check_svd=True)
|
158 |
+
return
|
159 |
+
|
160 |
+
message = ("`k` must be an integer satisfying")
|
161 |
+
with pytest.raises(ValueError, match=message):
|
162 |
+
svds(A, k=k, solver=self.solver)
|
163 |
+
|
164 |
+
def test_svds_input_validation_k_2(self):
|
165 |
+
# I think the stack trace is reasonable when `k` can't be converted
|
166 |
+
# to an int.
|
167 |
+
message = "int() argument must be a"
|
168 |
+
with pytest.raises(TypeError, match=re.escape(message)):
|
169 |
+
svds(np.eye(10), k=[], solver=self.solver)
|
170 |
+
|
171 |
+
message = "invalid literal for int()"
|
172 |
+
with pytest.raises(ValueError, match=message):
|
173 |
+
svds(np.eye(10), k="hi", solver=self.solver)
|
174 |
+
|
175 |
+
@pytest.mark.parametrize("tol", (-1, np.inf, np.nan))
|
176 |
+
def test_svds_input_validation_tol_1(self, tol):
|
177 |
+
message = "`tol` must be a non-negative floating point value."
|
178 |
+
with pytest.raises(ValueError, match=message):
|
179 |
+
svds(np.eye(10), tol=tol, solver=self.solver)
|
180 |
+
|
181 |
+
@pytest.mark.parametrize("tol", ([], 'hi'))
|
182 |
+
def test_svds_input_validation_tol_2(self, tol):
|
183 |
+
# I think the stack trace is reasonable here
|
184 |
+
message = "'<' not supported between instances"
|
185 |
+
with pytest.raises(TypeError, match=message):
|
186 |
+
svds(np.eye(10), tol=tol, solver=self.solver)
|
187 |
+
|
188 |
+
@pytest.mark.parametrize("which", ('LA', 'SA', 'ekki', 0))
|
189 |
+
def test_svds_input_validation_which(self, which):
|
190 |
+
# Regression test for a github issue.
|
191 |
+
# https://github.com/scipy/scipy/issues/4590
|
192 |
+
# Function was not checking for eigenvalue type and unintended
|
193 |
+
# values could be returned.
|
194 |
+
with pytest.raises(ValueError, match="`which` must be in"):
|
195 |
+
svds(np.eye(10), which=which, solver=self.solver)
|
196 |
+
|
197 |
+
@pytest.mark.parametrize("transpose", (True, False))
|
198 |
+
@pytest.mark.parametrize("n", range(4, 9))
|
199 |
+
def test_svds_input_validation_v0_1(self, transpose, n):
|
200 |
+
rng = np.random.default_rng(0)
|
201 |
+
A = rng.random((5, 7))
|
202 |
+
v0 = rng.random(n)
|
203 |
+
if transpose:
|
204 |
+
A = A.T
|
205 |
+
k = 2
|
206 |
+
message = "`v0` must have shape"
|
207 |
+
|
208 |
+
required_length = (A.shape[0] if self.solver == 'propack'
|
209 |
+
else min(A.shape))
|
210 |
+
if n != required_length:
|
211 |
+
with pytest.raises(ValueError, match=message):
|
212 |
+
svds(A, k=k, v0=v0, solver=self.solver)
|
213 |
+
|
214 |
+
def test_svds_input_validation_v0_2(self):
|
215 |
+
A = np.ones((10, 10))
|
216 |
+
v0 = np.ones((1, 10))
|
217 |
+
message = "`v0` must have shape"
|
218 |
+
with pytest.raises(ValueError, match=message):
|
219 |
+
svds(A, k=1, v0=v0, solver=self.solver)
|
220 |
+
|
221 |
+
@pytest.mark.parametrize("v0", ("hi", 1, np.ones(10, dtype=int)))
|
222 |
+
def test_svds_input_validation_v0_3(self, v0):
|
223 |
+
A = np.ones((10, 10))
|
224 |
+
message = "`v0` must be of floating or complex floating data type."
|
225 |
+
with pytest.raises(ValueError, match=message):
|
226 |
+
svds(A, k=1, v0=v0, solver=self.solver)
|
227 |
+
|
228 |
+
@pytest.mark.parametrize("maxiter", (-1, 0, 5.5))
|
229 |
+
def test_svds_input_validation_maxiter_1(self, maxiter):
|
230 |
+
message = ("`maxiter` must be a positive integer.")
|
231 |
+
with pytest.raises(ValueError, match=message):
|
232 |
+
svds(np.eye(10), maxiter=maxiter, solver=self.solver)
|
233 |
+
|
234 |
+
def test_svds_input_validation_maxiter_2(self):
|
235 |
+
# I think the stack trace is reasonable when `k` can't be converted
|
236 |
+
# to an int.
|
237 |
+
message = "int() argument must be a"
|
238 |
+
with pytest.raises(TypeError, match=re.escape(message)):
|
239 |
+
svds(np.eye(10), maxiter=[], solver=self.solver)
|
240 |
+
|
241 |
+
message = "invalid literal for int()"
|
242 |
+
with pytest.raises(ValueError, match=message):
|
243 |
+
svds(np.eye(10), maxiter="hi", solver=self.solver)
|
244 |
+
|
245 |
+
@pytest.mark.parametrize("rsv", ('ekki', 10))
|
246 |
+
def test_svds_input_validation_return_singular_vectors(self, rsv):
|
247 |
+
message = "`return_singular_vectors` must be in"
|
248 |
+
with pytest.raises(ValueError, match=message):
|
249 |
+
svds(np.eye(10), return_singular_vectors=rsv, solver=self.solver)
|
250 |
+
|
251 |
+
# --- Test Parameters ---
|
252 |
+
|
253 |
+
@pytest.mark.parametrize("k", [3, 5])
|
254 |
+
@pytest.mark.parametrize("which", ["LM", "SM"])
|
255 |
+
def test_svds_parameter_k_which(self, k, which):
|
256 |
+
# check that the `k` parameter sets the number of singular values/
|
257 |
+
# singular vectors returned.
|
258 |
+
# Also check that the `which` parameter sets whether the largest or
|
259 |
+
# smallest singular values are returned
|
260 |
+
rng = np.random.default_rng(0)
|
261 |
+
A = rng.random((10, 10))
|
262 |
+
if self.solver == 'lobpcg':
|
263 |
+
with pytest.warns(UserWarning, match="The problem size"):
|
264 |
+
res = svds(A, k=k, which=which, solver=self.solver,
|
265 |
+
random_state=0)
|
266 |
+
else:
|
267 |
+
res = svds(A, k=k, which=which, solver=self.solver,
|
268 |
+
random_state=0)
|
269 |
+
_check_svds(A, k, *res, which=which, atol=8e-10)
|
270 |
+
|
271 |
+
@pytest.mark.filterwarnings("ignore:Exited",
|
272 |
+
reason="Ignore LOBPCG early exit.")
|
273 |
+
# loop instead of parametrize for simplicity
|
274 |
+
def test_svds_parameter_tol(self):
|
275 |
+
# check the effect of the `tol` parameter on solver accuracy by solving
|
276 |
+
# the same problem with varying `tol` and comparing the eigenvalues
|
277 |
+
# against ground truth computed with a dense SVD
|
278 |
+
n = 100 # matrix size
|
279 |
+
k = 3 # number of eigenvalues to check
|
280 |
+
|
281 |
+
# generate a random, sparse-ish matrix
|
282 |
+
# effect isn't apparent for matrices that are too small
|
283 |
+
rng = np.random.default_rng(0)
|
284 |
+
A = rng.random((n, n))
|
285 |
+
A[A > .1] = 0
|
286 |
+
A = A @ A.T
|
287 |
+
|
288 |
+
_, s, _ = svd(A) # calculate ground truth
|
289 |
+
|
290 |
+
# calculate the error as a function of `tol`
|
291 |
+
A = csc_matrix(A)
|
292 |
+
|
293 |
+
def err(tol):
|
294 |
+
_, s2, _ = svds(A, k=k, v0=np.ones(n), maxiter=1000,
|
295 |
+
solver=self.solver, tol=tol, random_state=0)
|
296 |
+
return np.linalg.norm((s2 - s[k-1::-1])/s[k-1::-1])
|
297 |
+
|
298 |
+
tols = [1e-4, 1e-2, 1e0] # tolerance levels to check
|
299 |
+
# for 'arpack' and 'propack', accuracies make discrete steps
|
300 |
+
accuracies = {'propack': [1e-12, 1e-6, 1e-4],
|
301 |
+
'arpack': [2.5e-15, 1e-10, 1e-10],
|
302 |
+
'lobpcg': [2e-12, 4e-2, 2]}
|
303 |
+
|
304 |
+
for tol, accuracy in zip(tols, accuracies[self.solver]):
|
305 |
+
error = err(tol)
|
306 |
+
assert error < accuracy
|
307 |
+
|
308 |
+
def test_svd_v0(self):
|
309 |
+
# check that the `v0` parameter affects the solution
|
310 |
+
n = 100
|
311 |
+
k = 1
|
312 |
+
# If k != 1, LOBPCG needs more initial vectors, which are generated
|
313 |
+
# with random_state, so it does not pass w/ k >= 2.
|
314 |
+
# For some other values of `n`, the AssertionErrors are not raised
|
315 |
+
# with different v0s, which is reasonable.
|
316 |
+
|
317 |
+
rng = np.random.default_rng(0)
|
318 |
+
A = rng.random((n, n))
|
319 |
+
|
320 |
+
# with the same v0, solutions are the same, and they are accurate
|
321 |
+
# v0 takes precedence over random_state
|
322 |
+
v0a = rng.random(n)
|
323 |
+
res1a = svds(A, k, v0=v0a, solver=self.solver, random_state=0)
|
324 |
+
res2a = svds(A, k, v0=v0a, solver=self.solver, random_state=1)
|
325 |
+
for idx in range(3):
|
326 |
+
assert_allclose(res1a[idx], res2a[idx], rtol=1e-15, atol=2e-16)
|
327 |
+
_check_svds(A, k, *res1a)
|
328 |
+
|
329 |
+
# with the same v0, solutions are the same, and they are accurate
|
330 |
+
v0b = rng.random(n)
|
331 |
+
res1b = svds(A, k, v0=v0b, solver=self.solver, random_state=2)
|
332 |
+
res2b = svds(A, k, v0=v0b, solver=self.solver, random_state=3)
|
333 |
+
for idx in range(3):
|
334 |
+
assert_allclose(res1b[idx], res2b[idx], rtol=1e-15, atol=2e-16)
|
335 |
+
_check_svds(A, k, *res1b)
|
336 |
+
|
337 |
+
# with different v0, solutions can be numerically different
|
338 |
+
message = "Arrays are not equal"
|
339 |
+
with pytest.raises(AssertionError, match=message):
|
340 |
+
assert_equal(res1a, res1b)
|
341 |
+
|
342 |
+
def test_svd_random_state(self):
|
343 |
+
# check that the `random_state` parameter affects the solution
|
344 |
+
# Admittedly, `n` and `k` are chosen so that all solvers pass all
|
345 |
+
# these checks. That's a tall order, since LOBPCG doesn't want to
|
346 |
+
# achieve the desired accuracy and ARPACK often returns the same
|
347 |
+
# singular values/vectors for different v0.
|
348 |
+
n = 100
|
349 |
+
k = 1
|
350 |
+
|
351 |
+
rng = np.random.default_rng(0)
|
352 |
+
A = rng.random((n, n))
|
353 |
+
|
354 |
+
# with the same random_state, solutions are the same and accurate
|
355 |
+
res1a = svds(A, k, solver=self.solver, random_state=0)
|
356 |
+
res2a = svds(A, k, solver=self.solver, random_state=0)
|
357 |
+
for idx in range(3):
|
358 |
+
assert_allclose(res1a[idx], res2a[idx], rtol=1e-15, atol=2e-16)
|
359 |
+
_check_svds(A, k, *res1a)
|
360 |
+
|
361 |
+
# with the same random_state, solutions are the same and accurate
|
362 |
+
res1b = svds(A, k, solver=self.solver, random_state=1)
|
363 |
+
res2b = svds(A, k, solver=self.solver, random_state=1)
|
364 |
+
for idx in range(3):
|
365 |
+
assert_allclose(res1b[idx], res2b[idx], rtol=1e-15, atol=2e-16)
|
366 |
+
_check_svds(A, k, *res1b)
|
367 |
+
|
368 |
+
# with different random_state, solutions can be numerically different
|
369 |
+
message = "Arrays are not equal"
|
370 |
+
with pytest.raises(AssertionError, match=message):
|
371 |
+
assert_equal(res1a, res1b)
|
372 |
+
|
373 |
+
@pytest.mark.parametrize("random_state", (0, 1,
|
374 |
+
np.random.RandomState(0),
|
375 |
+
np.random.default_rng(0)))
|
376 |
+
def test_svd_random_state_2(self, random_state):
|
377 |
+
n = 100
|
378 |
+
k = 1
|
379 |
+
|
380 |
+
rng = np.random.default_rng(0)
|
381 |
+
A = rng.random((n, n))
|
382 |
+
|
383 |
+
random_state_2 = copy.deepcopy(random_state)
|
384 |
+
|
385 |
+
# with the same random_state, solutions are the same and accurate
|
386 |
+
res1a = svds(A, k, solver=self.solver, random_state=random_state)
|
387 |
+
res2a = svds(A, k, solver=self.solver, random_state=random_state_2)
|
388 |
+
for idx in range(3):
|
389 |
+
assert_allclose(res1a[idx], res2a[idx], rtol=1e-15, atol=2e-16)
|
390 |
+
_check_svds(A, k, *res1a)
|
391 |
+
|
392 |
+
@pytest.mark.parametrize("random_state", (None,
|
393 |
+
np.random.RandomState(0),
|
394 |
+
np.random.default_rng(0)))
|
395 |
+
@pytest.mark.filterwarnings("ignore:Exited",
|
396 |
+
reason="Ignore LOBPCG early exit.")
|
397 |
+
def test_svd_random_state_3(self, random_state):
|
398 |
+
n = 100
|
399 |
+
k = 5
|
400 |
+
|
401 |
+
rng = np.random.default_rng(0)
|
402 |
+
A = rng.random((n, n))
|
403 |
+
|
404 |
+
random_state = copy.deepcopy(random_state)
|
405 |
+
|
406 |
+
# random_state in different state produces accurate - but not
|
407 |
+
# necessarily identical - results
|
408 |
+
res1a = svds(A, k, solver=self.solver, random_state=random_state, maxiter=1000)
|
409 |
+
res2a = svds(A, k, solver=self.solver, random_state=random_state, maxiter=1000)
|
410 |
+
_check_svds(A, k, *res1a, atol=2e-7)
|
411 |
+
_check_svds(A, k, *res2a, atol=2e-7)
|
412 |
+
|
413 |
+
message = "Arrays are not equal"
|
414 |
+
with pytest.raises(AssertionError, match=message):
|
415 |
+
assert_equal(res1a, res2a)
|
416 |
+
|
417 |
+
@pytest.mark.filterwarnings("ignore:Exited postprocessing")
|
418 |
+
def test_svd_maxiter(self):
|
419 |
+
# check that maxiter works as expected: should not return accurate
|
420 |
+
# solution after 1 iteration, but should with default `maxiter`
|
421 |
+
A = np.diag(np.arange(9)).astype(np.float64)
|
422 |
+
k = 1
|
423 |
+
u, s, vh = sorted_svd(A, k)
|
424 |
+
# Use default maxiter by default
|
425 |
+
maxiter = None
|
426 |
+
|
427 |
+
if self.solver == 'arpack':
|
428 |
+
message = "ARPACK error -1: No convergence"
|
429 |
+
with pytest.raises(ArpackNoConvergence, match=message):
|
430 |
+
svds(A, k, ncv=3, maxiter=1, solver=self.solver)
|
431 |
+
elif self.solver == 'lobpcg':
|
432 |
+
# Set maxiter higher so test passes without changing
|
433 |
+
# default and breaking backward compatibility (gh-20221)
|
434 |
+
maxiter = 30
|
435 |
+
with pytest.warns(UserWarning, match="Exited at iteration"):
|
436 |
+
svds(A, k, maxiter=1, solver=self.solver)
|
437 |
+
elif self.solver == 'propack':
|
438 |
+
message = "k=1 singular triplets did not converge within"
|
439 |
+
with pytest.raises(np.linalg.LinAlgError, match=message):
|
440 |
+
svds(A, k, maxiter=1, solver=self.solver)
|
441 |
+
|
442 |
+
ud, sd, vhd = svds(A, k, solver=self.solver, maxiter=maxiter,
|
443 |
+
random_state=0)
|
444 |
+
_check_svds(A, k, ud, sd, vhd, atol=1e-8)
|
445 |
+
assert_allclose(np.abs(ud), np.abs(u), atol=1e-8)
|
446 |
+
assert_allclose(np.abs(vhd), np.abs(vh), atol=1e-8)
|
447 |
+
assert_allclose(np.abs(sd), np.abs(s), atol=1e-9)
|
448 |
+
|
449 |
+
@pytest.mark.parametrize("rsv", (True, False, 'u', 'vh'))
|
450 |
+
@pytest.mark.parametrize("shape", ((5, 7), (6, 6), (7, 5)))
|
451 |
+
def test_svd_return_singular_vectors(self, rsv, shape):
|
452 |
+
# check that the return_singular_vectors parameter works as expected
|
453 |
+
rng = np.random.default_rng(0)
|
454 |
+
A = rng.random(shape)
|
455 |
+
k = 2
|
456 |
+
M, N = shape
|
457 |
+
u, s, vh = sorted_svd(A, k)
|
458 |
+
|
459 |
+
respect_u = True if self.solver == 'propack' else M <= N
|
460 |
+
respect_vh = True if self.solver == 'propack' else M > N
|
461 |
+
|
462 |
+
if self.solver == 'lobpcg':
|
463 |
+
with pytest.warns(UserWarning, match="The problem size"):
|
464 |
+
if rsv is False:
|
465 |
+
s2 = svds(A, k, return_singular_vectors=rsv,
|
466 |
+
solver=self.solver, random_state=rng)
|
467 |
+
assert_allclose(s2, s)
|
468 |
+
elif rsv == 'u' and respect_u:
|
469 |
+
u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
|
470 |
+
solver=self.solver, random_state=rng)
|
471 |
+
assert_allclose(np.abs(u2), np.abs(u))
|
472 |
+
assert_allclose(s2, s)
|
473 |
+
assert vh2 is None
|
474 |
+
elif rsv == 'vh' and respect_vh:
|
475 |
+
u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
|
476 |
+
solver=self.solver, random_state=rng)
|
477 |
+
assert u2 is None
|
478 |
+
assert_allclose(s2, s)
|
479 |
+
assert_allclose(np.abs(vh2), np.abs(vh))
|
480 |
+
else:
|
481 |
+
u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
|
482 |
+
solver=self.solver, random_state=rng)
|
483 |
+
if u2 is not None:
|
484 |
+
assert_allclose(np.abs(u2), np.abs(u))
|
485 |
+
assert_allclose(s2, s)
|
486 |
+
if vh2 is not None:
|
487 |
+
assert_allclose(np.abs(vh2), np.abs(vh))
|
488 |
+
else:
|
489 |
+
if rsv is False:
|
490 |
+
s2 = svds(A, k, return_singular_vectors=rsv,
|
491 |
+
solver=self.solver, random_state=rng)
|
492 |
+
assert_allclose(s2, s)
|
493 |
+
elif rsv == 'u' and respect_u:
|
494 |
+
u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
|
495 |
+
solver=self.solver, random_state=rng)
|
496 |
+
assert_allclose(np.abs(u2), np.abs(u))
|
497 |
+
assert_allclose(s2, s)
|
498 |
+
assert vh2 is None
|
499 |
+
elif rsv == 'vh' and respect_vh:
|
500 |
+
u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
|
501 |
+
solver=self.solver, random_state=rng)
|
502 |
+
assert u2 is None
|
503 |
+
assert_allclose(s2, s)
|
504 |
+
assert_allclose(np.abs(vh2), np.abs(vh))
|
505 |
+
else:
|
506 |
+
u2, s2, vh2 = svds(A, k, return_singular_vectors=rsv,
|
507 |
+
solver=self.solver, random_state=rng)
|
508 |
+
if u2 is not None:
|
509 |
+
assert_allclose(np.abs(u2), np.abs(u))
|
510 |
+
assert_allclose(s2, s)
|
511 |
+
if vh2 is not None:
|
512 |
+
assert_allclose(np.abs(vh2), np.abs(vh))
|
513 |
+
|
514 |
+
# --- Test Basic Functionality ---
|
515 |
+
# Tests the accuracy of each solver for real and complex matrices provided
|
516 |
+
# as list, dense array, sparse matrix, and LinearOperator.
|
517 |
+
|
518 |
+
A1 = [[1, 2, 3], [3, 4, 3], [1 + 1j, 0, 2], [0, 0, 1]]
|
519 |
+
A2 = [[1, 2, 3, 8 + 5j], [3 - 2j, 4, 3, 5], [1, 0, 2, 3], [0, 0, 1, 0]]
|
520 |
+
|
521 |
+
@pytest.mark.filterwarnings("ignore:k >= N - 1",
|
522 |
+
reason="needed to demonstrate #16725")
|
523 |
+
@pytest.mark.parametrize('A', (A1, A2))
|
524 |
+
@pytest.mark.parametrize('k', range(1, 5))
|
525 |
+
# PROPACK fails a lot if @pytest.mark.parametrize('which', ("SM", "LM"))
|
526 |
+
@pytest.mark.parametrize('real', (True, False))
|
527 |
+
@pytest.mark.parametrize('transpose', (False, True))
|
528 |
+
# In gh-14299, it was suggested the `svds` should _not_ work with lists
|
529 |
+
@pytest.mark.parametrize('lo_type', (np.asarray, csc_matrix,
|
530 |
+
aslinearoperator))
|
531 |
+
def test_svd_simple(self, A, k, real, transpose, lo_type):
|
532 |
+
|
533 |
+
A = np.asarray(A)
|
534 |
+
A = np.real(A) if real else A
|
535 |
+
A = A.T if transpose else A
|
536 |
+
A2 = lo_type(A)
|
537 |
+
|
538 |
+
# could check for the appropriate errors, but that is tested above
|
539 |
+
if k > min(A.shape):
|
540 |
+
pytest.skip("`k` cannot be greater than `min(A.shape)`")
|
541 |
+
if self.solver != 'propack' and k >= min(A.shape):
|
542 |
+
pytest.skip("Only PROPACK supports complete SVD")
|
543 |
+
if self.solver == 'arpack' and not real and k == min(A.shape) - 1:
|
544 |
+
pytest.skip("#16725")
|
545 |
+
|
546 |
+
atol = 3e-10
|
547 |
+
if self.solver == 'propack':
|
548 |
+
atol = 3e-9 # otherwise test fails on Linux aarch64 (see gh-19855)
|
549 |
+
|
550 |
+
if self.solver == 'lobpcg':
|
551 |
+
with pytest.warns(UserWarning, match="The problem size"):
|
552 |
+
u, s, vh = svds(A2, k, solver=self.solver, random_state=0)
|
553 |
+
else:
|
554 |
+
u, s, vh = svds(A2, k, solver=self.solver, random_state=0)
|
555 |
+
_check_svds(A, k, u, s, vh, atol=atol)
|
556 |
+
|
557 |
+
def test_svd_linop(self):
|
558 |
+
solver = self.solver
|
559 |
+
|
560 |
+
nmks = [(6, 7, 3),
|
561 |
+
(9, 5, 4),
|
562 |
+
(10, 8, 5)]
|
563 |
+
|
564 |
+
def reorder(args):
|
565 |
+
U, s, VH = args
|
566 |
+
j = np.argsort(s)
|
567 |
+
return U[:, j], s[j], VH[j, :]
|
568 |
+
|
569 |
+
for n, m, k in nmks:
|
570 |
+
# Test svds on a LinearOperator.
|
571 |
+
A = np.random.RandomState(52).randn(n, m)
|
572 |
+
L = CheckingLinearOperator(A)
|
573 |
+
|
574 |
+
if solver == 'propack':
|
575 |
+
v0 = np.ones(n)
|
576 |
+
else:
|
577 |
+
v0 = np.ones(min(A.shape))
|
578 |
+
if solver == 'lobpcg':
|
579 |
+
with pytest.warns(UserWarning, match="The problem size"):
|
580 |
+
U1, s1, VH1 = reorder(svds(A, k, v0=v0, solver=solver,
|
581 |
+
random_state=0))
|
582 |
+
U2, s2, VH2 = reorder(svds(L, k, v0=v0, solver=solver,
|
583 |
+
random_state=0))
|
584 |
+
else:
|
585 |
+
U1, s1, VH1 = reorder(svds(A, k, v0=v0, solver=solver,
|
586 |
+
random_state=0))
|
587 |
+
U2, s2, VH2 = reorder(svds(L, k, v0=v0, solver=solver,
|
588 |
+
random_state=0))
|
589 |
+
|
590 |
+
assert_allclose(np.abs(U1), np.abs(U2))
|
591 |
+
assert_allclose(s1, s2)
|
592 |
+
assert_allclose(np.abs(VH1), np.abs(VH2))
|
593 |
+
assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)),
|
594 |
+
np.dot(U2, np.dot(np.diag(s2), VH2)))
|
595 |
+
|
596 |
+
# Try again with which="SM".
|
597 |
+
A = np.random.RandomState(1909).randn(n, m)
|
598 |
+
L = CheckingLinearOperator(A)
|
599 |
+
|
600 |
+
# TODO: arpack crashes when v0=v0, which="SM"
|
601 |
+
kwargs = {'v0': v0} if solver not in {None, 'arpack'} else {}
|
602 |
+
if self.solver == 'lobpcg':
|
603 |
+
with pytest.warns(UserWarning, match="The problem size"):
|
604 |
+
U1, s1, VH1 = reorder(svds(A, k, which="SM", solver=solver,
|
605 |
+
random_state=0, **kwargs))
|
606 |
+
U2, s2, VH2 = reorder(svds(L, k, which="SM", solver=solver,
|
607 |
+
random_state=0, **kwargs))
|
608 |
+
else:
|
609 |
+
U1, s1, VH1 = reorder(svds(A, k, which="SM", solver=solver,
|
610 |
+
random_state=0, **kwargs))
|
611 |
+
U2, s2, VH2 = reorder(svds(L, k, which="SM", solver=solver,
|
612 |
+
random_state=0, **kwargs))
|
613 |
+
|
614 |
+
assert_allclose(np.abs(U1), np.abs(U2))
|
615 |
+
assert_allclose(s1 + 1, s2 + 1)
|
616 |
+
assert_allclose(np.abs(VH1), np.abs(VH2))
|
617 |
+
assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)),
|
618 |
+
np.dot(U2, np.dot(np.diag(s2), VH2)))
|
619 |
+
|
620 |
+
if k < min(n, m) - 1:
|
621 |
+
# Complex input and explicit which="LM".
|
622 |
+
for (dt, eps) in [(complex, 1e-7), (np.complex64, 3e-3)]:
|
623 |
+
rng = np.random.RandomState(1648)
|
624 |
+
A = (rng.randn(n, m) + 1j * rng.randn(n, m)).astype(dt)
|
625 |
+
L = CheckingLinearOperator(A)
|
626 |
+
|
627 |
+
if self.solver == 'lobpcg':
|
628 |
+
with pytest.warns(UserWarning,
|
629 |
+
match="The problem size"):
|
630 |
+
U1, s1, VH1 = reorder(svds(A, k, which="LM",
|
631 |
+
solver=solver,
|
632 |
+
random_state=0))
|
633 |
+
U2, s2, VH2 = reorder(svds(L, k, which="LM",
|
634 |
+
solver=solver,
|
635 |
+
random_state=0))
|
636 |
+
else:
|
637 |
+
U1, s1, VH1 = reorder(svds(A, k, which="LM",
|
638 |
+
solver=solver,
|
639 |
+
random_state=0))
|
640 |
+
U2, s2, VH2 = reorder(svds(L, k, which="LM",
|
641 |
+
solver=solver,
|
642 |
+
random_state=0))
|
643 |
+
|
644 |
+
assert_allclose(np.abs(U1), np.abs(U2), rtol=eps)
|
645 |
+
assert_allclose(s1, s2, rtol=eps)
|
646 |
+
assert_allclose(np.abs(VH1), np.abs(VH2), rtol=eps)
|
647 |
+
assert_allclose(np.dot(U1, np.dot(np.diag(s1), VH1)),
|
648 |
+
np.dot(U2, np.dot(np.diag(s2), VH2)),
|
649 |
+
rtol=eps)
|
650 |
+
|
651 |
+
SHAPES = ((100, 100), (100, 101), (101, 100))
|
652 |
+
|
653 |
+
@pytest.mark.filterwarnings("ignore:Exited at iteration")
|
654 |
+
@pytest.mark.filterwarnings("ignore:Exited postprocessing")
|
655 |
+
@pytest.mark.parametrize("shape", SHAPES)
|
656 |
+
# ARPACK supports only dtype float, complex, or np.float32
|
657 |
+
@pytest.mark.parametrize("dtype", (float, complex, np.float32))
|
658 |
+
def test_small_sigma_sparse(self, shape, dtype):
|
659 |
+
# https://github.com/scipy/scipy/pull/11829
|
660 |
+
solver = self.solver
|
661 |
+
# TODO: PROPACK fails orthogonality of singular vectors
|
662 |
+
# if dtype == complex and self.solver == 'propack':
|
663 |
+
# pytest.skip("PROPACK unsupported for complex dtype")
|
664 |
+
rng = np.random.default_rng(0)
|
665 |
+
k = 5
|
666 |
+
(m, n) = shape
|
667 |
+
S = random(m, n, density=0.1, random_state=rng)
|
668 |
+
if dtype == complex:
|
669 |
+
S = + 1j * random(m, n, density=0.1, random_state=rng)
|
670 |
+
e = np.ones(m)
|
671 |
+
e[0:5] *= 1e1 ** np.arange(-5, 0, 1)
|
672 |
+
S = spdiags(e, 0, m, m) @ S
|
673 |
+
S = S.astype(dtype)
|
674 |
+
u, s, vh = svds(S, k, which='SM', solver=solver, maxiter=1000,
|
675 |
+
random_state=0)
|
676 |
+
c_svd = False # partial SVD can be different from full SVD
|
677 |
+
_check_svds_n(S, k, u, s, vh, which="SM", check_svd=c_svd, atol=2e-1)
|
678 |
+
|
679 |
+
# --- Test Edge Cases ---
|
680 |
+
# Checks a few edge cases.
|
681 |
+
|
682 |
+
@pytest.mark.parametrize("shape", ((6, 5), (5, 5), (5, 6)))
|
683 |
+
@pytest.mark.parametrize("dtype", (float, complex))
|
684 |
+
def test_svd_LM_ones_matrix(self, shape, dtype):
|
685 |
+
# Check that svds can deal with matrix_rank less than k in LM mode.
|
686 |
+
k = 3
|
687 |
+
n, m = shape
|
688 |
+
A = np.ones((n, m), dtype=dtype)
|
689 |
+
|
690 |
+
if self.solver == 'lobpcg':
|
691 |
+
with pytest.warns(UserWarning, match="The problem size"):
|
692 |
+
U, s, VH = svds(A, k, solver=self.solver, random_state=0)
|
693 |
+
else:
|
694 |
+
U, s, VH = svds(A, k, solver=self.solver, random_state=0)
|
695 |
+
|
696 |
+
_check_svds(A, k, U, s, VH, check_usvh_A=True, check_svd=False)
|
697 |
+
|
698 |
+
# Check that the largest singular value is near sqrt(n*m)
|
699 |
+
# and the other singular values have been forced to zero.
|
700 |
+
assert_allclose(np.max(s), np.sqrt(n*m))
|
701 |
+
s = np.array(sorted(s)[:-1]) + 1
|
702 |
+
z = np.ones_like(s)
|
703 |
+
assert_allclose(s, z)
|
704 |
+
|
705 |
+
@pytest.mark.filterwarnings("ignore:k >= N - 1",
|
706 |
+
reason="needed to demonstrate #16725")
|
707 |
+
@pytest.mark.parametrize("shape", ((3, 4), (4, 4), (4, 3), (4, 2)))
|
708 |
+
@pytest.mark.parametrize("dtype", (float, complex))
|
709 |
+
def test_zero_matrix(self, shape, dtype):
|
710 |
+
# Check that svds can deal with matrices containing only zeros;
|
711 |
+
# see https://github.com/scipy/scipy/issues/3452/
|
712 |
+
# shape = (4, 2) is included because it is the particular case
|
713 |
+
# reported in the issue
|
714 |
+
k = 1
|
715 |
+
n, m = shape
|
716 |
+
A = np.zeros((n, m), dtype=dtype)
|
717 |
+
|
718 |
+
if (self.solver == 'arpack' and dtype is complex
|
719 |
+
and k == min(A.shape) - 1):
|
720 |
+
pytest.skip("#16725")
|
721 |
+
|
722 |
+
if self.solver == 'propack':
|
723 |
+
pytest.skip("PROPACK failures unrelated to PR #16712")
|
724 |
+
|
725 |
+
if self.solver == 'lobpcg':
|
726 |
+
with pytest.warns(UserWarning, match="The problem size"):
|
727 |
+
U, s, VH = svds(A, k, solver=self.solver, random_state=0)
|
728 |
+
else:
|
729 |
+
U, s, VH = svds(A, k, solver=self.solver, random_state=0)
|
730 |
+
|
731 |
+
# Check some generic properties of svd.
|
732 |
+
_check_svds(A, k, U, s, VH, check_usvh_A=True, check_svd=False)
|
733 |
+
|
734 |
+
# Check that the singular values are zero.
|
735 |
+
assert_array_equal(s, 0)
|
736 |
+
|
737 |
+
@pytest.mark.parametrize("shape", ((20, 20), (20, 21), (21, 20)))
|
738 |
+
# ARPACK supports only dtype float, complex, or np.float32
|
739 |
+
@pytest.mark.parametrize("dtype", (float, complex, np.float32))
|
740 |
+
@pytest.mark.filterwarnings("ignore:Exited",
|
741 |
+
reason="Ignore LOBPCG early exit.")
|
742 |
+
def test_small_sigma(self, shape, dtype):
|
743 |
+
rng = np.random.default_rng(179847540)
|
744 |
+
A = rng.random(shape).astype(dtype)
|
745 |
+
u, _, vh = svd(A, full_matrices=False)
|
746 |
+
if dtype == np.float32:
|
747 |
+
e = 10.0
|
748 |
+
else:
|
749 |
+
e = 100.0
|
750 |
+
t = e**(-np.arange(len(vh))).astype(dtype)
|
751 |
+
A = (u*t).dot(vh)
|
752 |
+
k = 4
|
753 |
+
u, s, vh = svds(A, k, solver=self.solver, maxiter=100, random_state=0)
|
754 |
+
t = np.sum(s > 0)
|
755 |
+
assert_equal(t, k)
|
756 |
+
# LOBPCG needs larger atol and rtol to pass
|
757 |
+
_check_svds_n(A, k, u, s, vh, atol=1e-3, rtol=1e0, check_svd=False)
|
758 |
+
|
759 |
+
# ARPACK supports only dtype float, complex, or np.float32
|
760 |
+
@pytest.mark.filterwarnings("ignore:The problem size")
|
761 |
+
@pytest.mark.parametrize("dtype", (float, complex, np.float32))
|
762 |
+
def test_small_sigma2(self, dtype):
|
763 |
+
rng = np.random.default_rng(179847540)
|
764 |
+
# create a 10x10 singular matrix with a 4-dim null space
|
765 |
+
dim = 4
|
766 |
+
size = 10
|
767 |
+
x = rng.random((size, size-dim))
|
768 |
+
y = x[:, :dim] * rng.random(dim)
|
769 |
+
mat = np.hstack((x, y))
|
770 |
+
mat = mat.astype(dtype)
|
771 |
+
|
772 |
+
nz = null_space(mat)
|
773 |
+
assert_equal(nz.shape[1], dim)
|
774 |
+
|
775 |
+
# Tolerances atol and rtol adjusted to pass np.float32
|
776 |
+
# Use non-sparse svd
|
777 |
+
u, s, vh = svd(mat)
|
778 |
+
# Singular values are 0:
|
779 |
+
assert_allclose(s[-dim:], 0, atol=1e-6, rtol=1e0)
|
780 |
+
# Smallest right singular vectors in null space:
|
781 |
+
assert_allclose(mat @ vh[-dim:, :].T, 0, atol=1e-6, rtol=1e0)
|
782 |
+
|
783 |
+
# Smallest singular values should be 0
|
784 |
+
sp_mat = csc_matrix(mat)
|
785 |
+
su, ss, svh = svds(sp_mat, k=dim, which='SM', solver=self.solver,
|
786 |
+
random_state=0)
|
787 |
+
# Smallest dim singular values are 0:
|
788 |
+
assert_allclose(ss, 0, atol=1e-5, rtol=1e0)
|
789 |
+
# Smallest singular vectors via svds in null space:
|
790 |
+
n, m = mat.shape
|
791 |
+
if n < m: # else the assert fails with some libraries unclear why
|
792 |
+
assert_allclose(sp_mat.transpose() @ su, 0, atol=1e-5, rtol=1e0)
|
793 |
+
assert_allclose(sp_mat @ svh.T, 0, atol=1e-5, rtol=1e0)
|
794 |
+
|
795 |
+
# --- Perform tests with each solver ---
|
796 |
+
|
797 |
+
|
798 |
+
class Test_SVDS_once:
|
799 |
+
@pytest.mark.parametrize("solver", ['ekki', object])
|
800 |
+
def test_svds_input_validation_solver(self, solver):
|
801 |
+
message = "solver must be one of"
|
802 |
+
with pytest.raises(ValueError, match=message):
|
803 |
+
svds(np.ones((3, 4)), k=2, solver=solver)
|
804 |
+
|
805 |
+
|
806 |
+
class Test_SVDS_ARPACK(SVDSCommonTests):
|
807 |
+
|
808 |
+
def setup_method(self):
|
809 |
+
self.solver = 'arpack'
|
810 |
+
|
811 |
+
@pytest.mark.parametrize("ncv", list(range(-1, 8)) + [4.5, "5"])
|
812 |
+
def test_svds_input_validation_ncv_1(self, ncv):
|
813 |
+
rng = np.random.default_rng(0)
|
814 |
+
A = rng.random((6, 7))
|
815 |
+
k = 3
|
816 |
+
if ncv in {4, 5}:
|
817 |
+
u, s, vh = svds(A, k=k, ncv=ncv, solver=self.solver, random_state=0)
|
818 |
+
# partial decomposition, so don't check that u@diag(s)@vh=A;
|
819 |
+
# do check that scipy.sparse.linalg.svds ~ scipy.linalg.svd
|
820 |
+
_check_svds(A, k, u, s, vh)
|
821 |
+
else:
|
822 |
+
message = ("`ncv` must be an integer satisfying")
|
823 |
+
with pytest.raises(ValueError, match=message):
|
824 |
+
svds(A, k=k, ncv=ncv, solver=self.solver)
|
825 |
+
|
826 |
+
def test_svds_input_validation_ncv_2(self):
|
827 |
+
# I think the stack trace is reasonable when `ncv` can't be converted
|
828 |
+
# to an int.
|
829 |
+
message = "int() argument must be a"
|
830 |
+
with pytest.raises(TypeError, match=re.escape(message)):
|
831 |
+
svds(np.eye(10), ncv=[], solver=self.solver)
|
832 |
+
|
833 |
+
message = "invalid literal for int()"
|
834 |
+
with pytest.raises(ValueError, match=message):
|
835 |
+
svds(np.eye(10), ncv="hi", solver=self.solver)
|
836 |
+
|
837 |
+
# I can't see a robust relationship between `ncv` and relevant outputs
|
838 |
+
# (e.g. accuracy, time), so no test of the parameter.
|
839 |
+
|
840 |
+
|
841 |
+
class Test_SVDS_LOBPCG(SVDSCommonTests):
|
842 |
+
|
843 |
+
def setup_method(self):
|
844 |
+
self.solver = 'lobpcg'
|
845 |
+
|
846 |
+
|
847 |
+
class Test_SVDS_PROPACK(SVDSCommonTests):
|
848 |
+
|
849 |
+
def setup_method(self):
|
850 |
+
self.solver = 'propack'
|
851 |
+
|
852 |
+
def test_svd_LM_ones_matrix(self):
|
853 |
+
message = ("PROPACK does not return orthonormal singular vectors "
|
854 |
+
"associated with zero singular values.")
|
855 |
+
# There are some other issues with this matrix of all ones, e.g.
|
856 |
+
# `which='sm'` and `k=1` returns the largest singular value
|
857 |
+
pytest.xfail(message)
|
858 |
+
|
859 |
+
def test_svd_LM_zeros_matrix(self):
|
860 |
+
message = ("PROPACK does not return orthonormal singular vectors "
|
861 |
+
"associated with zero singular values.")
|
862 |
+
pytest.xfail(message)
|
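(Not part of the diff.) A minimal sketch of the svds call pattern tested above: compute a few singular triplets of a sparse matrix and compare against a dense SVD. The shape, density, and k below are illustrative choices:

import numpy as np
from scipy.linalg import svd
from scipy.sparse import random as sparse_random
from scipy.sparse.linalg import svds

rng = np.random.default_rng(0)
A = sparse_random(60, 40, density=0.3, random_state=rng)

# Three largest singular triplets with the default ARPACK backend; the other
# backends exercised above are selected with solver='lobpcg' or solver='propack'.
u, s, vh = svds(A, k=3, which='LM', solver='arpack', random_state=0)

# Cross-check against the trailing values of a dense SVD, as the tests do.
s_dense = svd(A.toarray(), compute_uv=False)
print(np.sort(s), np.sort(s_dense)[-3:])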
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (639 Bytes).
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/_gcrotmk.cpython-310.pyc
ADDED
Binary file (11.4 kB).
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/lgmres.cpython-310.pyc
ADDED
Binary file (7.41 kB).
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/lsmr.cpython-310.pyc
ADDED
Binary file (11.6 kB).
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/lsqr.cpython-310.pyc
ADDED
Binary file (17 kB).
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/minres.cpython-310.pyc
ADDED
Binary file (8.07 kB).
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/__pycache__/tfqmr.cpython-310.pyc
ADDED
Binary file (5.73 kB).
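(Not part of the diff.) The next entry adds the GCROT(m,k) Krylov solver. A minimal sketch of its public entry point scipy.sparse.linalg.gcrotmk on a simple nonsymmetric system; the matrix and right-hand side are illustrative choices:

import numpy as np
from scipy.sparse import diags
from scipy.sparse.linalg import gcrotmk

n = 200
# Diagonally dominant, nonsymmetric tridiagonal test system.
A = diags([-1.0, 2.5, -1.2], [-1, 0, 1], shape=(n, n), format='csr')
b = np.ones(n)
x, info = gcrotmk(A, b, maxiter=500)
print(info, np.linalg.norm(A @ x - b))   # info == 0 indicates convergence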
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/_gcrotmk.py
ADDED
@@ -0,0 +1,514 @@
1 |
+
# Copyright (C) 2015, Pauli Virtanen <[email protected]>
|
2 |
+
# Distributed under the same license as SciPy.
|
3 |
+
|
4 |
+
import numpy as np
|
5 |
+
from numpy.linalg import LinAlgError
|
6 |
+
from scipy.linalg import (get_blas_funcs, qr, solve, svd, qr_insert, lstsq)
|
7 |
+
from .iterative import _get_atol_rtol
|
8 |
+
from scipy.sparse.linalg._isolve.utils import make_system
|
9 |
+
from scipy._lib.deprecation import _NoValue, _deprecate_positional_args
|
10 |
+
|
11 |
+
|
12 |
+
__all__ = ['gcrotmk']
|
13 |
+
|
14 |
+
|
15 |
+
def _fgmres(matvec, v0, m, atol, lpsolve=None, rpsolve=None, cs=(), outer_v=(),
|
16 |
+
prepend_outer_v=False):
|
17 |
+
"""
|
18 |
+
FGMRES Arnoldi process, with optional projection or augmentation
|
19 |
+
|
20 |
+
Parameters
|
21 |
+
----------
|
22 |
+
matvec : callable
|
23 |
+
Operation A*x
|
24 |
+
v0 : ndarray
|
25 |
+
Initial vector, normalized to nrm2(v0) == 1
|
26 |
+
m : int
|
27 |
+
Number of GMRES rounds
|
28 |
+
atol : float
|
29 |
+
Absolute tolerance for early exit
|
30 |
+
lpsolve : callable
|
31 |
+
Left preconditioner L
|
32 |
+
rpsolve : callable
|
33 |
+
Right preconditioner R
|
34 |
+
cs : list of (ndarray, ndarray)
|
35 |
+
Columns of matrices C and U in GCROT
|
36 |
+
outer_v : list of ndarrays
|
37 |
+
Augmentation vectors in LGMRES
|
38 |
+
prepend_outer_v : bool, optional
|
39 |
+
Whether augmentation vectors come before or after
|
40 |
+
Krylov iterates
|
41 |
+
|
42 |
+
Raises
|
43 |
+
------
|
44 |
+
LinAlgError
|
45 |
+
If nans encountered
|
46 |
+
|
47 |
+
Returns
|
48 |
+
-------
|
49 |
+
Q, R : ndarray
|
50 |
+
QR decomposition of the upper Hessenberg H=QR
|
51 |
+
B : ndarray
|
52 |
+
Projections corresponding to matrix C
|
53 |
+
vs : list of ndarray
|
54 |
+
Columns of matrix V
|
55 |
+
zs : list of ndarray
|
56 |
+
Columns of matrix Z
|
57 |
+
y : ndarray
|
58 |
+
Solution to ||H y - e_1||_2 = min!
|
59 |
+
res : float
|
60 |
+
The final (preconditioned) residual norm
|
61 |
+
|
62 |
+
"""
|
63 |
+
|
64 |
+
if lpsolve is None:
|
65 |
+
def lpsolve(x):
|
66 |
+
return x
|
67 |
+
if rpsolve is None:
|
68 |
+
def rpsolve(x):
|
69 |
+
return x
|
70 |
+
|
71 |
+
axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'], (v0,))
|
72 |
+
|
73 |
+
vs = [v0]
|
74 |
+
zs = []
|
75 |
+
y = None
|
76 |
+
res = np.nan
|
77 |
+
|
78 |
+
m = m + len(outer_v)
|
79 |
+
|
80 |
+
# Orthogonal projection coefficients
|
81 |
+
B = np.zeros((len(cs), m), dtype=v0.dtype)
|
82 |
+
|
83 |
+
# H is stored in QR factorized form
|
84 |
+
Q = np.ones((1, 1), dtype=v0.dtype)
|
85 |
+
R = np.zeros((1, 0), dtype=v0.dtype)
|
86 |
+
|
87 |
+
eps = np.finfo(v0.dtype).eps
|
88 |
+
|
89 |
+
breakdown = False
|
90 |
+
|
91 |
+
# FGMRES Arnoldi process
|
92 |
+
for j in range(m):
|
93 |
+
# L A Z = C B + V H
|
94 |
+
|
95 |
+
if prepend_outer_v and j < len(outer_v):
|
96 |
+
z, w = outer_v[j]
|
97 |
+
elif prepend_outer_v and j == len(outer_v):
|
98 |
+
z = rpsolve(v0)
|
99 |
+
w = None
|
100 |
+
elif not prepend_outer_v and j >= m - len(outer_v):
|
101 |
+
z, w = outer_v[j - (m - len(outer_v))]
|
102 |
+
else:
|
103 |
+
z = rpsolve(vs[-1])
|
104 |
+
w = None
|
105 |
+
|
106 |
+
if w is None:
|
107 |
+
w = lpsolve(matvec(z))
|
108 |
+
else:
|
109 |
+
# w is clobbered below
|
110 |
+
w = w.copy()
|
111 |
+
|
112 |
+
w_norm = nrm2(w)
|
113 |
+
|
114 |
+
# GCROT projection: L A -> (1 - C C^H) L A
|
115 |
+
# i.e. orthogonalize against C
|
116 |
+
for i, c in enumerate(cs):
|
117 |
+
alpha = dot(c, w)
|
118 |
+
B[i,j] = alpha
|
119 |
+
w = axpy(c, w, c.shape[0], -alpha) # w -= alpha*c
|
120 |
+
|
121 |
+
# Orthogonalize against V
|
122 |
+
hcur = np.zeros(j+2, dtype=Q.dtype)
|
123 |
+
for i, v in enumerate(vs):
|
124 |
+
alpha = dot(v, w)
|
125 |
+
hcur[i] = alpha
|
126 |
+
w = axpy(v, w, v.shape[0], -alpha) # w -= alpha*v
|
127 |
+
hcur[i+1] = nrm2(w)
|
128 |
+
|
129 |
+
with np.errstate(over='ignore', divide='ignore'):
|
130 |
+
# Careful with denormals
|
131 |
+
alpha = 1/hcur[-1]
|
132 |
+
|
133 |
+
if np.isfinite(alpha):
|
134 |
+
w = scal(alpha, w)
|
135 |
+
|
136 |
+
if not (hcur[-1] > eps * w_norm):
|
137 |
+
# w essentially in the span of previous vectors,
|
138 |
+
# or we have nans. Bail out after updating the QR
|
139 |
+
# solution.
|
140 |
+
breakdown = True
|
141 |
+
|
142 |
+
vs.append(w)
|
143 |
+
zs.append(z)
|
144 |
+
|
145 |
+
# Arnoldi LSQ problem
|
146 |
+
|
147 |
+
# Add new column to H=Q@R, padding other columns with zeros
|
148 |
+
Q2 = np.zeros((j+2, j+2), dtype=Q.dtype, order='F')
|
149 |
+
Q2[:j+1,:j+1] = Q
|
150 |
+
Q2[j+1,j+1] = 1
|
151 |
+
|
152 |
+
R2 = np.zeros((j+2, j), dtype=R.dtype, order='F')
|
153 |
+
R2[:j+1,:] = R
|
154 |
+
|
155 |
+
Q, R = qr_insert(Q2, R2, hcur, j, which='col',
|
156 |
+
overwrite_qru=True, check_finite=False)
|
157 |
+
|
158 |
+
# Transformed least squares problem
|
159 |
+
# || Q R y - inner_res_0 * e_1 ||_2 = min!
|
160 |
+
# Since R = [R'; 0], solution is y = inner_res_0 (R')^{-1} (Q^H)[:j,0]
|
161 |
+
|
162 |
+
# Residual is immediately known
|
163 |
+
res = abs(Q[0,-1])
|
164 |
+
|
165 |
+
# Check for termination
|
166 |
+
if res < atol or breakdown:
|
167 |
+
break
|
168 |
+
|
169 |
+
if not np.isfinite(R[j,j]):
|
170 |
+
# nans encountered, bail out
|
171 |
+
raise LinAlgError()
|
172 |
+
|
173 |
+
# -- Get the LSQ problem solution
|
174 |
+
|
175 |
+
# The problem is triangular, but the condition number may be
|
176 |
+
# bad (or in case of breakdown the last diagonal entry may be
|
177 |
+
# zero), so use lstsq instead of trtrs.
|
178 |
+
y, _, _, _, = lstsq(R[:j+1,:j+1], Q[0,:j+1].conj())
|
179 |
+
|
180 |
+
B = B[:,:j+1]
|
181 |
+
|
182 |
+
return Q, R, B, vs, zs, y, res
|
183 |
+
|
184 |
+
|
185 |
+
@_deprecate_positional_args(version="1.14.0")
|
186 |
+
def gcrotmk(A, b, x0=None, *, tol=_NoValue, maxiter=1000, M=None, callback=None,
|
187 |
+
m=20, k=None, CU=None, discard_C=False, truncate='oldest',
|
188 |
+
atol=None, rtol=1e-5):
|
189 |
+
"""
|
190 |
+
Solve a matrix equation using flexible GCROT(m,k) algorithm.
|
191 |
+
|
192 |
+
Parameters
|
193 |
+
----------
|
194 |
+
A : {sparse matrix, ndarray, LinearOperator}
|
195 |
+
The real or complex N-by-N matrix of the linear system.
|
196 |
+
Alternatively, ``A`` can be a linear operator which can
|
197 |
+
produce ``Ax`` using, e.g.,
|
198 |
+
``scipy.sparse.linalg.LinearOperator``.
|
199 |
+
b : ndarray
|
200 |
+
Right hand side of the linear system. Has shape (N,) or (N,1).
|
201 |
+
x0 : ndarray
|
202 |
+
Starting guess for the solution.
|
203 |
+
rtol, atol : float, optional
|
204 |
+
Parameters for the convergence test. For convergence,
|
205 |
+
``norm(b - A @ x) <= max(rtol*norm(b), atol)`` should be satisfied.
|
206 |
+
The default is ``rtol=1e-5``, the default for ``atol`` is ``rtol``.
|
207 |
+
|
208 |
+
.. warning::
|
209 |
+
|
210 |
+
The default value for ``atol`` will be changed to ``0.0`` in
|
211 |
+
SciPy 1.14.0.
|
212 |
+
maxiter : int, optional
|
213 |
+
Maximum number of iterations. Iteration will stop after maxiter
|
214 |
+
steps even if the specified tolerance has not been achieved.
|
215 |
+
M : {sparse matrix, ndarray, LinearOperator}, optional
|
216 |
+
Preconditioner for A. The preconditioner should approximate the
|
217 |
+
inverse of A. gcrotmk is a 'flexible' algorithm and the preconditioner
|
218 |
+
can vary from iteration to iteration. Effective preconditioning
|
219 |
+
dramatically improves the rate of convergence, which implies that
|
220 |
+
fewer iterations are needed to reach a given error tolerance.
|
221 |
+
callback : function, optional
|
222 |
+
User-supplied function to call after each iteration. It is called
|
223 |
+
as callback(xk), where xk is the current solution vector.
|
224 |
+
m : int, optional
|
225 |
+
Number of inner FGMRES iterations per each outer iteration.
|
226 |
+
Default: 20
|
227 |
+
k : int, optional
|
228 |
+
Number of vectors to carry between inner FGMRES iterations.
|
229 |
+
According to [2]_, good values are around m.
|
230 |
+
Default: m
|
231 |
+
CU : list of tuples, optional
|
232 |
+
List of tuples ``(c, u)`` which contain the columns of the matrices
|
233 |
+
C and U in the GCROT(m,k) algorithm. For details, see [2]_.
|
234 |
+
The list given and vectors contained in it are modified in-place.
|
235 |
+
If not given, start from empty matrices. The ``c`` elements in the
|
236 |
+
tuples can be ``None``, in which case the vectors are recomputed
|
237 |
+
via ``c = A u`` on start and orthogonalized as described in [3]_.
|
238 |
+
discard_C : bool, optional
|
239 |
+
Discard the C-vectors at the end. Useful if recycling Krylov subspaces
|
240 |
+
for different linear systems.
|
241 |
+
truncate : {'oldest', 'smallest'}, optional
|
242 |
+
Truncation scheme to use. Drop: oldest vectors, or vectors with
|
243 |
+
smallest singular values using the scheme discussed in [1,2].
|
244 |
+
See [2]_ for detailed comparison.
|
245 |
+
Default: 'oldest'
|
246 |
+
tol : float, optional, deprecated
|
247 |
+
|
248 |
+
.. deprecated:: 1.12.0
|
249 |
+
`gcrotmk` keyword argument ``tol`` is deprecated in favor of
|
250 |
+
``rtol`` and will be removed in SciPy 1.14.0.
|
251 |
+
|
252 |
+
Returns
|
253 |
+
-------
|
254 |
+
x : ndarray
|
255 |
+
The solution found.
|
256 |
+
info : int
|
257 |
+
Provides convergence information:
|
258 |
+
|
259 |
+
* 0 : successful exit
|
260 |
+
* >0 : convergence to tolerance not achieved, number of iterations
|
261 |
+
|
262 |
+
Examples
|
263 |
+
--------
|
264 |
+
>>> import numpy as np
|
265 |
+
>>> from scipy.sparse import csc_matrix
|
266 |
+
>>> from scipy.sparse.linalg import gcrotmk
|
267 |
+
>>> R = np.random.randn(5, 5)
|
268 |
+
>>> A = csc_matrix(R)
|
269 |
+
>>> b = np.random.randn(5)
|
270 |
+
>>> x, exit_code = gcrotmk(A, b, atol=1e-5)
|
271 |
+
>>> print(exit_code)
|
272 |
+
0
|
273 |
+
>>> np.allclose(A.dot(x), b)
|
274 |
+
True
|
275 |
+
|
276 |
+
References
|
277 |
+
----------
|
278 |
+
.. [1] E. de Sturler, ''Truncation strategies for optimal Krylov subspace
|
279 |
+
methods'', SIAM J. Numer. Anal. 36, 864 (1999).
|
280 |
+
.. [2] J.E. Hicken and D.W. Zingg, ''A simplified and flexible variant
|
281 |
+
of GCROT for solving nonsymmetric linear systems'',
|
282 |
+
SIAM J. Sci. Comput. 32, 172 (2010).
|
283 |
+
.. [3] M.L. Parks, E. de Sturler, G. Mackey, D.D. Johnson, S. Maiti,
|
284 |
+
''Recycling Krylov subspaces for sequences of linear systems'',
|
285 |
+
SIAM J. Sci. Comput. 28, 1651 (2006).
|
286 |
+
|
287 |
+
"""
|
288 |
+
A,M,x,b,postprocess = make_system(A,M,x0,b)
|
289 |
+
|
290 |
+
if not np.isfinite(b).all():
|
291 |
+
raise ValueError("RHS must contain only finite numbers")
|
292 |
+
|
293 |
+
if truncate not in ('oldest', 'smallest'):
|
294 |
+
raise ValueError(f"Invalid value for 'truncate': {truncate!r}")
|
295 |
+
|
296 |
+
matvec = A.matvec
|
297 |
+
psolve = M.matvec
|
298 |
+
|
299 |
+
if CU is None:
|
300 |
+
CU = []
|
301 |
+
|
302 |
+
if k is None:
|
303 |
+
k = m
|
304 |
+
|
305 |
+
axpy, dot, scal = None, None, None
|
306 |
+
|
307 |
+
if x0 is None:
|
308 |
+
r = b.copy()
|
309 |
+
else:
|
310 |
+
r = b - matvec(x)
|
311 |
+
|
312 |
+
axpy, dot, scal, nrm2 = get_blas_funcs(['axpy', 'dot', 'scal', 'nrm2'], (x, r))
|
313 |
+
|
314 |
+
b_norm = nrm2(b)
|
315 |
+
|
316 |
+
# we call this to get the right atol/rtol and raise warnings as necessary
|
317 |
+
atol, rtol = _get_atol_rtol('gcrotmk', b_norm, tol, atol, rtol)
|
318 |
+
|
319 |
+
if b_norm == 0:
|
320 |
+
x = b
|
321 |
+
return (postprocess(x), 0)
|
322 |
+
|
323 |
+
if discard_C:
|
324 |
+
CU[:] = [(None, u) for c, u in CU]
|
325 |
+
|
326 |
+
# Reorthogonalize old vectors
|
327 |
+
if CU:
|
328 |
+
# Sort already existing vectors to the front
|
329 |
+
CU.sort(key=lambda cu: cu[0] is not None)
|
330 |
+
|
331 |
+
# Fill-in missing ones
|
332 |
+
C = np.empty((A.shape[0], len(CU)), dtype=r.dtype, order='F')
|
333 |
+
us = []
|
334 |
+
j = 0
|
335 |
+
while CU:
|
336 |
+
# More memory-efficient: throw away old vectors as we go
|
337 |
+
c, u = CU.pop(0)
|
338 |
+
if c is None:
|
339 |
+
c = matvec(u)
|
340 |
+
C[:,j] = c
|
341 |
+
j += 1
|
342 |
+
us.append(u)
|
343 |
+
|
344 |
+
# Orthogonalize
|
345 |
+
Q, R, P = qr(C, overwrite_a=True, mode='economic', pivoting=True)
|
346 |
+
del C
|
347 |
+
|
348 |
+
# C := Q
|
349 |
+
cs = list(Q.T)
|
350 |
+
|
351 |
+
# U := U P R^-1, back-substitution
|
352 |
+
new_us = []
|
353 |
+
for j in range(len(cs)):
|
354 |
+
u = us[P[j]]
|
355 |
+
for i in range(j):
|
356 |
+
u = axpy(us[P[i]], u, u.shape[0], -R[i,j])
|
357 |
+
if abs(R[j,j]) < 1e-12 * abs(R[0,0]):
|
358 |
+
# discard rest of the vectors
|
359 |
+
break
|
360 |
+
u = scal(1.0/R[j,j], u)
|
361 |
+
new_us.append(u)
|
362 |
+
|
363 |
+
# Form the new CU lists
|
364 |
+
CU[:] = list(zip(cs, new_us))[::-1]
|
365 |
+
|
366 |
+
if CU:
|
367 |
+
axpy, dot = get_blas_funcs(['axpy', 'dot'], (r,))
|
368 |
+
|
369 |
+
# Solve first the projection operation with respect to the CU
|
370 |
+
# vectors. This corresponds to modifying the initial guess to
|
371 |
+
# be
|
372 |
+
#
|
373 |
+
# x' = x + U y
|
374 |
+
# y = argmin_y || b - A (x + U y) ||^2
|
375 |
+
#
|
376 |
+
# The solution is y = C^H (b - A x)
|
377 |
+
for c, u in CU:
|
378 |
+
yc = dot(c, r)
|
379 |
+
x = axpy(u, x, x.shape[0], yc)
|
380 |
+
r = axpy(c, r, r.shape[0], -yc)
|
381 |
+
|
382 |
+
# GCROT main iteration
|
383 |
+
for j_outer in range(maxiter):
|
384 |
+
# -- callback
|
385 |
+
if callback is not None:
|
386 |
+
callback(x)
|
387 |
+
|
388 |
+
beta = nrm2(r)
|
389 |
+
|
390 |
+
# -- check stopping condition
|
391 |
+
beta_tol = max(atol, rtol * b_norm)
|
392 |
+
|
393 |
+
if beta <= beta_tol and (j_outer > 0 or CU):
|
394 |
+
# recompute residual to avoid rounding error
|
395 |
+
r = b - matvec(x)
|
396 |
+
beta = nrm2(r)
|
397 |
+
|
398 |
+
if beta <= beta_tol:
|
399 |
+
j_outer = -1
|
400 |
+
break
|
401 |
+
|
402 |
+
ml = m + max(k - len(CU), 0)
|
403 |
+
|
404 |
+
cs = [c for c, u in CU]
|
405 |
+
|
406 |
+
try:
|
407 |
+
Q, R, B, vs, zs, y, pres = _fgmres(matvec,
|
408 |
+
r/beta,
|
409 |
+
ml,
|
410 |
+
rpsolve=psolve,
|
411 |
+
atol=max(atol, rtol*b_norm)/beta,
|
412 |
+
cs=cs)
|
413 |
+
y *= beta
|
414 |
+
except LinAlgError:
|
415 |
+
# Floating point over/underflow, non-finite result from
|
416 |
+
# matmul etc. -- report failure.
|
417 |
+
break
|
418 |
+
|
419 |
+
#
|
420 |
+
# At this point,
|
421 |
+
#
|
422 |
+
# [A U, A Z] = [C, V] G; G = [ I B ]
|
423 |
+
# [ 0 H ]
|
424 |
+
#
|
425 |
+
# where [C, V] has orthonormal columns, and r = beta v_0. Moreover,
|
426 |
+
#
|
427 |
+
# || b - A (x + Z y + U q) ||_2 = || r - C B y - V H y - C q ||_2 = min!
|
428 |
+
#
|
429 |
+
# from which y = argmin_y || beta e_1 - H y ||_2, and q = -B y
|
430 |
+
#
|
431 |
+
|
432 |
+
#
|
433 |
+
# GCROT(m,k) update
|
434 |
+
#
|
435 |
+
|
436 |
+
# Define new outer vectors
|
437 |
+
|
438 |
+
# ux := (Z - U B) y
|
439 |
+
ux = zs[0]*y[0]
|
440 |
+
for z, yc in zip(zs[1:], y[1:]):
|
441 |
+
ux = axpy(z, ux, ux.shape[0], yc) # ux += z*yc
|
442 |
+
by = B.dot(y)
|
443 |
+
for cu, byc in zip(CU, by):
|
444 |
+
c, u = cu
|
445 |
+
ux = axpy(u, ux, ux.shape[0], -byc) # ux -= u*byc
|
446 |
+
|
447 |
+
# cx := V H y
|
448 |
+
hy = Q.dot(R.dot(y))
|
449 |
+
cx = vs[0] * hy[0]
|
450 |
+
for v, hyc in zip(vs[1:], hy[1:]):
|
451 |
+
cx = axpy(v, cx, cx.shape[0], hyc) # cx += v*hyc
|
452 |
+
|
453 |
+
# Normalize cx, maintaining cx = A ux
|
454 |
+
# This new cx is orthogonal to the previous C, by construction
|
455 |
+
try:
|
456 |
+
alpha = 1/nrm2(cx)
|
457 |
+
if not np.isfinite(alpha):
|
458 |
+
raise FloatingPointError()
|
459 |
+
except (FloatingPointError, ZeroDivisionError):
|
460 |
+
# Cannot update, so skip it
|
461 |
+
continue
|
462 |
+
|
463 |
+
cx = scal(alpha, cx)
|
464 |
+
ux = scal(alpha, ux)
|
465 |
+
|
466 |
+
# Update residual and solution
|
467 |
+
gamma = dot(cx, r)
|
468 |
+
r = axpy(cx, r, r.shape[0], -gamma) # r -= gamma*cx
|
469 |
+
x = axpy(ux, x, x.shape[0], gamma) # x += gamma*ux
|
470 |
+
|
471 |
+
# Truncate CU
|
472 |
+
if truncate == 'oldest':
|
473 |
+
while len(CU) >= k and CU:
|
474 |
+
del CU[0]
|
475 |
+
elif truncate == 'smallest':
|
476 |
+
if len(CU) >= k and CU:
|
477 |
+
# cf. [1,2]
|
478 |
+
D = solve(R[:-1,:].T, B.T).T
|
479 |
+
W, sigma, V = svd(D)
|
480 |
+
|
481 |
+
# C := C W[:,:k-1], U := U W[:,:k-1]
|
482 |
+
new_CU = []
|
483 |
+
for j, w in enumerate(W[:,:k-1].T):
|
484 |
+
c, u = CU[0]
|
485 |
+
c = c * w[0]
|
486 |
+
u = u * w[0]
|
487 |
+
for cup, wp in zip(CU[1:], w[1:]):
|
488 |
+
cp, up = cup
|
489 |
+
c = axpy(cp, c, c.shape[0], wp)
|
490 |
+
u = axpy(up, u, u.shape[0], wp)
|
491 |
+
|
492 |
+
# Reorthogonalize at the same time; not necessary
|
493 |
+
# in exact arithmetic, but floating point error
|
494 |
+
# tends to accumulate here
|
495 |
+
for cp, up in new_CU:
|
496 |
+
alpha = dot(cp, c)
|
497 |
+
c = axpy(cp, c, c.shape[0], -alpha)
|
498 |
+
u = axpy(up, u, u.shape[0], -alpha)
|
499 |
+
alpha = nrm2(c)
|
500 |
+
c = scal(1.0/alpha, c)
|
501 |
+
u = scal(1.0/alpha, u)
|
502 |
+
|
503 |
+
new_CU.append((c, u))
|
504 |
+
CU[:] = new_CU
|
505 |
+
|
506 |
+
# Add new vector to CU
|
507 |
+
CU.append((cx, ux))
|
508 |
+
|
509 |
+
# Include the solution vector to the span
|
510 |
+
CU.append((None, x.copy()))
|
511 |
+
if discard_C:
|
512 |
+
CU[:] = [(None, uz) for cz, uz in CU]
|
513 |
+
|
514 |
+
return postprocess(x), j_outer + 1
|
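As a usage note for the gcrotmk solver added above: the CU list lets the Krylov vectors built during one solve be recycled for a later, related solve, as the docstring describes. The snippet below is a minimal sketch of that workflow, assuming the keyword signature shown in this diff (m, k, CU, atol, rtol); the diagonally dominant tridiagonal test matrix is a hypothetical example, not part of the diff.

import numpy as np
from scipy.sparse import diags
from scipy.sparse.linalg import gcrotmk

n = 200
# Hypothetical, well-conditioned test problem: diagonally dominant tridiagonal.
A = diags([-1.0, 4.0, -1.0], offsets=[-1, 0, 1], shape=(n, n), format='csr')
b = np.ones(n)

CU = []  # filled with (c, u) pairs by the first solve
x1, info1 = gcrotmk(A, b, m=20, k=10, CU=CU, atol=1e-8, rtol=1e-8)
# Reuse the recycled subspace for a nearby right-hand side.
x2, info2 = gcrotmk(A, b + 0.01, m=20, k=10, CU=CU, atol=1e-8, rtol=1e-8)
print(info1, info2)                  # 0, 0 on successful convergence
print(np.linalg.norm(A @ x1 - b))    # residual norm of the first solve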
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/lsmr.py
ADDED
@@ -0,0 +1,486 @@
|
1 |
+
"""
|
2 |
+
Copyright (C) 2010 David Fong and Michael Saunders
|
3 |
+
|
4 |
+
LSMR uses an iterative method.
|
5 |
+
|
6 |
+
07 Jun 2010: Documentation updated
|
7 |
+
03 Jun 2010: First release version in Python
|
8 |
+
|
9 |
+
David Chin-lung Fong [email protected]
|
10 |
+
Institute for Computational and Mathematical Engineering
|
11 |
+
Stanford University
|
12 |
+
|
13 |
+
Michael Saunders [email protected]
|
14 |
+
Systems Optimization Laboratory
|
15 |
+
Dept of MS&E, Stanford University.
|
16 |
+
|
17 |
+
"""
|
18 |
+
|
19 |
+
__all__ = ['lsmr']
|
20 |
+
|
21 |
+
from numpy import zeros, inf, atleast_1d, result_type
|
22 |
+
from numpy.linalg import norm
|
23 |
+
from math import sqrt
|
24 |
+
from scipy.sparse.linalg._interface import aslinearoperator
|
25 |
+
|
26 |
+
from scipy.sparse.linalg._isolve.lsqr import _sym_ortho
|
27 |
+
|
28 |
+
|
29 |
+
def lsmr(A, b, damp=0.0, atol=1e-6, btol=1e-6, conlim=1e8,
|
30 |
+
maxiter=None, show=False, x0=None):
|
31 |
+
"""Iterative solver for least-squares problems.
|
32 |
+
|
33 |
+
lsmr solves the system of linear equations ``Ax = b``. If the system
|
34 |
+
is inconsistent, it solves the least-squares problem ``min ||b - Ax||_2``.
|
35 |
+
``A`` is a rectangular matrix of dimension m-by-n, where all cases are
|
36 |
+
allowed: m = n, m > n, or m < n. ``b`` is a vector of length m.
|
37 |
+
The matrix A may be dense or sparse (usually sparse).
|
38 |
+
|
39 |
+
Parameters
|
40 |
+
----------
|
41 |
+
A : {sparse matrix, ndarray, LinearOperator}
|
42 |
+
Matrix A in the linear system.
|
43 |
+
Alternatively, ``A`` can be a linear operator which can
|
44 |
+
produce ``Ax`` and ``A^H x`` using, e.g.,
|
45 |
+
``scipy.sparse.linalg.LinearOperator``.
|
46 |
+
b : array_like, shape (m,)
|
47 |
+
Vector ``b`` in the linear system.
|
48 |
+
damp : float
|
49 |
+
Damping factor for regularized least-squares. `lsmr` solves
|
50 |
+
the regularized least-squares problem::
|
51 |
+
|
52 |
+
min ||(b) - ( A )x||
|
53 |
+
||(0) (damp*I) ||_2
|
54 |
+
|
55 |
+
where damp is a scalar. If damp is None or 0, the system
|
56 |
+
is solved without regularization. Default is 0.
|
57 |
+
atol, btol : float, optional
|
58 |
+
Stopping tolerances. `lsmr` continues iterations until a
|
59 |
+
certain backward error estimate is smaller than some quantity
|
60 |
+
depending on atol and btol. Let ``r = b - Ax`` be the
|
61 |
+
residual vector for the current approximate solution ``x``.
|
62 |
+
If ``Ax = b`` seems to be consistent, `lsmr` terminates
|
63 |
+
when ``norm(r) <= atol * norm(A) * norm(x) + btol * norm(b)``.
|
64 |
+
Otherwise, `lsmr` terminates when ``norm(A^H r) <=
|
65 |
+
atol * norm(A) * norm(r)``. If both tolerances are 1.0e-6 (default),
|
66 |
+
the final ``norm(r)`` should be accurate to about 6
|
67 |
+
digits. (The final ``x`` will usually have fewer correct digits,
|
68 |
+
depending on ``cond(A)`` and the size of LAMBDA.) If `atol`
|
69 |
+
or `btol` is None, a default value of 1.0e-6 will be used.
|
70 |
+
Ideally, they should be estimates of the relative error in the
|
71 |
+
entries of ``A`` and ``b`` respectively. For example, if the entries
|
72 |
+
of ``A`` have 7 correct digits, set ``atol = 1e-7``. This prevents
|
73 |
+
the algorithm from doing unnecessary work beyond the
|
74 |
+
uncertainty of the input data.
|
75 |
+
conlim : float, optional
|
76 |
+
`lsmr` terminates if an estimate of ``cond(A)`` exceeds
|
77 |
+
`conlim`. For compatible systems ``Ax = b``, conlim could be
|
78 |
+
as large as 1.0e+12 (say). For least-squares problems,
|
79 |
+
`conlim` should be less than 1.0e+8. If `conlim` is None, the
|
80 |
+
default value is 1e+8. Maximum precision can be obtained by
|
81 |
+
setting ``atol = btol = conlim = 0``, but the number of
|
82 |
+
iterations may then be excessive. Default is 1e8.
|
83 |
+
maxiter : int, optional
|
84 |
+
`lsmr` terminates if the number of iterations reaches
|
85 |
+
`maxiter`. The default is ``maxiter = min(m, n)``. For
|
86 |
+
ill-conditioned systems, a larger value of `maxiter` may be
|
87 |
+
needed. Default is False.
|
88 |
+
show : bool, optional
|
89 |
+
Print iterations logs if ``show=True``. Default is False.
|
90 |
+
x0 : array_like, shape (n,), optional
|
91 |
+
Initial guess of ``x``, if None zeros are used. Default is None.
|
92 |
+
|
93 |
+
.. versionadded:: 1.0.0
|
94 |
+
|
95 |
+
Returns
|
96 |
+
-------
|
97 |
+
x : ndarray of float
|
98 |
+
Least-square solution returned.
|
99 |
+
istop : int
|
100 |
+
istop gives the reason for stopping::
|
101 |
+
|
102 |
+
istop = 0 means x=0 is a solution. If x0 was given, then x=x0 is a
|
103 |
+
solution.
|
104 |
+
= 1 means x is an approximate solution to A@x = B,
|
105 |
+
according to atol and btol.
|
106 |
+
= 2 means x approximately solves the least-squares problem
|
107 |
+
according to atol.
|
108 |
+
= 3 means COND(A) seems to be greater than CONLIM.
|
109 |
+
= 4 is the same as 1 with atol = btol = eps (machine
|
110 |
+
precision)
|
111 |
+
= 5 is the same as 2 with atol = eps.
|
112 |
+
= 6 is the same as 3 with CONLIM = 1/eps.
|
113 |
+
= 7 means ITN reached maxiter before the other stopping
|
114 |
+
conditions were satisfied.
|
115 |
+
|
116 |
+
itn : int
|
117 |
+
Number of iterations used.
|
118 |
+
normr : float
|
119 |
+
``norm(b-Ax)``
|
120 |
+
normar : float
|
121 |
+
``norm(A^H (b - Ax))``
|
122 |
+
norma : float
|
123 |
+
``norm(A)``
|
124 |
+
conda : float
|
125 |
+
Condition number of A.
|
126 |
+
normx : float
|
127 |
+
``norm(x)``
|
128 |
+
|
129 |
+
Notes
|
130 |
+
-----
|
131 |
+
|
132 |
+
.. versionadded:: 0.11.0
|
133 |
+
|
134 |
+
References
|
135 |
+
----------
|
136 |
+
.. [1] D. C.-L. Fong and M. A. Saunders,
|
137 |
+
"LSMR: An iterative algorithm for sparse least-squares problems",
|
138 |
+
SIAM J. Sci. Comput., vol. 33, pp. 2950-2971, 2011.
|
139 |
+
:arxiv:`1006.0758`
|
140 |
+
.. [2] LSMR Software, https://web.stanford.edu/group/SOL/software/lsmr/
|
141 |
+
|
142 |
+
Examples
|
143 |
+
--------
|
144 |
+
>>> import numpy as np
|
145 |
+
>>> from scipy.sparse import csc_matrix
|
146 |
+
>>> from scipy.sparse.linalg import lsmr
|
147 |
+
>>> A = csc_matrix([[1., 0.], [1., 1.], [0., 1.]], dtype=float)
|
148 |
+
|
149 |
+
The first example has the trivial solution ``[0, 0]``
|
150 |
+
|
151 |
+
>>> b = np.array([0., 0., 0.], dtype=float)
|
152 |
+
>>> x, istop, itn, normr = lsmr(A, b)[:4]
|
153 |
+
>>> istop
|
154 |
+
0
|
155 |
+
>>> x
|
156 |
+
array([0., 0.])
|
157 |
+
|
158 |
+
The stopping code `istop=0` returned indicates that a vector of zeros was
|
159 |
+
found as a solution. The returned solution `x` indeed contains
|
160 |
+
``[0., 0.]``. The next example has a non-trivial solution:
|
161 |
+
|
162 |
+
>>> b = np.array([1., 0., -1.], dtype=float)
|
163 |
+
>>> x, istop, itn, normr = lsmr(A, b)[:4]
|
164 |
+
>>> istop
|
165 |
+
1
|
166 |
+
>>> x
|
167 |
+
array([ 1., -1.])
|
168 |
+
>>> itn
|
169 |
+
1
|
170 |
+
>>> normr
|
171 |
+
4.440892098500627e-16
|
172 |
+
|
173 |
+
As indicated by `istop=1`, `lsmr` found a solution obeying the tolerance
|
174 |
+
limits. The given solution ``[1., -1.]`` obviously solves the equation. The
|
175 |
+
remaining return values include information about the number of iterations
|
176 |
+
(`itn=1`) and the remaining difference of left and right side of the solved
|
177 |
+
equation.
|
178 |
+
The final example demonstrates the behavior in the case where there is no
|
179 |
+
solution for the equation:
|
180 |
+
|
181 |
+
>>> b = np.array([1., 0.01, -1.], dtype=float)
|
182 |
+
>>> x, istop, itn, normr = lsmr(A, b)[:4]
|
183 |
+
>>> istop
|
184 |
+
2
|
185 |
+
>>> x
|
186 |
+
array([ 1.00333333, -0.99666667])
|
187 |
+
>>> A.dot(x)-b
|
188 |
+
array([ 0.00333333, -0.00333333, 0.00333333])
|
189 |
+
>>> normr
|
190 |
+
0.005773502691896255
|
191 |
+
|
192 |
+
`istop` indicates that the system is inconsistent and thus `x` is rather an
|
193 |
+
approximate solution to the corresponding least-squares problem. `normr`
|
194 |
+
contains the minimal distance that was found.
|
195 |
+
"""
|
196 |
+
|
197 |
+
A = aslinearoperator(A)
|
198 |
+
b = atleast_1d(b)
|
199 |
+
if b.ndim > 1:
|
200 |
+
b = b.squeeze()
|
201 |
+
|
202 |
+
msg = ('The exact solution is x = 0, or x = x0, if x0 was given ',
|
203 |
+
'Ax - b is small enough, given atol, btol ',
|
204 |
+
'The least-squares solution is good enough, given atol ',
|
205 |
+
'The estimate of cond(Abar) has exceeded conlim ',
|
206 |
+
'Ax - b is small enough for this machine ',
|
207 |
+
'The least-squares solution is good enough for this machine',
|
208 |
+
'Cond(Abar) seems to be too large for this machine ',
|
209 |
+
'The iteration limit has been reached ')
|
210 |
+
|
211 |
+
hdg1 = ' itn x(1) norm r norm Ar'
|
212 |
+
hdg2 = ' compatible LS norm A cond A'
|
213 |
+
pfreq = 20 # print frequency (for repeating the heading)
|
214 |
+
pcount = 0 # print counter
|
215 |
+
|
216 |
+
m, n = A.shape
|
217 |
+
|
218 |
+
# stores the num of singular values
|
219 |
+
minDim = min([m, n])
|
220 |
+
|
221 |
+
if maxiter is None:
|
222 |
+
maxiter = minDim
|
223 |
+
|
224 |
+
if x0 is None:
|
225 |
+
dtype = result_type(A, b, float)
|
226 |
+
else:
|
227 |
+
dtype = result_type(A, b, x0, float)
|
228 |
+
|
229 |
+
if show:
|
230 |
+
print(' ')
|
231 |
+
print('LSMR Least-squares solution of Ax = b\n')
|
232 |
+
print(f'The matrix A has {m} rows and {n} columns')
|
233 |
+
print('damp = %20.14e\n' % (damp))
|
234 |
+
print(f'atol = {atol:8.2e} conlim = {conlim:8.2e}\n')
|
235 |
+
print(f'btol = {btol:8.2e} maxiter = {maxiter:8g}\n')
|
236 |
+
|
237 |
+
u = b
|
238 |
+
normb = norm(b)
|
239 |
+
if x0 is None:
|
240 |
+
x = zeros(n, dtype)
|
241 |
+
beta = normb.copy()
|
242 |
+
else:
|
243 |
+
x = atleast_1d(x0.copy())
|
244 |
+
u = u - A.matvec(x)
|
245 |
+
beta = norm(u)
|
246 |
+
|
247 |
+
if beta > 0:
|
248 |
+
u = (1 / beta) * u
|
249 |
+
v = A.rmatvec(u)
|
250 |
+
alpha = norm(v)
|
251 |
+
else:
|
252 |
+
v = zeros(n, dtype)
|
253 |
+
alpha = 0
|
254 |
+
|
255 |
+
if alpha > 0:
|
256 |
+
v = (1 / alpha) * v
|
257 |
+
|
258 |
+
# Initialize variables for 1st iteration.
|
259 |
+
|
260 |
+
itn = 0
|
261 |
+
zetabar = alpha * beta
|
262 |
+
alphabar = alpha
|
263 |
+
rho = 1
|
264 |
+
rhobar = 1
|
265 |
+
cbar = 1
|
266 |
+
sbar = 0
|
267 |
+
|
268 |
+
h = v.copy()
|
269 |
+
hbar = zeros(n, dtype)
|
270 |
+
|
271 |
+
# Initialize variables for estimation of ||r||.
|
272 |
+
|
273 |
+
betadd = beta
|
274 |
+
betad = 0
|
275 |
+
rhodold = 1
|
276 |
+
tautildeold = 0
|
277 |
+
thetatilde = 0
|
278 |
+
zeta = 0
|
279 |
+
d = 0
|
280 |
+
|
281 |
+
# Initialize variables for estimation of ||A|| and cond(A)
|
282 |
+
|
283 |
+
normA2 = alpha * alpha
|
284 |
+
maxrbar = 0
|
285 |
+
minrbar = 1e+100
|
286 |
+
normA = sqrt(normA2)
|
287 |
+
condA = 1
|
288 |
+
normx = 0
|
289 |
+
|
290 |
+
# Items for use in stopping rules, normb set earlier
|
291 |
+
istop = 0
|
292 |
+
ctol = 0
|
293 |
+
if conlim > 0:
|
294 |
+
ctol = 1 / conlim
|
295 |
+
normr = beta
|
296 |
+
|
297 |
+
# Reverse the order here from the original matlab code because
|
298 |
+
# there was an error on return when arnorm==0
|
299 |
+
normar = alpha * beta
|
300 |
+
if normar == 0:
|
301 |
+
if show:
|
302 |
+
print(msg[0])
|
303 |
+
return x, istop, itn, normr, normar, normA, condA, normx
|
304 |
+
|
305 |
+
if normb == 0:
|
306 |
+
x[()] = 0
|
307 |
+
return x, istop, itn, normr, normar, normA, condA, normx
|
308 |
+
|
309 |
+
if show:
|
310 |
+
print(' ')
|
311 |
+
print(hdg1, hdg2)
|
312 |
+
test1 = 1
|
313 |
+
test2 = alpha / beta
|
314 |
+
str1 = f'{itn:6g} {x[0]:12.5e}'
|
315 |
+
str2 = f' {normr:10.3e} {normar:10.3e}'
|
316 |
+
str3 = f' {test1:8.1e} {test2:8.1e}'
|
317 |
+
print(''.join([str1, str2, str3]))
|
318 |
+
|
319 |
+
# Main iteration loop.
|
320 |
+
while itn < maxiter:
|
321 |
+
itn = itn + 1
|
322 |
+
|
323 |
+
# Perform the next step of the bidiagonalization to obtain the
|
324 |
+
# next beta, u, alpha, v. These satisfy the relations
|
325 |
+
# beta*u = A@v - alpha*u,
|
326 |
+
# alpha*v = A'@u - beta*v.
|
327 |
+
|
328 |
+
u *= -alpha
|
329 |
+
u += A.matvec(v)
|
330 |
+
beta = norm(u)
|
331 |
+
|
332 |
+
if beta > 0:
|
333 |
+
u *= (1 / beta)
|
334 |
+
v *= -beta
|
335 |
+
v += A.rmatvec(u)
|
336 |
+
alpha = norm(v)
|
337 |
+
if alpha > 0:
|
338 |
+
v *= (1 / alpha)
|
339 |
+
|
340 |
+
# At this point, beta = beta_{k+1}, alpha = alpha_{k+1}.
|
341 |
+
|
342 |
+
# Construct rotation Qhat_{k,2k+1}.
|
343 |
+
|
344 |
+
chat, shat, alphahat = _sym_ortho(alphabar, damp)
|
345 |
+
|
346 |
+
# Use a plane rotation (Q_i) to turn B_i to R_i
|
347 |
+
|
348 |
+
rhoold = rho
|
349 |
+
c, s, rho = _sym_ortho(alphahat, beta)
|
350 |
+
thetanew = s*alpha
|
351 |
+
alphabar = c*alpha
|
352 |
+
|
353 |
+
# Use a plane rotation (Qbar_i) to turn R_i^T to R_i^bar
|
354 |
+
|
355 |
+
rhobarold = rhobar
|
356 |
+
zetaold = zeta
|
357 |
+
thetabar = sbar * rho
|
358 |
+
rhotemp = cbar * rho
|
359 |
+
cbar, sbar, rhobar = _sym_ortho(cbar * rho, thetanew)
|
360 |
+
zeta = cbar * zetabar
|
361 |
+
zetabar = - sbar * zetabar
|
362 |
+
|
363 |
+
# Update h, h_hat, x.
|
364 |
+
|
365 |
+
hbar *= - (thetabar * rho / (rhoold * rhobarold))
|
366 |
+
hbar += h
|
367 |
+
x += (zeta / (rho * rhobar)) * hbar
|
368 |
+
h *= - (thetanew / rho)
|
369 |
+
h += v
|
370 |
+
|
371 |
+
# Estimate of ||r||.
|
372 |
+
|
373 |
+
# Apply rotation Qhat_{k,2k+1}.
|
374 |
+
betaacute = chat * betadd
|
375 |
+
betacheck = -shat * betadd
|
376 |
+
|
377 |
+
# Apply rotation Q_{k,k+1}.
|
378 |
+
betahat = c * betaacute
|
379 |
+
betadd = -s * betaacute
|
380 |
+
|
381 |
+
# Apply rotation Qtilde_{k-1}.
|
382 |
+
# betad = betad_{k-1} here.
|
383 |
+
|
384 |
+
thetatildeold = thetatilde
|
385 |
+
ctildeold, stildeold, rhotildeold = _sym_ortho(rhodold, thetabar)
|
386 |
+
thetatilde = stildeold * rhobar
|
387 |
+
rhodold = ctildeold * rhobar
|
388 |
+
betad = - stildeold * betad + ctildeold * betahat
|
389 |
+
|
390 |
+
# betad = betad_k here.
|
391 |
+
# rhodold = rhod_k here.
|
392 |
+
|
393 |
+
tautildeold = (zetaold - thetatildeold * tautildeold) / rhotildeold
|
394 |
+
taud = (zeta - thetatilde * tautildeold) / rhodold
|
395 |
+
d = d + betacheck * betacheck
|
396 |
+
normr = sqrt(d + (betad - taud)**2 + betadd * betadd)
|
397 |
+
|
398 |
+
# Estimate ||A||.
|
399 |
+
normA2 = normA2 + beta * beta
|
400 |
+
normA = sqrt(normA2)
|
401 |
+
normA2 = normA2 + alpha * alpha
|
402 |
+
|
403 |
+
# Estimate cond(A).
|
404 |
+
maxrbar = max(maxrbar, rhobarold)
|
405 |
+
if itn > 1:
|
406 |
+
minrbar = min(minrbar, rhobarold)
|
407 |
+
condA = max(maxrbar, rhotemp) / min(minrbar, rhotemp)
|
408 |
+
|
409 |
+
# Test for convergence.
|
410 |
+
|
411 |
+
# Compute norms for convergence testing.
|
412 |
+
normar = abs(zetabar)
|
413 |
+
normx = norm(x)
|
414 |
+
|
415 |
+
# Now use these norms to estimate certain other quantities,
|
416 |
+
# some of which will be small near a solution.
|
417 |
+
|
418 |
+
test1 = normr / normb
|
419 |
+
if (normA * normr) != 0:
|
420 |
+
test2 = normar / (normA * normr)
|
421 |
+
else:
|
422 |
+
test2 = inf
|
423 |
+
test3 = 1 / condA
|
424 |
+
t1 = test1 / (1 + normA * normx / normb)
|
425 |
+
rtol = btol + atol * normA * normx / normb
|
426 |
+
|
427 |
+
# The following tests guard against extremely small values of
|
428 |
+
# atol, btol or ctol. (The user may have set any or all of
|
429 |
+
# the parameters atol, btol, conlim to 0.)
|
430 |
+
# The effect is equivalent to the normAl tests using
|
431 |
+
# atol = eps, btol = eps, conlim = 1/eps.
|
432 |
+
|
433 |
+
if itn >= maxiter:
|
434 |
+
istop = 7
|
435 |
+
if 1 + test3 <= 1:
|
436 |
+
istop = 6
|
437 |
+
if 1 + test2 <= 1:
|
438 |
+
istop = 5
|
439 |
+
if 1 + t1 <= 1:
|
440 |
+
istop = 4
|
441 |
+
|
442 |
+
# Allow for tolerances set by the user.
|
443 |
+
|
444 |
+
if test3 <= ctol:
|
445 |
+
istop = 3
|
446 |
+
if test2 <= atol:
|
447 |
+
istop = 2
|
448 |
+
if test1 <= rtol:
|
449 |
+
istop = 1
|
450 |
+
|
451 |
+
# See if it is time to print something.
|
452 |
+
|
453 |
+
if show:
|
454 |
+
if (n <= 40) or (itn <= 10) or (itn >= maxiter - 10) or \
|
455 |
+
(itn % 10 == 0) or (test3 <= 1.1 * ctol) or \
|
456 |
+
(test2 <= 1.1 * atol) or (test1 <= 1.1 * rtol) or \
|
457 |
+
(istop != 0):
|
458 |
+
|
459 |
+
if pcount >= pfreq:
|
460 |
+
pcount = 0
|
461 |
+
print(' ')
|
462 |
+
print(hdg1, hdg2)
|
463 |
+
pcount = pcount + 1
|
464 |
+
str1 = f'{itn:6g} {x[0]:12.5e}'
|
465 |
+
str2 = f' {normr:10.3e} {normar:10.3e}'
|
466 |
+
str3 = f' {test1:8.1e} {test2:8.1e}'
|
467 |
+
str4 = f' {normA:8.1e} {condA:8.1e}'
|
468 |
+
print(''.join([str1, str2, str3, str4]))
|
469 |
+
|
470 |
+
if istop > 0:
|
471 |
+
break
|
472 |
+
|
473 |
+
# Print the stopping condition.
|
474 |
+
|
475 |
+
if show:
|
476 |
+
print(' ')
|
477 |
+
print('LSMR finished')
|
478 |
+
print(msg[istop])
|
479 |
+
print(f'istop ={istop:8g} normr ={normr:8.1e}')
|
480 |
+
print(f' normA ={normA:8.1e} normAr ={normar:8.1e}')
|
481 |
+
print(f'itn ={itn:8g} condA ={condA:8.1e}')
|
482 |
+
print(' normx =%8.1e' % (normx))
|
483 |
+
print(str1, str2)
|
484 |
+
print(str3, str4)
|
485 |
+
|
486 |
+
return x, istop, itn, normr, normar, normA, condA, normx
|
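For the lsmr routine added above, here is a short usage sketch of the damped least-squares mode described in its docstring; the random overdetermined problem and the explicit maxiter are hypothetical illustration, not part of the diff.

import numpy as np
from scipy.sparse.linalg import lsmr

rng = np.random.default_rng(0)
A = rng.standard_normal((50, 10))            # overdetermined: m > n
x_true = rng.standard_normal(10)
b = A @ x_true + 0.01 * rng.standard_normal(50)

# damp > 0 solves min ||b - A x||^2 + damp^2 * ||x||^2 (Tikhonov-style).
x, istop, itn, normr = lsmr(A, b, damp=1e-3, atol=1e-8, btol=1e-8,
                            maxiter=100)[:4]
print(istop, itn, normr)   # istop reports which stopping rule fired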
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__init__.py
ADDED
File without changes
|
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (198 Bytes).
|
|
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_gcrotmk.cpython-310.pyc
ADDED
Binary file (5.28 kB).
|
|
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_iterative.cpython-310.pyc
ADDED
Binary file (21.3 kB).
|
|
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_lgmres.cpython-310.pyc
ADDED
Binary file (6.67 kB).
|
|
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_lsmr.cpython-310.pyc
ADDED
Binary file (6.63 kB).
|
|
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_lsqr.cpython-310.pyc
ADDED
Binary file (3.04 kB).
|
|
env-llmeval/lib/python3.10/site-packages/scipy/sparse/linalg/_isolve/tests/__pycache__/test_minres.cpython-310.pyc
ADDED
Binary file (3.15 kB).
|
|